summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Lee <64482439+algojohnlee@users.noreply.github.com>2022-11-28 16:10:59 -0500
committerGitHub <noreply@github.com>2022-11-28 16:10:59 -0500
commit181490e38f9c00f511300d325a07fb674b726588 (patch)
treeb02caddc256c5d1b3540b7fdb7e6f358424c0ced
parent6869ccb6b62bc6cc3ec03cc9bb7312b2194fb73e (diff)
parent3583908d7c6decaeb40a4fd1ee2e6b29680a186b (diff)
Merge pull request #4837 from Algo-devops-service/relstable3.12.2v3.12.2-stable
-rw-r--r--.circleci/config.yml42
-rw-r--r--.github/workflows/benchmarks.yml6
-rw-r--r--.github/workflows/build.yml4
-rw-r--r--.github/workflows/codegen_verification.yml6
-rw-r--r--.github/workflows/reviewdog.yml6
-rw-r--r--.golangci-warnings.yml15
-rw-r--r--.golangci.yml46
-rw-r--r--SECURITY.md2
-rw-r--r--agreement/actions.go23
-rw-r--r--agreement/actiontype_string.go2
-rw-r--r--agreement/asyncVoteVerifier.go8
-rw-r--r--agreement/bundle.go6
-rw-r--r--agreement/cadaver.go2
-rw-r--r--agreement/cryptoVerifier.go19
-rw-r--r--agreement/cryptoVerifier_test.go20
-rw-r--r--agreement/demux.go10
-rw-r--r--agreement/errors.go23
-rw-r--r--agreement/events.go25
-rw-r--r--agreement/events_test.go88
-rw-r--r--agreement/eventtype_string.go2
-rw-r--r--agreement/message.go12
-rw-r--r--agreement/message_test.go50
-rw-r--r--agreement/msgp_gen.go5659
-rw-r--r--agreement/msgp_gen_test.go1560
-rw-r--r--agreement/persistence.go104
-rw-r--r--agreement/persistence_test.go141
-rw-r--r--agreement/player.go5
-rw-r--r--agreement/player_permutation_test.go22
-rw-r--r--agreement/player_test.go20
-rw-r--r--agreement/proposalManager.go5
-rw-r--r--agreement/proposalStore.go8
-rw-r--r--agreement/proposalTable.go12
-rw-r--r--agreement/proposalTable_test.go69
-rw-r--r--agreement/proposalTracker.go4
-rw-r--r--agreement/proposalTrackerContract.go2
-rw-r--r--agreement/pseudonode.go4
-rw-r--r--agreement/router.go13
-rw-r--r--agreement/service.go4
-rw-r--r--agreement/sort.go84
-rw-r--r--agreement/sort_test.go60
-rw-r--r--agreement/voteAggregator.go4
-rw-r--r--agreement/voteAggregator_test.go2
-rw-r--r--agreement/voteAuxiliary.go2
-rw-r--r--agreement/voteTracker.go11
-rw-r--r--agreement/voteTrackerContract.go2
-rw-r--r--buildnumber.dat2
-rw-r--r--catchup/catchpointService.go19
-rw-r--r--catchup/fetcher_test.go2
-rw-r--r--catchup/ledgerFetcher.go8
-rw-r--r--catchup/service.go1
-rw-r--r--cmd/algoh/blockWatcher.go15
-rw-r--r--cmd/algoh/blockWatcher_test.go4
-rw-r--r--cmd/algoh/blockstats.go23
-rw-r--r--cmd/algoh/blockstats_test.go68
-rw-r--r--cmd/algoh/client.go7
-rw-r--r--cmd/algoh/deadman.go6
-rw-r--r--cmd/algoh/mockClient.go32
-rw-r--r--cmd/catchpointdump/database.go16
-rw-r--r--cmd/catchpointdump/file.go153
-rw-r--r--cmd/catchpointdump/net.go7
-rw-r--r--cmd/dispenser/index.html.tpl80
-rw-r--r--cmd/dispenser/server.go62
-rw-r--r--cmd/goal/README.md147
-rw-r--r--cmd/goal/account.go207
-rw-r--r--cmd/goal/accountsList.go4
-rw-r--r--cmd/goal/application.go242
-rw-r--r--cmd/goal/asset.go80
-rw-r--r--cmd/goal/box.go118
-rw-r--r--cmd/goal/clerk.go26
-rw-r--r--cmd/goal/commands.go13
-rw-r--r--cmd/goal/examples/boxes.teal60
-rw-r--r--cmd/goal/examples/clear.teal2
-rw-r--r--cmd/goal/formatting.go13
-rw-r--r--cmd/goal/formatting_test.go71
-rw-r--r--cmd/goal/interact.go10
-rw-r--r--cmd/goal/ledger.go2
-rw-r--r--cmd/goal/messages.go5
-rw-r--r--cmd/goal/node.go20
-rw-r--r--cmd/loadgenerator/main.go6
-rw-r--r--cmd/netgoal/README.md52
-rw-r--r--cmd/netgoal/generate.go57
-rw-r--r--cmd/netgoal/network.go16
-rw-r--r--cmd/opdoc/tmLanguage.go12
-rw-r--r--cmd/pingpong/README.md8
-rw-r--r--cmd/pingpong/runCmd.go42
-rw-r--r--cmd/tealdbg/dryrunRequest.go6
-rw-r--r--cmd/tealdbg/localLedger.go10
-rw-r--r--components/mocks/mockCatchpointCatchupAccessor.go6
-rw-r--r--config/config_test.go6
-rw-r--r--config/consensus.go47
-rw-r--r--config/localTemplate.go10
-rw-r--r--config/local_defaults.go4
-rw-r--r--config/version.go2
-rw-r--r--daemon/algod/api/Makefile23
-rw-r--r--daemon/algod/api/algod.oas2.json337
-rw-r--r--daemon/algod/api/algod.oas3.yml433
-rw-r--r--daemon/algod/api/client/restClient.go214
-rw-r--r--daemon/algod/api/generated_server.yml12
-rw-r--r--daemon/algod/api/generated_types.yml11
-rw-r--r--daemon/algod/api/private_server.yml11
-rw-r--r--daemon/algod/api/private_types.yml10
-rw-r--r--daemon/algod/api/server/common/handlers.go3
-rw-r--r--daemon/algod/api/server/router.go12
-rw-r--r--daemon/algod/api/server/v2/account.go88
-rw-r--r--daemon/algod/api/server/v2/account_test.go16
-rw-r--r--daemon/algod/api/server/v2/dryrun.go58
-rw-r--r--daemon/algod/api/server/v2/dryrun_test.go331
-rw-r--r--daemon/algod/api/server/v2/errors.go1
-rw-r--r--daemon/algod/api/server/v2/generated/model/model_types.yml8
-rw-r--r--daemon/algod/api/server/v2/generated/model/types.go1102
-rw-r--r--daemon/algod/api/server/v2/generated/nonparticipating/private/private_routes.yml19
-rw-r--r--daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go359
-rw-r--r--daemon/algod/api/server/v2/generated/nonparticipating/public/public_routes.yml19
-rw-r--r--daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go832
-rw-r--r--daemon/algod/api/server/v2/generated/participating/private/private_routes.yml19
-rw-r--r--daemon/algod/api/server/v2/generated/participating/private/routes.go389
-rw-r--r--daemon/algod/api/server/v2/generated/participating/public/public_routes.yml19
-rw-r--r--daemon/algod/api/server/v2/generated/participating/public/routes.go414
-rw-r--r--daemon/algod/api/server/v2/generated/private/routes.go495
-rw-r--r--daemon/algod/api/server/v2/generated/private/types.go841
-rw-r--r--daemon/algod/api/server/v2/generated/routes.go1128
-rw-r--r--daemon/algod/api/server/v2/generated/types.go919
-rw-r--r--daemon/algod/api/server/v2/handlers.go266
-rw-r--r--daemon/algod/api/server/v2/handlers_test.go41
-rw-r--r--daemon/algod/api/server/v2/test/handlers_resources_test.go38
-rw-r--r--daemon/algod/api/server/v2/test/handlers_test.go78
-rw-r--r--daemon/algod/api/server/v2/test/helpers.go17
-rw-r--r--daemon/algod/api/server/v2/utils.go47
-rw-r--r--daemon/algod/api/swagger.go25
-rw-r--r--data/account/participationRegistry.go2
-rw-r--r--data/account/registeryDbOps.go2
-rw-r--r--data/accountManager.go6
-rw-r--r--data/basics/msgp_gen.go152
-rw-r--r--data/basics/userBalance.go24
-rw-r--r--data/basics/userBalance_test.go97
-rw-r--r--data/pools/transactionPool.go16
-rw-r--r--data/transactions/application.go23
-rw-r--r--data/transactions/application_test.go9
-rw-r--r--data/transactions/json_test.go96
-rw-r--r--data/transactions/logic/README.md111
-rw-r--r--data/transactions/logic/README_in.md71
-rw-r--r--data/transactions/logic/TEAL_opcodes.md122
-rw-r--r--data/transactions/logic/assembler.go177
-rw-r--r--data/transactions/logic/assembler_test.go201
-rw-r--r--data/transactions/logic/backwardCompat_test.go2
-rw-r--r--data/transactions/logic/box.go318
-rw-r--r--data/transactions/logic/box_test.go602
-rw-r--r--data/transactions/logic/debugger_test.go4
-rw-r--r--data/transactions/logic/doc.go77
-rw-r--r--data/transactions/logic/doc_test.go4
-rw-r--r--data/transactions/logic/eval.go397
-rw-r--r--data/transactions/logic/evalAppTxn_test.go41
-rw-r--r--data/transactions/logic/evalCrypto_test.go34
-rw-r--r--data/transactions/logic/evalStateful_test.go285
-rw-r--r--data/transactions/logic/eval_test.go623
-rw-r--r--data/transactions/logic/export_test.go14
-rw-r--r--data/transactions/logic/fields.go37
-rw-r--r--data/transactions/logic/fields_string.go15
-rw-r--r--data/transactions/logic/fields_test.go64
-rw-r--r--data/transactions/logic/frames.go7
-rw-r--r--data/transactions/logic/frames_test.go2
-rw-r--r--data/transactions/logic/langspec.json140
-rw-r--r--data/transactions/logic/ledger_test.go179
-rw-r--r--data/transactions/logic/opcodes.go22
-rw-r--r--data/transactions/logic/pairing.go3
-rw-r--r--data/transactions/logic/parsing.go105
-rw-r--r--data/transactions/logic/parsing_test.go139
-rw-r--r--data/transactions/logic/teal.tmLanguage.json8
-rw-r--r--data/transactions/msgp_gen.go1641
-rw-r--r--data/transactions/msgp_gen_test.go60
-rw-r--r--data/transactions/teal_test.go2
-rw-r--r--data/transactions/transaction.go21
-rw-r--r--data/transactions/transaction_test.go59
-rw-r--r--data/transactions/verify/txn.go104
-rw-r--r--data/txHandler.go88
-rw-r--r--data/txHandler_test.go409
-rw-r--r--data/txntest/txn.go50
-rwxr-xr-xdocker/releases/build_releases.sh2
-rw-r--r--gen/generate.go41
-rw-r--r--gen/generate_test.go166
-rw-r--r--gen/resources/genesis-balance.json290
-rw-r--r--gen/resources/genesis-base.json290
-rw-r--r--gen/walletData.go28
-rw-r--r--go.mod52
-rw-r--r--go.sum143
-rw-r--r--installer/config.json.example4
-rw-r--r--installer/genesis/alphanet/genesis.json220
-rw-r--r--ledger/accountdb.go431
-rw-r--r--ledger/accountdb_test.go973
-rw-r--r--ledger/acctonline.go2
-rw-r--r--ledger/acctupdates.go396
-rw-r--r--ledger/acctupdates_test.go327
-rw-r--r--ledger/apply/application_test.go2
-rw-r--r--ledger/apply/payment.go9
-rw-r--r--ledger/apptxn_test.go (renamed from ledger/internal/apptxn_test.go)803
-rw-r--r--ledger/boxtxn_test.go688
-rw-r--r--ledger/catchpointfileheader.go1
-rw-r--r--ledger/catchpointtracker.go306
-rw-r--r--ledger/catchpointtracker_test.go142
-rw-r--r--ledger/catchpointwriter.go233
-rw-r--r--ledger/catchpointwriter_test.go644
-rw-r--r--ledger/catchupaccessor.go105
-rw-r--r--ledger/catchupaccessor_test.go51
-rw-r--r--ledger/double_test.go (renamed from ledger/internal/double_test.go)44
-rw-r--r--ledger/eval_simple_test.go545
-rw-r--r--ledger/evalindexer.go12
-rw-r--r--ledger/evalindexer_test.go5
-rw-r--r--ledger/hashkind_string.go26
-rw-r--r--ledger/internal/appcow.go33
-rw-r--r--ledger/internal/appcow_test.go98
-rw-r--r--ledger/internal/applications.go266
-rw-r--r--ledger/internal/applications_test.go365
-rw-r--r--ledger/internal/assetcow.go26
-rw-r--r--ledger/internal/cow.go41
-rw-r--r--ledger/internal/cow_test.go6
-rw-r--r--ledger/internal/eval.go115
-rw-r--r--ledger/internal/eval_blackbox_test.go1256
-rw-r--r--ledger/internal/eval_test.go10
-rw-r--r--ledger/internal/prefetcher/prefetcher.go170
-rw-r--r--ledger/internal/prefetcher/prefetcher_alignment_test.go129
-rw-r--r--ledger/internal/prefetcher/prefetcher_test.go141
-rw-r--r--ledger/ledger.go22
-rw-r--r--ledger/ledger_test.go14
-rw-r--r--ledger/ledgercore/accountdata.go19
-rw-r--r--ledger/ledgercore/error.go4
-rw-r--r--ledger/ledgercore/statedelta.go177
-rw-r--r--ledger/ledgercore/statedelta_test.go29
-rw-r--r--ledger/lruaccts.go40
-rw-r--r--ledger/lrukv.go132
-rw-r--r--ledger/lrukv_test.go240
-rw-r--r--ledger/lruresources.go40
-rw-r--r--ledger/msgp_gen.go775
-rw-r--r--ledger/msgp_gen_test.go80
-rw-r--r--ledger/persistedkvs.go143
-rw-r--r--ledger/persistedkvs_test.go175
-rw-r--r--ledger/persistedresources_list.go2
-rw-r--r--ledger/simple_test.go187
-rw-r--r--ledger/testing/consensusRange.go106
-rw-r--r--ledger/testing/consensusRange_test.go58
-rw-r--r--ledger/testing/randomAccounts.go5
-rw-r--r--ledger/tracker.go6
-rw-r--r--ledger/trackerdb.go41
-rw-r--r--ledger/txnbench_test.go (renamed from ledger/internal/txnbench_test.go)7
-rw-r--r--ledger/txtail_test.go2
-rw-r--r--libgoal/libgoal.go195
-rw-r--r--libgoal/participation.go6
-rw-r--r--libgoal/transactions.go74
-rw-r--r--logging/telemetryspec/event.go31
-rw-r--r--logging/telemetryspec/metric.go8
-rw-r--r--logging/usage.go10
-rw-r--r--netdeploy/network.go9
-rw-r--r--netdeploy/remote/deployedNetwork.go45
-rw-r--r--netdeploy/remote/deployedNetwork_test.go25
-rw-r--r--network/limitlistener/rejectingLimitListener.go4
-rw-r--r--network/requestTracker.go4
-rw-r--r--network/wsNetwork.go61
-rw-r--r--network/wsNetwork_test.go230
-rw-r--r--network/wsPeer.go27
-rw-r--r--network/wsPeer_test.go1
-rw-r--r--node/node.go15
-rw-r--r--protocol/codec_tester.go13
-rw-r--r--protocol/consensus.go10
-rw-r--r--rpcs/txService.go2
-rw-r--r--rpcs/txService_test.go5
-rw-r--r--rpcs/txSyncer.go4
-rw-r--r--rpcs/txSyncer_test.go14
-rw-r--r--scripts/buildtools/versions4
-rwxr-xr-xscripts/dump_genesis.sh3
-rw-r--r--shared/pingpong/accounts.go210
-rw-r--r--shared/pingpong/config.go10
-rw-r--r--shared/pingpong/pingpong.go75
-rw-r--r--stateproof/builder.go8
-rw-r--r--test/commandandcontrol/cc_agent/main.go6
-rw-r--r--test/commandandcontrol/cc_client/main.go2
-rw-r--r--test/commandandcontrol/cc_service/main.go4
-rw-r--r--test/e2e-go/cli/goal/clerk_test.go4
-rw-r--r--test/e2e-go/features/accountPerf/sixMillion_test.go25
-rw-r--r--test/e2e-go/features/catchup/catchpointCatchup_test.go305
-rw-r--r--test/e2e-go/features/devmode/devmode_test.go5
-rw-r--r--test/e2e-go/features/participation/accountParticipationTransitions_test.go8
-rw-r--r--test/e2e-go/features/participation/onlineOfflineParticipation_test.go8
-rw-r--r--test/e2e-go/features/participation/participationExpiration_test.go14
-rw-r--r--test/e2e-go/features/participation/participationRewards_test.go27
-rw-r--r--test/e2e-go/features/stateproofs/stateproofs_test.go110
-rw-r--r--test/e2e-go/features/transactions/accountv2_test.go344
-rw-r--r--test/e2e-go/features/transactions/app_pages_test.go30
-rw-r--r--test/e2e-go/features/transactions/application_test.go2
-rw-r--r--test/e2e-go/features/transactions/asset_test.go268
-rw-r--r--test/e2e-go/features/transactions/onlineStatusChange_test.go12
-rw-r--r--test/e2e-go/features/transactions/proof_test.go10
-rw-r--r--test/e2e-go/features/transactions/sendReceive_test.go7
-rw-r--r--test/e2e-go/perf/basic_test.go9
-rw-r--r--test/e2e-go/restAPI/restClient_test.go533
-rw-r--r--test/e2e-go/stress/transactions/createManyAndGoOnline_test.go8
-rw-r--r--test/e2e-go/upgrades/application_support_test.go28
-rw-r--r--test/e2e-go/upgrades/stateproof_participation_test.go15
-rw-r--r--test/framework/fixtures/libgoalFixture.go18
-rw-r--r--test/framework/fixtures/restClientFixture.go47
-rw-r--r--test/heapwatch/block_history.py19
-rw-r--r--test/heapwatch/block_history_plot.py55
-rw-r--r--test/heapwatch/heapWatch.py13
-rw-r--r--test/heapwatch/metrics_delta.py49
-rwxr-xr-xtest/scripts/e2e_subs/app-assets.sh10
-rwxr-xr-xtest/scripts/e2e_subs/asset-misc.sh12
-rwxr-xr-xtest/scripts/e2e_subs/box-search.sh135
-rwxr-xr-xtest/scripts/e2e_subs/e2e-app-simple.sh1
-rwxr-xr-xtest/scripts/e2e_subs/rest-applications-endpoint.sh2
-rwxr-xr-xtest/scripts/e2e_subs/rest-assets-endpoint.sh2
-rw-r--r--test/scripts/e2e_subs/tealprogs/boxes.teal60
-rw-r--r--test/testdata/configs/config-v24.json3
-rw-r--r--test/testdata/configs/config-v25.json108
-rwxr-xr-xtest/testdata/deployednettemplates/generate-recipe/generate_network.py16
-rw-r--r--test/testdata/deployednettemplates/recipes/README.md19
-rw-r--r--test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/alphanet/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py2
-rw-r--r--test/testdata/deployednettemplates/recipes/alphanet/genesis.json2
-rw-r--r--test/testdata/deployednettemplates/recipes/alphanet/topology.json60
-rw-r--r--test/testdata/deployednettemplates/recipes/betanet-model-2/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile20
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json4
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/gen_topology.py26
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json1174
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json1818
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json3
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/nonPartNode.json2
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/topology.json142
-rw-r--r--test/testdata/deployednettemplates/recipes/custom/README.md6
-rw-r--r--test/testdata/deployednettemplates/recipes/custom/example/network_templates/c5dmachines.json1
-rw-r--r--test/testdata/deployednettemplates/recipes/custom/example/network_templates/network-tpl.json1
-rw-r--r--test/testdata/deployednettemplates/recipes/custom/network_templates/network-tpl.json15
-rw-r--r--test/testdata/deployednettemplates/recipes/mainnet-model/network-tpl.json3
-rw-r--r--test/testdata/deployednettemplates/recipes/mmnet/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/network-partition/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/Makefile11
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/gen_topology.py20
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/genesis.json325
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/net.json56
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/node.json4
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/relay.json2
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/topology.json6
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario2/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3s/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/Makefile2
-rw-r--r--tools/debug/doberman/logo.go22
-rw-r--r--tools/debug/doberman/main.go109
-rw-r--r--tools/network/resolver_test.go14
-rw-r--r--util/db/dbutil.go19
-rw-r--r--util/metrics/counter_test.go6
-rw-r--r--util/metrics/gauge_test.go2
-rw-r--r--util/metrics/metrics.go27
-rw-r--r--util/tcpinfo.go72
-rw-r--r--util/tcpinfo_darwin.go49
-rw-r--r--util/tcpinfo_linux.go129
-rw-r--r--util/tcpinfo_noop.go (renamed from ledger/internal/export_test.go)16
357 files changed, 31349 insertions, 13559 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 81b4805a2..a4567113a 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -7,7 +7,7 @@ version: 2.1
orbs:
win: circleci/windows@2.3.0
go: circleci/go@1.7.0
- slack: circleci/slack@4.4.2
+ slack: circleci/slack@4.10.1
parameters:
ubuntu_image:
@@ -64,6 +64,13 @@ executors:
mac_arm64_large:
<<: *executor-mac-arm64
+slack-fail-stop-step: &slack-fail-post-step
+ post-steps:
+ - slack/notify:
+ branch_pattern: "master,rel/beta,rel/nightly,rel/stable"
+ event: fail
+ template: basic_fail_1
+
# ===== Workflow Definitions =====
workflows:
version: 2
@@ -80,6 +87,7 @@ workflows:
- /rel\/.*/
- << pipeline.parameters.valid_nightly_branch >>
context: slack-secrets
+ <<: *slack-fail-post-step
- test:
name: << matrix.platform >>_test
@@ -91,6 +99,8 @@ workflows:
ignore:
- /rel\/.*/
- << pipeline.parameters.valid_nightly_branch >>
+ context: slack-secrets
+ <<: *slack-fail-post-step
- test_nightly:
name: << matrix.platform >>_test_nightly
@@ -99,6 +109,7 @@ workflows:
requires:
- << matrix.platform >>_build_nightly
context: slack-secrets
+ <<: *slack-fail-post-step
- integration:
name: << matrix.platform >>_integration
@@ -106,6 +117,8 @@ workflows:
<<: *matrix-default
filters:
<<: *filters-default
+ context: slack-secrets
+ <<: *slack-fail-post-step
- integration_nightly:
name: << matrix.platform >>_integration_nightly
@@ -114,6 +127,7 @@ workflows:
requires:
- << matrix.platform >>_build_nightly
context: slack-secrets
+ <<: *slack-fail-post-step
- e2e_expect:
name: << matrix.platform >>_e2e_expect
@@ -121,6 +135,8 @@ workflows:
<<: *matrix-default
filters:
<<: *filters-default
+ context: slack-secrets
+ <<: *slack-fail-post-step
- e2e_expect_nightly:
name: << matrix.platform >>_e2e_expect_nightly
@@ -129,6 +145,7 @@ workflows:
requires:
- << matrix.platform >>_build_nightly
context: slack-secrets
+ <<: *slack-fail-post-step
- e2e_subs:
name: << matrix.platform >>_e2e_subs
@@ -136,6 +153,8 @@ workflows:
<<: *matrix-default
filters:
<<: *filters-default
+ context: slack-secrets
+ <<: *slack-fail-post-step
- e2e_subs_nightly:
name: << matrix.platform >>_e2e_subs_nightly
@@ -146,6 +165,7 @@ workflows:
context:
- slack-secrets
- aws-secrets
+ <<: *slack-fail-post-step
- tests_verification_job:
name: << matrix.platform >>_<< matrix.job_type >>_verification
@@ -155,6 +175,8 @@ workflows:
job_type: ["test", "integration", "e2e_expect"]
requires:
- << matrix.platform >>_<< matrix.job_type >>
+ context: slack-secrets
+ <<: *slack-fail-post-step
- tests_verification_job_nightly:
name: << matrix.platform >>_<< matrix.job_type >>_verification
@@ -165,6 +187,7 @@ workflows:
requires:
- << matrix.platform >>_<< matrix.job_type >>
context: slack-secrets
+ <<: *slack-fail-post-step
- upload_binaries:
name: << matrix.platform >>_upload_binaries
@@ -183,6 +206,7 @@ workflows:
context:
- slack-secrets
- aws-secrets
+ <<: *slack-fail-post-step
#- windows_x64_build
@@ -207,9 +231,6 @@ jobs:
- go
- gimme
- .gimme
- - slack/notify: &slack-fail-event
- event: fail
- template: basic_fail_1
test:
parameters:
@@ -240,9 +261,6 @@ jobs:
result_subdir: << parameters.platform >>_test_nightly
no_output_timeout: 45m
- upload_coverage
- - slack/notify: &slack-fail-event
- event: fail
- template: basic_fail_1
integration:
parameters:
@@ -275,8 +293,6 @@ jobs:
platform: << parameters.platform >>
result_subdir: << parameters.platform >>_integration_nightly
no_output_timeout: 45m
- - slack/notify:
- <<: *slack-fail-event
e2e_expect:
parameters:
@@ -309,8 +325,6 @@ jobs:
platform: << parameters.platform >>
result_subdir: << parameters.platform>>_e2e_expect_nightly
no_output_timeout: 45m
- - slack/notify:
- <<: *slack-fail-event
e2e_subs:
parameters:
@@ -345,8 +359,6 @@ jobs:
platform: << parameters.platform >>
result_subdir: << parameters.platform >>_e2e_subs_nightly
no_output_timeout: 45m
- - slack/notify:
- <<: *slack-fail-event
windows_x64_build:
executor:
@@ -396,8 +408,6 @@ jobs:
- checkout
- tests_verification_command:
result_subdir: << parameters.platform >>_<< parameters.job_type >>
- - slack/notify:
- <<: *slack-fail-event
upload_binaries:
working_directory: << pipeline.parameters.build_dir >>/project
@@ -410,8 +420,6 @@ jobs:
- prepare_go
- upload_binaries_command:
platform: << parameters.platform >>
- - slack/notify:
- <<: *slack-fail-event
# ===== Command Definitions =====
commands:
diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml
index 65c0e90bf..51bacd0c8 100644
--- a/.github/workflows/benchmarks.yml
+++ b/.github/workflows/benchmarks.yml
@@ -49,3 +49,9 @@ jobs:
github-token: ${{ secrets.GITHUB_TOKEN }}
alert-threshold: '200%'
comment-on-alert: true
+ - name: Slack Notification
+ env:
+ SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
+ run: |
+ curl -X POST --data-urlencode "payload={\"text\": \"Benchmark workflow failed. ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} \"}" $SLACK_WEBHOOK
+ if: ${{ failure() && (contains(github.ref_name, 'rel/nightly') || contains(github.ref_name, 'rel/beta') || contains(github.ref_name, 'rel/stable') || contains(github.ref_name, 'master')) }}
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 58b462723..56ab06793 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -44,5 +44,5 @@ jobs:
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
run: |
- curl -X POST --data-urlencode "payload={\"text\": \"Nightly windows build test on Github failed. ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} \"}" $SLACK_WEBHOOK
- if: ${{ failure() && (contains(github.base_ref, 'rel/nightly') || contains(github.base_ref, 'rel/beta') || contains(github.base_ref, 'rel/stable')) }}
+ curl -X POST --data-urlencode "payload={\"text\": \"Build windows failed. ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} \"}" $SLACK_WEBHOOK
+ if: ${{ failure() && (contains(github.ref_name, 'rel/nightly') || contains(github.ref_name, 'rel/beta') || contains(github.ref_name, 'rel/stable') || contains(github.ref_name, 'master')) }}
diff --git a/.github/workflows/codegen_verification.yml b/.github/workflows/codegen_verification.yml
index cdeed288b..8b6bcf046 100644
--- a/.github/workflows/codegen_verification.yml
+++ b/.github/workflows/codegen_verification.yml
@@ -20,3 +20,9 @@ jobs:
export GOPATH="${GITHUB_WORKSPACE}/go"
cd go-algorand
scripts/travis/codegen_verification.sh
+ - name: Slack Notification
+ env:
+ SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
+ run: |
+ curl -X POST --data-urlencode "payload={\"text\": \"Codegen verification failed. ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} \"}" $SLACK_WEBHOOK
+ if: ${{ failure() && (contains(github.ref_name, 'rel/nightly') || contains(github.ref_name, 'rel/beta') || contains(github.ref_name, 'rel/stable') || contains(github.ref_name, 'master')) }}
diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml
index 736b8a6bc..d76ac49e8 100644
--- a/.github/workflows/reviewdog.yml
+++ b/.github/workflows/reviewdog.yml
@@ -92,3 +92,9 @@ jobs:
-filter-mode=added
-fail-on-error=false
-level=warning
+ - name: Slack Notification
+ env:
+ SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
+ run: |
+ curl -X POST --data-urlencode "payload={\"text\": \"Reviewdog failed. ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} \"}" $SLACK_WEBHOOK
+ if: ${{ failure() && (contains(github.ref_name, 'rel/nightly') || contains(github.ref_name, 'rel/beta') || contains(github.ref_name, 'rel/stable') || contains(github.ref_name, 'master')) }} \ No newline at end of file
diff --git a/.golangci-warnings.yml b/.golangci-warnings.yml
index f0f2eee48..e3b8d22ff 100644
--- a/.golangci-warnings.yml
+++ b/.golangci-warnings.yml
@@ -8,8 +8,8 @@ linters:
- deadcode
- partitiontest
- structcheck
- - typecheck
- varcheck
+ - unconvert
- unused
@@ -52,24 +52,19 @@ issues:
# be more lenient with test code
- path: _test\.go
linters:
- - staticcheck
+ - deadcode
- structcheck
- - typecheck
- varcheck
- - deadcode
- - gosimple
+ - unconvert
- unused
# Add all linters here -- Comment this block out for testing linters
- path: test/linttest/lintissues\.go
linters:
- - staticcheck
+ - deadcode
- structcheck
- - typecheck
- varcheck
- - deadcode
- - gosimple
+ - unconvert
- unused
- - partitiontest
- path: crypto/secp256k1/secp256_test\.go
linters:
- partitiontest
diff --git a/.golangci.yml b/.golangci.yml
index 271c682e5..0122edf82 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -16,6 +16,7 @@ linters:
- revive
- staticcheck
- typecheck
+ - paralleltest
severity:
default-severity: error
@@ -33,6 +34,43 @@ linters-settings:
- (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).error
- (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).warnf
- (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).warn
+ # We do this 121 times and never check the error.
+ - (*github.com/spf13/cobra.Command).MarkFlagRequired
+ govet:
+ settings:
+ printf:
+ # Comma-separated list of print function names to check (in addition to default, see `go tool vet help printf`).
+ # Default: []
+ funcs:
+ - (github.com/algorand/go-algorand/logging.Logger).Debugf
+ - (github.com/algorand/go-algorand/logging.Logger).Infof
+ - (github.com/algorand/go-algorand/logging.Logger).Warnf
+ - (github.com/algorand/go-algorand/logging.Logger).Errorf
+ - (github.com/algorand/go-algorand/logging.Logger).Fatalf
+ - (github.com/algorand/go-algorand/logging.Logger).Panicf
+ - (github.com/algorand/go-algorand/logging.Logger).Debugln
+ - (github.com/algorand/go-algorand/logging.Logger).Infoln
+ - (github.com/algorand/go-algorand/logging.Logger).Warnln
+ - (github.com/algorand/go-algorand/logging.Logger).Errorln
+ - (github.com/algorand/go-algorand/logging.Logger).Fatalln
+ - (github.com/algorand/go-algorand/logging.Logger).Panicln
+ - (github.com/algorand/go-algorand/logging.Logger).Debug
+ - (github.com/algorand/go-algorand/logging.Logger).Info
+ - (github.com/algorand/go-algorand/logging.Logger).Warn
+ - (github.com/algorand/go-algorand/logging.Logger).Error
+ - (github.com/algorand/go-algorand/logging.Logger).Fatal
+ - (github.com/algorand/go-algorand/logging.Logger).Panic
+ - (github.com/algorand/go-algorand/data/transactions/logic.OpStream).warnf
+ - (github.com/algorand/go-algorand/data/transactions/logic.OpStream).errorf
+ - (github.com/algorand/go-algorand/data/transactions/logic.OpStream).lineErrorf
+ - (github.com/algorand/go-algorand/cmd/goal/main).reportInfof
+ - (github.com/algorand/go-algorand/cmd/goal/main).reportInfoln
+ - (github.com/algorand/go-algorand/cmd/goal/main).reportWarnf
+ - (github.com/algorand/go-algorand/cmd/goal/main).reportWarnln
+ - (github.com/algorand/go-algorand/cmd/goal/main).reportWarnRawf
+ - (github.com/algorand/go-algorand/cmd/goal/main).reportWarnRawln
+ - (github.com/algorand/go-algorand/cmd/goal/main).reportErrorf
+ - (github.com/algorand/go-algorand/cmd/goal/main).reportErrorln
issues:
# Work our way back over time to be clean against all these
@@ -69,15 +107,19 @@ issues:
- path: _test\.go
linters:
- errcheck
- - gofmt
+ # - gofmt
- gosimple
- - govet
+ # - govet
- ineffassign
- misspell
- nolintlint
# - revive
- staticcheck
- typecheck
+ # Ignore missing parallel tests in existing packages
+ - path: (agreement|catchup|cmd|config|crypto|daemon|data|gen|ledger|logging|netdeploy|network|node|protocol|rpcs|shared|stateproof|test|tools|util).*_test.go
+ linters:
+ - paralleltest
# Add all linters here -- Comment this block out for testing linters
- path: test/linttest/lintissues\.go
linters:
diff --git a/SECURITY.md b/SECURITY.md
index 52b3bde6c..4438c3e9a 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -4,4 +4,4 @@ Algorand takes the security of the platform and of its users very seriously. We
If you believe that you have found a security vulnerability you may disclose it here:
-https://www.algorand.com/resources/blog/security
+https://immunefi.com/bounty/algorand/
diff --git a/agreement/actions.go b/agreement/actions.go
index 779d9d467..ca33c18db 100644
--- a/agreement/actions.go
+++ b/agreement/actions.go
@@ -26,8 +26,7 @@ import (
)
//go:generate stringer -type=actionType
-//msgp:ignore actionType
-type actionType int
+type actionType uint8
const (
noop actionType = iota
@@ -103,7 +102,7 @@ type networkAction struct {
UnauthenticatedVotes []unauthenticatedVote
- Err serializableError
+ Err *serializableError
}
func (a networkAction) t() actionType {
@@ -181,7 +180,7 @@ type cryptoAction struct {
Period period
Step step
Pinned bool
- TaskIndex int
+ TaskIndex uint64
}
func (a cryptoAction) t() actionType {
@@ -388,7 +387,7 @@ func (a pseudonodeAction) do(ctx context.Context, s *Service) {
case nil:
// no error.
persistCompleteEvents := s.persistState(persistStateDone)
- // we want to place there two one after the other. That way, the second would not get executed up until the first one is complete.
+ // we want to place these two one after the other. That way, the second would not get executed up until the first one is complete.
s.demux.prioritize(persistCompleteEvents)
s.demux.prioritize(voteEvents)
default:
@@ -403,12 +402,12 @@ func (a pseudonodeAction) do(ctx context.Context, s *Service) {
}
}
-func ignoreAction(e messageEvent, err serializableError) action {
- return networkAction{T: ignore, Err: err, h: e.Input.MessageHandle}
+func ignoreAction(e messageEvent, err *serializableError) action {
+ return networkAction{T: ignore, Err: err, h: e.Input.messageHandle}
}
-func disconnectAction(e messageEvent, err serializableError) action {
- return networkAction{T: disconnect, Err: err, h: e.Input.MessageHandle}
+func disconnectAction(e messageEvent, err *serializableError) action {
+ return networkAction{T: disconnect, Err: err, h: e.Input.messageHandle}
}
func broadcastAction(tag protocol.Tag, o interface{}) action {
@@ -427,7 +426,7 @@ func broadcastAction(tag protocol.Tag, o interface{}) action {
}
func relayAction(e messageEvent, tag protocol.Tag, o interface{}) action {
- a := networkAction{T: relay, h: e.Input.MessageHandle, Tag: tag}
+ a := networkAction{T: relay, h: e.Input.messageHandle, Tag: tag}
// TODO would be good to have compiler check this (and related) type switch
// by specializing one method per type
switch tag {
@@ -441,7 +440,7 @@ func relayAction(e messageEvent, tag protocol.Tag, o interface{}) action {
return a
}
-func verifyVoteAction(e messageEvent, r round, p period, taskIndex int) action {
+func verifyVoteAction(e messageEvent, r round, p period, taskIndex uint64) action {
return cryptoAction{T: verifyVote, M: e.Input, Round: r, Period: p, TaskIndex: taskIndex}
}
@@ -479,7 +478,7 @@ type checkpointAction struct {
Round round
Period period
Step step
- Err serializableError
+ Err *serializableError
done chan error // an output channel to let the pseudonode that we're done processing. We don't want to serialize that, since it's not needed in recovery/autopsy
}
diff --git a/agreement/actiontype_string.go b/agreement/actiontype_string.go
index c27b9138b..9272ec2cf 100644
--- a/agreement/actiontype_string.go
+++ b/agreement/actiontype_string.go
@@ -31,7 +31,7 @@ const _actionType_name = "noopignorebroadcastrelaydisconnectbroadcastVotesverify
var _actionType_index = [...]uint8{0, 4, 10, 19, 24, 34, 48, 58, 71, 83, 89, 100, 106, 112, 120, 129, 139}
func (i actionType) String() string {
- if i < 0 || i >= actionType(len(_actionType_index)-1) {
+ if i >= actionType(len(_actionType_index)-1) {
return "actionType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _actionType_name[_actionType_index[i]:_actionType_index[i+1]]
diff --git a/agreement/asyncVoteVerifier.go b/agreement/asyncVoteVerifier.go
index cb26b440e..877c92dfb 100644
--- a/agreement/asyncVoteVerifier.go
+++ b/agreement/asyncVoteVerifier.go
@@ -29,7 +29,7 @@ type asyncVerifyVoteRequest struct {
l LedgerReader
uv *unauthenticatedVote
uev *unauthenticatedEquivocationVote
- index int
+ index uint64
message message
// a channel that holds the response
@@ -39,7 +39,7 @@ type asyncVerifyVoteRequest struct {
type asyncVerifyVoteResponse struct {
v vote
ev equivocationVote
- index int
+ index uint64
message message
err error
cancelled bool
@@ -131,7 +131,7 @@ func (avv *AsyncVoteVerifier) executeEqVoteVerification(task interface{}) interf
}
}
-func (avv *AsyncVoteVerifier) verifyVote(verctx context.Context, l LedgerReader, uv unauthenticatedVote, index int, message message, out chan<- asyncVerifyVoteResponse) error {
+func (avv *AsyncVoteVerifier) verifyVote(verctx context.Context, l LedgerReader, uv unauthenticatedVote, index uint64, message message, out chan<- asyncVerifyVoteResponse) error {
select {
case <-avv.ctx.Done(): // if we're quitting, don't enqueue the request
// case <-verctx.Done(): DO NOT DO THIS! otherwise we will lose the vote (and forget to clean up)!
@@ -151,7 +151,7 @@ func (avv *AsyncVoteVerifier) verifyVote(verctx context.Context, l LedgerReader,
return nil
}
-func (avv *AsyncVoteVerifier) verifyEqVote(verctx context.Context, l LedgerReader, uev unauthenticatedEquivocationVote, index int, message message, out chan<- asyncVerifyVoteResponse) error {
+func (avv *AsyncVoteVerifier) verifyEqVote(verctx context.Context, l LedgerReader, uev unauthenticatedEquivocationVote, index uint64, message message, out chan<- asyncVerifyVoteResponse) error {
select {
case <-avv.ctx.Done(): // if we're quitting, don't enqueue the request
// case <-verctx.Done(): DO NOT DO THIS! otherwise we will lose the vote (and forget to clean up)!
diff --git a/agreement/bundle.go b/agreement/bundle.go
index 211f67b5b..de297110e 100644
--- a/agreement/bundle.go
+++ b/agreement/bundle.go
@@ -202,7 +202,8 @@ func (b unauthenticatedBundle) verifyAsync(ctx context.Context, l LedgerReader,
rv := rawVote{Sender: auth.Sender, Round: b.Round, Period: b.Period, Step: b.Step, Proposal: b.Proposal}
uv := unauthenticatedVote{R: rv, Cred: auth.Cred, Sig: auth.Sig}
- avv.verifyVote(ctx, l, uv, i, message{}, results)
+
+ avv.verifyVote(ctx, l, uv, uint64(i), message{}, results) //nolint:errcheck // verifyVote will call EnqueueBacklog, which blocks until the verify task is queued, or returns an error when ctx.Done(), which we are already checking
}
// create verification requests for equivocation votes
@@ -222,7 +223,8 @@ func (b unauthenticatedBundle) verifyAsync(ctx context.Context, l LedgerReader,
Proposals: auth.Proposals,
Sigs: auth.Sigs,
}
- avv.verifyEqVote(ctx, l, uev, i, message{}, results)
+ avv.verifyEqVote(ctx, l, uev, uint64(i), message{}, results) //nolint:errcheck // verifyEqVote will call EnqueueBacklog, which blocks until the verify task is queued, or returns an error when ctx.Done(), which we are already checking
+
}
return func() (bundle, error) {
diff --git a/agreement/cadaver.go b/agreement/cadaver.go
index 7b0cb8e76..d3f626ada 100644
--- a/agreement/cadaver.go
+++ b/agreement/cadaver.go
@@ -123,7 +123,7 @@ func (c *cadaver) trySetup() bool {
if c.out.bytesWritten >= c.fileSizeTarget {
err := c.out.Close()
if err != nil {
- logging.Base().Warn("unable to close cadaver file : %v", err)
+ logging.Base().Warnf("unable to close cadaver file : %v", err)
}
err = os.Rename(c.filename(), c.filename()+".archive")
if err != nil {
diff --git a/agreement/cryptoVerifier.go b/agreement/cryptoVerifier.go
index cf6c466e5..ca4bceb66 100644
--- a/agreement/cryptoVerifier.go
+++ b/agreement/cryptoVerifier.go
@@ -82,37 +82,41 @@ type (
Quit()
}
+ //msgp:ignore cryptoVoteRequest
cryptoVoteRequest struct {
message // the message we would like to verify.
- TaskIndex int // Caller specific number that would be passed back in the asyncVerifyVoteResponse.TaskIndex field
+ TaskIndex uint64 // Caller specific number that would be passed back in the asyncVerifyVoteResponse.TaskIndex field
Round round // The round that we're going to test against.
Period period // The period associated with the message we're going to test.
ctx context.Context // A context for this request, if the context is cancelled then the request is stale.
}
+ //msgp:ignore cryptoProposalRequest
cryptoProposalRequest struct {
message // the message we would like to verify.
- TaskIndex int // Caller specific number that would be passed back in the cryptoResult.TaskIndex field
+ TaskIndex uint64 // Caller specific number that would be passed back in the cryptoResult.TaskIndex field
Round round // The round that we're going to test against.
Period period // The period associated with the message we're going to test.
Pinned bool // A flag that is set if this is a pinned value for the given round.
ctx context.Context // A context for this request, if the context is cancelled then the request is stale.
}
+ //msgp:ignore cryptoBundleRequest
cryptoBundleRequest struct {
message // the message we would like to verify.
- TaskIndex int // Caller specific number that would be passed back in the asyncVerifyVoteResponse.TaskIndex field
+ TaskIndex uint64 // Caller specific number that would be passed back in the asyncVerifyVoteResponse.TaskIndex field
Round round // The round that we're going to test against.
Period period // The period associated with the message we're going to test.
Certify bool // A flag that set if this is a cert bundle.
ctx context.Context // A context for this request, if the context is cancelled then the request is stale.
}
+ //msgp:ignore cryptoResult
cryptoResult struct {
message
- Err serializableError
- TaskIndex int // the TaskIndex that was passed to the cryptoVerifier during the Verify call on the cryptoRequest.TaskIndex
- Cancelled bool // whether the corresponding request was cancelled before verification completed
+ Err *serializableError
+ TaskIndex uint64 // the TaskIndex that was passed to the cryptoVerifier during the Verify call on the cryptoRequest.TaskIndex
+ Cancelled bool // whether the corresponding request was cancelled before verification completed
}
// A poolCryptoVerifier uses asynchronous goroutines to implement cryptoVerifier.
@@ -146,9 +150,10 @@ type (
out chan cryptoResult
}
+ //msgp:ignore bundleFuture
bundleFuture struct {
message
- index int
+ index uint64
wait func() (bundle, error)
ctx context.Context
}
diff --git a/agreement/cryptoVerifier_test.go b/agreement/cryptoVerifier_test.go
index a42ffd9b0..21b78c601 100644
--- a/agreement/cryptoVerifier_test.go
+++ b/agreement/cryptoVerifier_test.go
@@ -93,7 +93,7 @@ func makeMessage(msgHandle int, tag protocol.Tag, sender basics.Address, l Ledge
}
return message{
- MessageHandle: MessageHandle(msgHandle),
+ messageHandle: MessageHandle(msgHandle),
Tag: tag,
UnauthenticatedVote: makeUnauthenticatedVote(l, sender, selection, voting, Round, Period, Step, proposal),
}
@@ -103,13 +103,13 @@ func makeMessage(msgHandle int, tag protocol.Tag, sender basics.Address, l Ledge
Block: e,
}
return message{
- MessageHandle: MessageHandle(msgHandle),
+ messageHandle: MessageHandle(msgHandle),
Tag: tag,
UnauthenticatedProposal: payload,
}
default: // protocol.VoteBundleTag
return message{
- MessageHandle: MessageHandle(msgHandle),
+ messageHandle: MessageHandle(msgHandle),
Tag: tag,
UnauthenticatedBundle: unauthenticatedBundle{
Round: Round,
@@ -180,9 +180,9 @@ func TestCryptoVerifierBuffers(t *testing.T) {
for _, msgType := range msgTypes {
for i := getSelectorCapacity(msgType) * 5; i > 0; i-- {
msg := <-verifier.Verified(msgType)
- _, has := usedMsgIDs[msg.MessageHandle]
+ _, has := usedMsgIDs[msg.messageHandle]
assert.True(t, has)
- delete(usedMsgIDs, msg.MessageHandle)
+ delete(usedMsgIDs, msg.messageHandle)
}
assert.False(t, verifier.ChannelFull(msgType))
assert.Zero(t, len(verifier.Verified(msgType)))
@@ -230,8 +230,8 @@ func TestCryptoVerifierBuffers(t *testing.T) {
}
msgIDMutex.Lock()
defer msgIDMutex.Unlock()
- _, has := usedMsgIDs[msg.MessageHandle]
- delete(usedMsgIDs, msg.MessageHandle)
+ _, has := usedMsgIDs[msg.messageHandle]
+ delete(usedMsgIDs, msg.messageHandle)
return assert.True(t, has)
}
@@ -333,7 +333,7 @@ func BenchmarkCryptoVerifierProposalVertification(b *testing.B) {
c := verifier.Verified(protocol.ProposalPayloadTag)
request := cryptoProposalRequest{
message: message{
- MessageHandle: MessageHandle(0),
+ messageHandle: MessageHandle(0),
Tag: protocol.ProposalPayloadTag,
UnauthenticatedProposal: proposals[0].unauthenticatedProposal,
},
@@ -402,11 +402,11 @@ func TestCryptoVerifierVerificationFailures(t *testing.T) {
cryptoVerifier := makeCryptoVerifier(nil, nil, voteVerifier, logging.TestingLog(t))
defer cryptoVerifier.Quit()
- cryptoVerifier.VerifyVote(context.Background(), cryptoVoteRequest{message: message{Tag: protocol.AgreementVoteTag}, Round: basics.Round(8), TaskIndex: 14})
+ cryptoVerifier.VerifyVote(context.Background(), cryptoVoteRequest{message: message{Tag: protocol.AgreementVoteTag}, Round: basics.Round(8), TaskIndex: uint64(14)})
// read the failed response from VerifiedVotes:
votesout := cryptoVerifier.VerifiedVotes()
voteResponse := <-votesout
require.Equal(t, context.Canceled, voteResponse.err)
require.True(t, voteResponse.cancelled)
- require.Equal(t, 14, voteResponse.index)
+ require.Equal(t, uint64(14), voteResponse.index)
}
diff --git a/agreement/demux.go b/agreement/demux.go
index 7379590d5..ad51038b4 100644
--- a/agreement/demux.go
+++ b/agreement/demux.go
@@ -140,11 +140,11 @@ func (d *demux) tokenizeMessages(ctx context.Context, net Network, tag protocol.
var msg message
switch tag {
case protocol.AgreementVoteTag:
- msg = message{MessageHandle: raw.MessageHandle, Tag: tag, UnauthenticatedVote: o.(unauthenticatedVote)}
+ msg = message{messageHandle: raw.MessageHandle, Tag: tag, UnauthenticatedVote: o.(unauthenticatedVote)}
case protocol.VoteBundleTag:
- msg = message{MessageHandle: raw.MessageHandle, Tag: tag, UnauthenticatedBundle: o.(unauthenticatedBundle)}
+ msg = message{messageHandle: raw.MessageHandle, Tag: tag, UnauthenticatedBundle: o.(unauthenticatedBundle)}
case protocol.ProposalPayloadTag:
- msg = message{MessageHandle: raw.MessageHandle, Tag: tag, CompoundMessage: o.(compoundMessage)}
+ msg = message{messageHandle: raw.MessageHandle, Tag: tag, CompoundMessage: o.(compoundMessage)}
default:
err := fmt.Errorf("bad message tag: %v", tag)
d.UpdateEventsQueue(fmt.Sprintf("Tokenizing-%s", tag), 0)
@@ -167,7 +167,7 @@ func (d *demux) tokenizeMessages(ctx context.Context, net Network, tag protocol.
}
// verifyVote enqueues a vote message to be verified.
-func (d *demux) verifyVote(ctx context.Context, m message, taskIndex int, r round, p period) {
+func (d *demux) verifyVote(ctx context.Context, m message, taskIndex uint64, r round, p period) {
d.UpdateEventsQueue(eventQueueCryptoVerifierVote, 1)
d.monitor.inc(cryptoVerifierCoserviceType)
d.crypto.VerifyVote(ctx, cryptoVoteRequest{message: m, TaskIndex: taskIndex, Round: r, Period: p})
@@ -367,7 +367,7 @@ func setupCompoundMessage(l LedgerReader, m message) (res externalEvent) {
return
}
- tailmsg := message{MessageHandle: m.MessageHandle, Tag: protocol.ProposalPayloadTag, UnauthenticatedProposal: compound.Proposal}
+ tailmsg := message{messageHandle: m.messageHandle, Tag: protocol.ProposalPayloadTag, UnauthenticatedProposal: compound.Proposal}
synthetic := messageEvent{T: payloadPresent, Input: tailmsg}
proto, err := l.ConsensusVersion(ParamsRound(synthetic.ConsensusRound()))
synthetic = synthetic.AttachConsensusVersion(ConsensusVersionView{Err: makeSerErr(err), Version: proto}).(messageEvent)
diff --git a/agreement/errors.go b/agreement/errors.go
index 606e433b6..eae272b9f 100644
--- a/agreement/errors.go
+++ b/agreement/errors.go
@@ -16,38 +16,39 @@
package agreement
-import "fmt"
+import (
+ "fmt"
+)
// serializableError, or state machine error, is a serializable error that
// is correctly written to cadaver files.
-type serializableErrorUnderlying string
-type serializableError = *serializableErrorUnderlying
+type serializableError string
// implement error interface
-func (e serializableErrorUnderlying) Error() string {
+func (e serializableError) Error() string {
return string(e)
}
-func (e serializableErrorUnderlying) String() string {
+func (e serializableError) String() string {
return e.Error()
}
// makeSerErrStr returns an serializableError that formats as the given text.
-func makeSerErrStr(text string) serializableError {
- s := serializableErrorUnderlying(text)
+func makeSerErrStr(text string) *serializableError {
+ s := serializableError(text)
return &s
}
-func makeSerErrf(format string, a ...interface{}) serializableError {
- s := serializableErrorUnderlying(fmt.Sprintf(format, a...))
+func makeSerErrf(format string, a ...interface{}) *serializableError {
+ s := serializableError(fmt.Sprintf(format, a...))
return &s
}
// makeSerErr returns an serializableError that formats as the given error.
-func makeSerErr(err error) serializableError {
+func makeSerErr(err error) *serializableError {
if err == nil {
return nil
}
- s := serializableErrorUnderlying(err.Error())
+ s := serializableError(err.Error())
return &s
}
diff --git a/agreement/events.go b/agreement/events.go
index 61176872a..f418cc38f 100644
--- a/agreement/events.go
+++ b/agreement/events.go
@@ -43,7 +43,9 @@ type event interface {
// A ConsensusVersionView is a view of the consensus version as read from a
// LedgerReader, associated with some round.
type ConsensusVersionView struct {
- Err serializableError
+ _struct struct{} `codec:","`
+
+ Err *serializableError
Version protocol.ConsensusVersion
}
@@ -69,8 +71,7 @@ type externalEvent interface {
// type of the implementing struct.
//
//go:generate stringer -type=eventType
-//msgp:ignore eventType
-type eventType int
+type eventType uint8
const (
// none is returned by state machines which have no event to return
@@ -255,6 +256,7 @@ func (e emptyEvent) AttachConsensusVersion(v ConsensusVersionView) externalEvent
}
type messageEvent struct {
+ _struct struct{} `codec:","`
// {vote,bundle,payload}{Present,Verified}
T eventType
@@ -263,10 +265,10 @@ type messageEvent struct {
// Err is set if cryptographic verification was attempted and failed for
// Input.
- Err serializableError
+ Err *serializableError
// TaskIndex is optionally set to track a message as it is processed
// through cryptographic verification.
- TaskIndex int
+ TaskIndex uint64
// Tail is an optionally-set field which specifies an unauthenticated
// proposal which should be processed after Input is processed. Tail is
@@ -314,12 +316,15 @@ func (e messageEvent) AttachConsensusVersion(v ConsensusVersionView) externalEve
// freshnessData is bundled with filterableMessageEvent
// to allow for delegated freshness computation
type freshnessData struct {
+ _struct struct{} `codec:","`
+
PlayerRound round
PlayerPeriod period
PlayerStep step
PlayerLastConcluding step
}
+//msgp:ignore filterableMessageEvent
type filterableMessageEvent struct {
messageEvent
@@ -534,7 +539,7 @@ type payloadProcessedEvent struct {
// Err is set to be the reason the proposal payload was rejected in
// payloadRejected.
- Err serializableError
+ Err *serializableError
}
func (e payloadProcessedEvent) t() eventType {
@@ -558,7 +563,7 @@ type filteredEvent struct {
// Err is the reason cryptographic verification failed and is set for
// events {proposal,vote,bundle}Malformed.
- Err serializableError
+ Err *serializableError
}
func (e filteredEvent) t() eventType {
@@ -623,6 +628,7 @@ func (e pinnedValueEvent) ComparableStr() string {
}
type thresholdEvent struct {
+ _struct struct{} `codec:","`
// {{soft,cert,next}Threshold, none}
T eventType
@@ -818,6 +824,7 @@ func (e nextThresholdStatusRequestEvent) ComparableStr() string {
}
type nextThresholdStatusEvent struct {
+ _struct struct{} `codec:","`
// the result of a nextThresholdStatusRequest. Contains two bits of information,
// capturing four cases:
// Bottom = false, Proposal = unset/bottom --> received no next value thresholds
@@ -910,8 +917,8 @@ type checkpointEvent struct {
Round round
Period period
Step step
- Err serializableError // the error that was generated while storing the state to disk; nil on success.
- done chan error // an output channel to let the pseudonode that we're done processing. We don't want to serialize that, since it's not needed in recovery/autopsy.
+ Err *serializableError // the error that was generated while storing the state to disk; nil on success.
+ done chan error // an output channel to let the pseudonode know that we're done processing. We don't want to serialize that, since it's not needed in recovery/autopsy.
}
func (e checkpointEvent) t() eventType {
diff --git a/agreement/events_test.go b/agreement/events_test.go
new file mode 100644
index 000000000..243dd0508
--- /dev/null
+++ b/agreement/events_test.go
@@ -0,0 +1,88 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package agreement
+
+import (
+ "encoding/base64"
+ "testing"
+
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/stretchr/testify/require"
+)
+
+// TestSerializableErrorBackwardCompatibility ensures Err field of type serializableError can be
+// properly decoded from ConsensusVersionView.
+// This test is only needed for agreement state serialization switch from reflection to msgp.
+func TestSerializableErrorBackwardCompatibility(t *testing.T) {
+
+ encodedEmpty, err := base64.StdEncoding.DecodeString("gqNFcnLAp1ZlcnNpb26jdjEw")
+ require.NoError(t, err)
+
+ encoded, err := base64.StdEncoding.DecodeString("gqNFcnKndGVzdGVycqdWZXJzaW9uo3YxMA==")
+ require.NoError(t, err)
+
+ // run on master f57a276 to get the encoded data for above
+ // cv := ConsensusVersionView{
+ // Err: nil,
+ // Version: protocol.ConsensusV10,
+ // }
+
+ // result := protocol.EncodeReflect(&cv)
+ // fmt.Println(base64.StdEncoding.EncodeToString(result))
+
+ // se := serializableErrorUnderlying("testerr")
+ // cv = ConsensusVersionView{
+ // Err: &se,
+ // Version: protocol.ConsensusV10,
+ // }
+
+ // result = protocol.EncodeReflect(&cv)
+ // fmt.Println(base64.StdEncoding.EncodeToString(result))
+
+ cvEmpty := ConsensusVersionView{
+ Err: nil,
+ Version: protocol.ConsensusV10,
+ }
+
+ se := serializableError("testerr")
+ cv := ConsensusVersionView{
+ Err: &se,
+ Version: protocol.ConsensusV10,
+ }
+
+ cv1 := ConsensusVersionView{}
+ err = protocol.Decode(encodedEmpty, &cv1)
+ require.NoError(t, err)
+
+ cv2 := ConsensusVersionView{}
+ err = protocol.DecodeReflect(encodedEmpty, &cv2)
+ require.NoError(t, err)
+
+ require.Equal(t, cv1, cv2)
+ require.Equal(t, cvEmpty, cv2)
+
+ cv1 = ConsensusVersionView{}
+ err = protocol.Decode(encoded, &cv1)
+ require.NoError(t, err)
+
+ cv2 = ConsensusVersionView{}
+ err = protocol.DecodeReflect(encoded, &cv2)
+ require.NoError(t, err)
+
+ require.Equal(t, cv1, cv2)
+ require.Equal(t, cv, cv2)
+}
diff --git a/agreement/eventtype_string.go b/agreement/eventtype_string.go
index f8ee701d8..9da84c1b9 100644
--- a/agreement/eventtype_string.go
+++ b/agreement/eventtype_string.go
@@ -54,7 +54,7 @@ const _eventType_name = "nonevotePresentpayloadPresentbundlePresentvoteVerifiedp
var _eventType_index = [...]uint16{0, 4, 15, 29, 42, 54, 69, 83, 100, 107, 118, 131, 144, 157, 176, 192, 204, 217, 231, 246, 261, 277, 293, 308, 322, 334, 342, 351, 362, 372, 389, 405, 431, 450, 471, 485, 501, 510, 523, 540}
func (i eventType) String() string {
- if i < 0 || i >= eventType(len(_eventType_index)-1) {
+ if i >= eventType(len(_eventType_index)-1) {
return "eventType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _eventType_name[_eventType_index[i]:_eventType_index[i+1]]
diff --git a/agreement/message.go b/agreement/message.go
index 33a6d23ce..a1f6a8c80 100644
--- a/agreement/message.go
+++ b/agreement/message.go
@@ -18,12 +18,20 @@ package agreement
import (
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/msgp/msgp"
)
// A message represents an internal message which is passed between components
// of the agreement service.
type message struct {
- MessageHandle
+ _struct struct{} `codec:","`
+
+ // this field is for backwards compatibility with crash state serialized using go-codec prior to explicit unexport.
+ // should be removed after the next consensus update.
+ MessageHandle msgp.Raw
+ // explicitly unexport this field since we can't define serializers for interface{} type
+ // the only implementation of this is gossip.messageMetadata which doesn't have exported fields to serialize.
+ messageHandle MessageHandle
Tag protocol.Tag
@@ -46,6 +54,8 @@ type message struct {
// These messages are concatenated as an optimization which prevents proposals
// from being dropped.
type compoundMessage struct {
+ _struct struct{} `codec:","`
+
Vote unauthenticatedVote
Proposal unauthenticatedProposal
}
diff --git a/agreement/message_test.go b/agreement/message_test.go
index 88c4b504b..76209a5f9 100644
--- a/agreement/message_test.go
+++ b/agreement/message_test.go
@@ -17,6 +17,7 @@
package agreement
import (
+ "encoding/base64"
"testing"
"github.com/stretchr/testify/require"
@@ -24,7 +25,9 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/committee"
+ "github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
)
var poolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
@@ -80,3 +83,50 @@ func BenchmarkVoteDecoding(b *testing.B) {
decodeVote(msgBytes)
}
}
+
+// TestMessageBackwardCompatibility ensures MessageHandle field can be
+// properly decoded from message.
+// This test is only needed for agreement state serialization switch from reflection to msgp.
+func TestMessageBackwardCompatibility(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ type messageMetadata struct {
+ raw network.IncomingMessage
+ }
+
+ encoded, err := base64.StdEncoding.DecodeString("iaZCdW5kbGWAr0NvbXBvdW5kTWVzc2FnZYKoUHJvcG9zYWyApFZvdGWArU1lc3NhZ2VIYW5kbGWAqFByb3Bvc2FsgKNUYWeiUFC1VW5hdXRoZW50aWNhdGVkQnVuZGxlgLdVbmF1dGhlbnRpY2F0ZWRQcm9wb3NhbICzVW5hdXRoZW50aWNhdGVkVm90ZYCkVm90ZYA=")
+ require.NoError(t, err)
+
+ // run on master f57a276 to get the encoded data for above
+ // msg := message{
+ // MessageHandle: &messageMetadata{raw: network.IncomingMessage{Tag: protocol.Tag("mytag"), Data: []byte("some data")}},
+ // Tag: protocol.ProposalPayloadTag,
+ // }
+
+ // result := protocol.EncodeReflect(&msg)
+ // fmt.Println(base64.StdEncoding.EncodeToString(result))
+
+ // messages for all rounds after this change should not have MessageHandle set so clearing it out and re-encoding/decoding it should yield this
+ targetMessage := message{
+ Tag: protocol.ProposalPayloadTag,
+ }
+
+ var m1, m2, m3, m4 message
+ // Both msgp and reflection should decode the message containing old MessageHandle successfully
+ err = protocol.Decode(encoded, &m1)
+ require.NoError(t, err)
+ err = protocol.DecodeReflect(encoded, &m2)
+ require.NoError(t, err)
+ // after setting MessageHandle to nil both should re-encode and decode to same values
+ m1.MessageHandle = nil
+ m2.MessageHandle = nil
+ e1 := protocol.Encode(&m1)
+ e2 := protocol.EncodeReflect(&m2)
+ require.Equal(t, e1, e2)
+ err = protocol.DecodeReflect(e1, &m3)
+ require.NoError(t, err)
+ err = protocol.Decode(e2, &m4)
+ require.NoError(t, err)
+ require.Equal(t, m3, m4)
+ require.Equal(t, m3, targetMessage)
+}
diff --git a/agreement/msgp_gen.go b/agreement/msgp_gen.go
index 4581b07a9..3c396226b 100644
--- a/agreement/msgp_gen.go
+++ b/agreement/msgp_gen.go
@@ -22,6 +22,30 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// ConsensusVersionView
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// actionType
+// |-----> MarshalMsg
+// |-----> CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> Msgsize
+// |-----> MsgIsZero
+//
+// blockAssembler
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// bundle
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -30,6 +54,22 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// compoundMessage
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// diskState
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// equivocationVote
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -46,6 +86,46 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// eventType
+// |-----> MarshalMsg
+// |-----> CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> Msgsize
+// |-----> MsgIsZero
+//
+// freshnessData
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// message
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// messageEvent
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// nextThresholdStatusEvent
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// period
// |-----> MarshalMsg
// |-----> CanMarshalMsg
@@ -54,6 +134,22 @@ import (
// |-----> Msgsize
// |-----> MsgIsZero
//
+// periodRouter
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// player
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// proposal
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -62,6 +158,54 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// proposalManager
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// proposalSeeker
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// proposalStore
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// proposalTable
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// proposalTracker
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// proposalTrackerContract
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// proposalValue
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -70,6 +214,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// proposalVoteCounter
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// proposerSeed
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -86,6 +238,22 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// rootRouter
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// roundRouter
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// seedInput
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -102,13 +270,13 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
-// serializableErrorUnderlying
-// |-----> MarshalMsg
-// |-----> CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> Msgsize
-// |-----> MsgIsZero
+// serializableError
+// |-----> MarshalMsg
+// |-----> CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> Msgsize
+// |-----> MsgIsZero
//
// step
// |-----> MarshalMsg
@@ -118,6 +286,22 @@ import (
// |-----> Msgsize
// |-----> MsgIsZero
//
+// stepRouter
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// thresholdEvent
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// transmittedPayload
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -166,6 +350,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// voteAggregator
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// voteAuthenticator
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -174,6 +366,38 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// voteTracker
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// voteTrackerContract
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// voteTrackerPeriod
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// voteTrackerRound
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// MarshalMsg implements msgp.Marshaler
func (z *Certificate) MarshalMsg(b []byte) (o []byte) {
@@ -518,6 +742,407 @@ func (z *Certificate) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *ConsensusVersionView) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "Err"
+ o = append(o, 0x82, 0xa3, 0x45, 0x72, 0x72)
+ if (*z).Err == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendString(o, string(*(*z).Err))
+ }
+ // string "Version"
+ o = append(o, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
+ o = (*z).Version.MarshalMsg(o)
+ return
+}
+
+func (_ *ConsensusVersionView) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*ConsensusVersionView)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *ConsensusVersionView) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).Err = nil
+ } else {
+ if (*z).Err == nil {
+ (*z).Err = new(serializableError)
+ }
+ {
+ var zb0003 string
+ zb0003, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Err")
+ return
+ }
+ *(*z).Err = serializableError(zb0003)
+ }
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Version.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Version")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = ConsensusVersionView{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Err":
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).Err = nil
+ } else {
+ if (*z).Err == nil {
+ (*z).Err = new(serializableError)
+ }
+ {
+ var zb0004 string
+ zb0004, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Err")
+ return
+ }
+ *(*z).Err = serializableError(zb0004)
+ }
+ }
+ case "Version":
+ bts, err = (*z).Version.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Version")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *ConsensusVersionView) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*ConsensusVersionView)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *ConsensusVersionView) Msgsize() (s int) {
+ s = 1 + 4
+ if (*z).Err == nil {
+ s += msgp.NilSize
+ } else {
+ s += msgp.StringPrefixSize + len(string(*(*z).Err))
+ }
+ s += 8 + (*z).Version.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *ConsensusVersionView) MsgIsZero() bool {
+ return ((*z).Err == nil) && ((*z).Version.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z actionType) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ o = msgp.AppendUint8(o, uint8(z))
+ return
+}
+
+func (_ actionType) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(actionType)
+ if !ok {
+ _, ok = (z).(*actionType)
+ }
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *actionType) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ {
+ var zb0001 uint8
+ zb0001, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ (*z) = actionType(zb0001)
+ }
+ o = bts
+ return
+}
+
+func (_ *actionType) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*actionType)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z actionType) Msgsize() (s int) {
+ s = msgp.Uint8Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z actionType) MsgIsZero() bool {
+ return z == 0
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *blockAssembler) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 5
+ // string "Assembled"
+ o = append(o, 0x85, 0xa9, 0x41, 0x73, 0x73, 0x65, 0x6d, 0x62, 0x6c, 0x65, 0x64)
+ o = msgp.AppendBool(o, (*z).Assembled)
+ // string "Authenticators"
+ o = append(o, 0xae, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73)
+ if (*z).Authenticators == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Authenticators)))
+ }
+ for zb0001 := range (*z).Authenticators {
+ o = (*z).Authenticators[zb0001].MarshalMsg(o)
+ }
+ // string "Filled"
+ o = append(o, 0xa6, 0x46, 0x69, 0x6c, 0x6c, 0x65, 0x64)
+ o = msgp.AppendBool(o, (*z).Filled)
+ // string "Payload"
+ o = append(o, 0xa7, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64)
+ o = (*z).Payload.MarshalMsg(o)
+ // string "Pipeline"
+ o = append(o, 0xa8, 0x50, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65)
+ o = (*z).Pipeline.MarshalMsg(o)
+ return
+}
+
+func (_ *blockAssembler) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*blockAssembler)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *blockAssembler) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0002 int
+ var zb0003 bool
+ zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 > 0 {
+ zb0002--
+ bts, err = (*z).Pipeline.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Pipeline")
+ return
+ }
+ }
+ if zb0002 > 0 {
+ zb0002--
+ (*z).Filled, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Filled")
+ return
+ }
+ }
+ if zb0002 > 0 {
+ zb0002--
+ bts, err = (*z).Payload.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Payload")
+ return
+ }
+ }
+ if zb0002 > 0 {
+ zb0002--
+ (*z).Assembled, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Assembled")
+ return
+ }
+ }
+ if zb0002 > 0 {
+ zb0002--
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Authenticators")
+ return
+ }
+ if zb0005 {
+ (*z).Authenticators = nil
+ } else if (*z).Authenticators != nil && cap((*z).Authenticators) >= zb0004 {
+ (*z).Authenticators = ((*z).Authenticators)[:zb0004]
+ } else {
+ (*z).Authenticators = make([]vote, zb0004)
+ }
+ for zb0001 := range (*z).Authenticators {
+ bts, err = (*z).Authenticators[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Authenticators", zb0001)
+ return
+ }
+ }
+ }
+ if zb0002 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0002)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 {
+ (*z) = blockAssembler{}
+ }
+ for zb0002 > 0 {
+ zb0002--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Pipeline":
+ bts, err = (*z).Pipeline.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Pipeline")
+ return
+ }
+ case "Filled":
+ (*z).Filled, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Filled")
+ return
+ }
+ case "Payload":
+ bts, err = (*z).Payload.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Payload")
+ return
+ }
+ case "Assembled":
+ (*z).Assembled, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Assembled")
+ return
+ }
+ case "Authenticators":
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Authenticators")
+ return
+ }
+ if zb0007 {
+ (*z).Authenticators = nil
+ } else if (*z).Authenticators != nil && cap((*z).Authenticators) >= zb0006 {
+ (*z).Authenticators = ((*z).Authenticators)[:zb0006]
+ } else {
+ (*z).Authenticators = make([]vote, zb0006)
+ }
+ for zb0001 := range (*z).Authenticators {
+ bts, err = (*z).Authenticators[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Authenticators", zb0001)
+ return
+ }
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *blockAssembler) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*blockAssembler)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *blockAssembler) Msgsize() (s int) {
+ s = 1 + 9 + (*z).Pipeline.Msgsize() + 7 + msgp.BoolSize + 8 + (*z).Payload.Msgsize() + 10 + msgp.BoolSize + 15 + msgp.ArrayHeaderSize
+ for zb0001 := range (*z).Authenticators {
+ s += (*z).Authenticators[zb0001].Msgsize()
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *blockAssembler) MsgIsZero() bool {
+ return ((*z).Pipeline.MsgIsZero()) && ((*z).Filled == false) && ((*z).Payload.MsgIsZero()) && ((*z).Assembled == false) && (len((*z).Authenticators) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *bundle) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -775,6 +1400,368 @@ func (z *bundle) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *compoundMessage) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "Proposal"
+ o = append(o, 0x82, 0xa8, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c)
+ o = (*z).Proposal.MarshalMsg(o)
+ // string "Vote"
+ o = append(o, 0xa4, 0x56, 0x6f, 0x74, 0x65)
+ o = (*z).Vote.MarshalMsg(o)
+ return
+}
+
+func (_ *compoundMessage) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*compoundMessage)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *compoundMessage) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Vote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Vote")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Proposal")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = compoundMessage{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Vote":
+ bts, err = (*z).Vote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Vote")
+ return
+ }
+ case "Proposal":
+ bts, err = (*z).Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Proposal")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *compoundMessage) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*compoundMessage)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *compoundMessage) Msgsize() (s int) {
+ s = 1 + 5 + (*z).Vote.Msgsize() + 9 + (*z).Proposal.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *compoundMessage) MsgIsZero() bool {
+ return ((*z).Vote.MsgIsZero()) && ((*z).Proposal.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *diskState) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 5
+ // string "ActionTypes"
+ o = append(o, 0x85, 0xab, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73)
+ if (*z).ActionTypes == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).ActionTypes)))
+ }
+ for zb0001 := range (*z).ActionTypes {
+ o = msgp.AppendUint8(o, uint8((*z).ActionTypes[zb0001]))
+ }
+ // string "Actions"
+ o = append(o, 0xa7, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73)
+ if (*z).Actions == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Actions)))
+ }
+ for zb0002 := range (*z).Actions {
+ o = msgp.AppendBytes(o, (*z).Actions[zb0002])
+ }
+ // string "Clock"
+ o = append(o, 0xa5, 0x43, 0x6c, 0x6f, 0x63, 0x6b)
+ o = msgp.AppendBytes(o, (*z).Clock)
+ // string "Player"
+ o = append(o, 0xa6, 0x50, 0x6c, 0x61, 0x79, 0x65, 0x72)
+ o = msgp.AppendBytes(o, (*z).Player)
+ // string "Router"
+ o = append(o, 0xa6, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72)
+ o = msgp.AppendBytes(o, (*z).Router)
+ return
+}
+
+func (_ *diskState) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*diskState)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *diskState) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).Router, bts, err = msgp.ReadBytesBytes(bts, (*z).Router)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Router")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).Player, bts, err = msgp.ReadBytesBytes(bts, (*z).Player)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Player")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).Clock, bts, err = msgp.ReadBytesBytes(bts, (*z).Clock)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Clock")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ActionTypes")
+ return
+ }
+ if zb0006 {
+ (*z).ActionTypes = nil
+ } else if (*z).ActionTypes != nil && cap((*z).ActionTypes) >= zb0005 {
+ (*z).ActionTypes = ((*z).ActionTypes)[:zb0005]
+ } else {
+ (*z).ActionTypes = make([]actionType, zb0005)
+ }
+ for zb0001 := range (*z).ActionTypes {
+ {
+ var zb0007 uint8
+ zb0007, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ActionTypes", zb0001)
+ return
+ }
+ (*z).ActionTypes[zb0001] = actionType(zb0007)
+ }
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Actions")
+ return
+ }
+ if zb0009 {
+ (*z).Actions = nil
+ } else if (*z).Actions != nil && cap((*z).Actions) >= zb0008 {
+ (*z).Actions = ((*z).Actions)[:zb0008]
+ } else {
+ (*z).Actions = make([][]byte, zb0008)
+ }
+ for zb0002 := range (*z).Actions {
+ (*z).Actions[zb0002], bts, err = msgp.ReadBytesBytes(bts, (*z).Actions[zb0002])
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Actions", zb0002)
+ return
+ }
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = diskState{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Router":
+ (*z).Router, bts, err = msgp.ReadBytesBytes(bts, (*z).Router)
+ if err != nil {
+ err = msgp.WrapError(err, "Router")
+ return
+ }
+ case "Player":
+ (*z).Player, bts, err = msgp.ReadBytesBytes(bts, (*z).Player)
+ if err != nil {
+ err = msgp.WrapError(err, "Player")
+ return
+ }
+ case "Clock":
+ (*z).Clock, bts, err = msgp.ReadBytesBytes(bts, (*z).Clock)
+ if err != nil {
+ err = msgp.WrapError(err, "Clock")
+ return
+ }
+ case "ActionTypes":
+ var zb0010 int
+ var zb0011 bool
+ zb0010, zb0011, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ActionTypes")
+ return
+ }
+ if zb0011 {
+ (*z).ActionTypes = nil
+ } else if (*z).ActionTypes != nil && cap((*z).ActionTypes) >= zb0010 {
+ (*z).ActionTypes = ((*z).ActionTypes)[:zb0010]
+ } else {
+ (*z).ActionTypes = make([]actionType, zb0010)
+ }
+ for zb0001 := range (*z).ActionTypes {
+ {
+ var zb0012 uint8
+ zb0012, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ActionTypes", zb0001)
+ return
+ }
+ (*z).ActionTypes[zb0001] = actionType(zb0012)
+ }
+ }
+ case "Actions":
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Actions")
+ return
+ }
+ if zb0014 {
+ (*z).Actions = nil
+ } else if (*z).Actions != nil && cap((*z).Actions) >= zb0013 {
+ (*z).Actions = ((*z).Actions)[:zb0013]
+ } else {
+ (*z).Actions = make([][]byte, zb0013)
+ }
+ for zb0002 := range (*z).Actions {
+ (*z).Actions[zb0002], bts, err = msgp.ReadBytesBytes(bts, (*z).Actions[zb0002])
+ if err != nil {
+ err = msgp.WrapError(err, "Actions", zb0002)
+ return
+ }
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *diskState) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*diskState)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *diskState) Msgsize() (s int) {
+ s = 1 + 7 + msgp.BytesPrefixSize + len((*z).Router) + 7 + msgp.BytesPrefixSize + len((*z).Player) + 6 + msgp.BytesPrefixSize + len((*z).Clock) + 12 + msgp.ArrayHeaderSize + (len((*z).ActionTypes) * (msgp.Uint8Size)) + 8 + msgp.ArrayHeaderSize
+ for zb0002 := range (*z).Actions {
+ s += msgp.BytesPrefixSize + len((*z).Actions[zb0002])
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *diskState) MsgIsZero() bool {
+ return (len((*z).Router) == 0) && (len((*z).Player) == 0) && (len((*z).Clock) == 0) && (len((*z).ActionTypes) == 0) && (len((*z).Actions) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *equivocationVote) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -1307,6 +2294,972 @@ func (z *equivocationVoteAuthenticator) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z eventType) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ o = msgp.AppendUint8(o, uint8(z))
+ return
+}
+
+func (_ eventType) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(eventType)
+ if !ok {
+ _, ok = (z).(*eventType)
+ }
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *eventType) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ {
+ var zb0001 uint8
+ zb0001, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ (*z) = eventType(zb0001)
+ }
+ o = bts
+ return
+}
+
+func (_ *eventType) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*eventType)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z eventType) Msgsize() (s int) {
+ s = msgp.Uint8Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z eventType) MsgIsZero() bool {
+ return z == 0
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *freshnessData) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 4
+ // string "PlayerLastConcluding"
+ o = append(o, 0x84, 0xb4, 0x50, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x4c, 0x61, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x69, 0x6e, 0x67)
+ o = msgp.AppendUint64(o, uint64((*z).PlayerLastConcluding))
+ // string "PlayerPeriod"
+ o = append(o, 0xac, 0x50, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64)
+ o = msgp.AppendUint64(o, uint64((*z).PlayerPeriod))
+ // string "PlayerRound"
+ o = append(o, 0xab, 0x50, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x6e, 0x64)
+ o = (*z).PlayerRound.MarshalMsg(o)
+ // string "PlayerStep"
+ o = append(o, 0xaa, 0x50, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x53, 0x74, 0x65, 0x70)
+ o = msgp.AppendUint64(o, uint64((*z).PlayerStep))
+ return
+}
+
+func (_ *freshnessData) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*freshnessData)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *freshnessData) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).PlayerRound.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "PlayerRound")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0003 uint64
+ zb0003, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "PlayerPeriod")
+ return
+ }
+ (*z).PlayerPeriod = period(zb0003)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0004 uint64
+ zb0004, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "PlayerStep")
+ return
+ }
+ (*z).PlayerStep = step(zb0004)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0005 uint64
+ zb0005, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "PlayerLastConcluding")
+ return
+ }
+ (*z).PlayerLastConcluding = step(zb0005)
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = freshnessData{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "PlayerRound":
+ bts, err = (*z).PlayerRound.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "PlayerRound")
+ return
+ }
+ case "PlayerPeriod":
+ {
+ var zb0006 uint64
+ zb0006, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "PlayerPeriod")
+ return
+ }
+ (*z).PlayerPeriod = period(zb0006)
+ }
+ case "PlayerStep":
+ {
+ var zb0007 uint64
+ zb0007, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "PlayerStep")
+ return
+ }
+ (*z).PlayerStep = step(zb0007)
+ }
+ case "PlayerLastConcluding":
+ {
+ var zb0008 uint64
+ zb0008, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "PlayerLastConcluding")
+ return
+ }
+ (*z).PlayerLastConcluding = step(zb0008)
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *freshnessData) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*freshnessData)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *freshnessData) Msgsize() (s int) {
+ s = 1 + 12 + (*z).PlayerRound.Msgsize() + 13 + msgp.Uint64Size + 11 + msgp.Uint64Size + 21 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *freshnessData) MsgIsZero() bool {
+ return ((*z).PlayerRound.MsgIsZero()) && ((*z).PlayerPeriod == 0) && ((*z).PlayerStep == 0) && ((*z).PlayerLastConcluding == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *message) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 9
+ // string "Bundle"
+ o = append(o, 0x89, 0xa6, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65)
+ o = (*z).Bundle.MarshalMsg(o)
+ // string "CompoundMessage"
+ o = append(o, 0xaf, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x75, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65)
+ // map header, size 2
+ // string "Proposal"
+ o = append(o, 0x82, 0xa8, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c)
+ o = (*z).CompoundMessage.Proposal.MarshalMsg(o)
+ // string "Vote"
+ o = append(o, 0xa4, 0x56, 0x6f, 0x74, 0x65)
+ o = (*z).CompoundMessage.Vote.MarshalMsg(o)
+ // string "MessageHandle"
+ o = append(o, 0xad, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65)
+ o = (*z).MessageHandle.MarshalMsg(o)
+ // string "Proposal"
+ o = append(o, 0xa8, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c)
+ o = (*z).Proposal.MarshalMsg(o)
+ // string "Tag"
+ o = append(o, 0xa3, 0x54, 0x61, 0x67)
+ o = (*z).Tag.MarshalMsg(o)
+ // string "UnauthenticatedBundle"
+ o = append(o, 0xb5, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65)
+ o = (*z).UnauthenticatedBundle.MarshalMsg(o)
+ // string "UnauthenticatedProposal"
+ o = append(o, 0xb7, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c)
+ o = (*z).UnauthenticatedProposal.MarshalMsg(o)
+ // string "UnauthenticatedVote"
+ o = append(o, 0xb3, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x56, 0x6f, 0x74, 0x65)
+ o = (*z).UnauthenticatedVote.MarshalMsg(o)
+ // string "Vote"
+ o = append(o, 0xa4, 0x56, 0x6f, 0x74, 0x65)
+ o = (*z).Vote.MarshalMsg(o)
+ return
+}
+
+func (_ *message) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*message)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *message) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).MessageHandle.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "MessageHandle")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Tag.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Tag")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Vote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Vote")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Proposal")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Bundle.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Bundle")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).UnauthenticatedVote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "UnauthenticatedVote")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).UnauthenticatedProposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "UnauthenticatedProposal")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).UnauthenticatedBundle.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "UnauthenticatedBundle")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage")
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).CompoundMessage.Vote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage", "struct-from-array", "Vote")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).CompoundMessage.Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage", "struct-from-array", "Proposal")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage")
+ return
+ }
+ if zb0004 {
+ (*z).CompoundMessage = compoundMessage{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage")
+ return
+ }
+ switch string(field) {
+ case "Vote":
+ bts, err = (*z).CompoundMessage.Vote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage", "Vote")
+ return
+ }
+ case "Proposal":
+ bts, err = (*z).CompoundMessage.Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage", "Proposal")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage")
+ return
+ }
+ }
+ }
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = message{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "MessageHandle":
+ bts, err = (*z).MessageHandle.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "MessageHandle")
+ return
+ }
+ case "Tag":
+ bts, err = (*z).Tag.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Tag")
+ return
+ }
+ case "Vote":
+ bts, err = (*z).Vote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Vote")
+ return
+ }
+ case "Proposal":
+ bts, err = (*z).Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Proposal")
+ return
+ }
+ case "Bundle":
+ bts, err = (*z).Bundle.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Bundle")
+ return
+ }
+ case "UnauthenticatedVote":
+ bts, err = (*z).UnauthenticatedVote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "UnauthenticatedVote")
+ return
+ }
+ case "UnauthenticatedProposal":
+ bts, err = (*z).UnauthenticatedProposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "UnauthenticatedProposal")
+ return
+ }
+ case "UnauthenticatedBundle":
+ bts, err = (*z).UnauthenticatedBundle.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "UnauthenticatedBundle")
+ return
+ }
+ case "CompoundMessage":
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage")
+ return
+ }
+ if zb0005 > 0 {
+ zb0005--
+ bts, err = (*z).CompoundMessage.Vote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage", "struct-from-array", "Vote")
+ return
+ }
+ }
+ if zb0005 > 0 {
+ zb0005--
+ bts, err = (*z).CompoundMessage.Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage", "struct-from-array", "Proposal")
+ return
+ }
+ }
+ if zb0005 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0005)
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage")
+ return
+ }
+ if zb0006 {
+ (*z).CompoundMessage = compoundMessage{}
+ }
+ for zb0005 > 0 {
+ zb0005--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage")
+ return
+ }
+ switch string(field) {
+ case "Vote":
+ bts, err = (*z).CompoundMessage.Vote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage", "Vote")
+ return
+ }
+ case "Proposal":
+ bts, err = (*z).CompoundMessage.Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage", "Proposal")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage")
+ return
+ }
+ }
+ }
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *message) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*message)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *message) Msgsize() (s int) {
+ s = 1 + 14 + (*z).MessageHandle.Msgsize() + 4 + (*z).Tag.Msgsize() + 5 + (*z).Vote.Msgsize() + 9 + (*z).Proposal.Msgsize() + 7 + (*z).Bundle.Msgsize() + 20 + (*z).UnauthenticatedVote.Msgsize() + 24 + (*z).UnauthenticatedProposal.Msgsize() + 22 + (*z).UnauthenticatedBundle.Msgsize() + 16 + 1 + 5 + (*z).CompoundMessage.Vote.Msgsize() + 9 + (*z).CompoundMessage.Proposal.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *message) MsgIsZero() bool {
+ return ((*z).MessageHandle.MsgIsZero()) && ((*z).Tag.MsgIsZero()) && ((*z).Vote.MsgIsZero()) && ((*z).Proposal.MsgIsZero()) && ((*z).Bundle.MsgIsZero()) && ((*z).UnauthenticatedVote.MsgIsZero()) && ((*z).UnauthenticatedProposal.MsgIsZero()) && ((*z).UnauthenticatedBundle.MsgIsZero()) && (((*z).CompoundMessage.Vote.MsgIsZero()) && ((*z).CompoundMessage.Proposal.MsgIsZero()))
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *messageEvent) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 7
+ // string "Cancelled"
+ o = append(o, 0x87, 0xa9, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x65, 0x64)
+ o = msgp.AppendBool(o, (*z).Cancelled)
+ // string "Err"
+ o = append(o, 0xa3, 0x45, 0x72, 0x72)
+ if (*z).Err == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendString(o, string(*(*z).Err))
+ }
+ // string "Input"
+ o = append(o, 0xa5, 0x49, 0x6e, 0x70, 0x75, 0x74)
+ o = (*z).Input.MarshalMsg(o)
+ // string "Proto"
+ o = append(o, 0xa5, 0x50, 0x72, 0x6f, 0x74, 0x6f)
+ o = (*z).Proto.MarshalMsg(o)
+ // string "T"
+ o = append(o, 0xa1, 0x54)
+ o = msgp.AppendUint8(o, uint8((*z).T))
+ // string "Tail"
+ o = append(o, 0xa4, 0x54, 0x61, 0x69, 0x6c)
+ if (*z).Tail == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = (*z).Tail.MarshalMsg(o)
+ }
+ // string "TaskIndex"
+ o = append(o, 0xa9, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78)
+ o = msgp.AppendUint64(o, (*z).TaskIndex)
+ return
+}
+
+func (_ *messageEvent) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*messageEvent)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *messageEvent) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0003 uint8
+ zb0003, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "T")
+ return
+ }
+ (*z).T = eventType(zb0003)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Input.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Input")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).Err = nil
+ } else {
+ if (*z).Err == nil {
+ (*z).Err = new(serializableError)
+ }
+ {
+ var zb0004 string
+ zb0004, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Err")
+ return
+ }
+ *(*z).Err = serializableError(zb0004)
+ }
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).TaskIndex, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TaskIndex")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).Tail = nil
+ } else {
+ if (*z).Tail == nil {
+ (*z).Tail = new(messageEvent)
+ }
+ bts, err = (*z).Tail.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Tail")
+ return
+ }
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Cancelled, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cancelled")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Proto.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Proto")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = messageEvent{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "T":
+ {
+ var zb0005 uint8
+ zb0005, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "T")
+ return
+ }
+ (*z).T = eventType(zb0005)
+ }
+ case "Input":
+ bts, err = (*z).Input.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Input")
+ return
+ }
+ case "Err":
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).Err = nil
+ } else {
+ if (*z).Err == nil {
+ (*z).Err = new(serializableError)
+ }
+ {
+ var zb0006 string
+ zb0006, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Err")
+ return
+ }
+ *(*z).Err = serializableError(zb0006)
+ }
+ }
+ case "TaskIndex":
+ (*z).TaskIndex, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TaskIndex")
+ return
+ }
+ case "Tail":
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).Tail = nil
+ } else {
+ if (*z).Tail == nil {
+ (*z).Tail = new(messageEvent)
+ }
+ bts, err = (*z).Tail.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Tail")
+ return
+ }
+ }
+ case "Cancelled":
+ (*z).Cancelled, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Cancelled")
+ return
+ }
+ case "Proto":
+ bts, err = (*z).Proto.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Proto")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *messageEvent) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*messageEvent)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *messageEvent) Msgsize() (s int) {
+ s = 1 + 2 + msgp.Uint8Size + 6 + (*z).Input.Msgsize() + 4
+ if (*z).Err == nil {
+ s += msgp.NilSize
+ } else {
+ s += msgp.StringPrefixSize + len(string(*(*z).Err))
+ }
+ s += 10 + msgp.Uint64Size + 5
+ if (*z).Tail == nil {
+ s += msgp.NilSize
+ } else {
+ s += (*z).Tail.Msgsize()
+ }
+ s += 10 + msgp.BoolSize + 6 + (*z).Proto.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *messageEvent) MsgIsZero() bool {
+ return ((*z).T == 0) && ((*z).Input.MsgIsZero()) && ((*z).Err == nil) && ((*z).TaskIndex == 0) && ((*z).Tail == nil) && ((*z).Cancelled == false) && ((*z).Proto.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *nextThresholdStatusEvent) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "Bottom"
+ o = append(o, 0x82, 0xa6, 0x42, 0x6f, 0x74, 0x74, 0x6f, 0x6d)
+ o = msgp.AppendBool(o, (*z).Bottom)
+ // string "Proposal"
+ o = append(o, 0xa8, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c)
+ o = (*z).Proposal.MarshalMsg(o)
+ return
+}
+
+func (_ *nextThresholdStatusEvent) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*nextThresholdStatusEvent)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *nextThresholdStatusEvent) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Bottom, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Bottom")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Proposal")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = nextThresholdStatusEvent{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Bottom":
+ (*z).Bottom, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Bottom")
+ return
+ }
+ case "Proposal":
+ bts, err = (*z).Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Proposal")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *nextThresholdStatusEvent) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*nextThresholdStatusEvent)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *nextThresholdStatusEvent) Msgsize() (s int) {
+ s = 1 + 7 + msgp.BoolSize + 9 + (*z).Proposal.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *nextThresholdStatusEvent) MsgIsZero() bool {
+ return ((*z).Bottom == false) && ((*z).Proposal.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z period) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendUint64(o, uint64(z))
@@ -1353,6 +3306,487 @@ func (z period) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *periodRouter) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 4
+ // string "Children"
+ o = append(o, 0x84, 0xa8, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e)
+ if (*z).Children == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Children)))
+ }
+ zb0001_keys := make([]step, 0, len((*z).Children))
+ for zb0001 := range (*z).Children {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortStep(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Children[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ if zb0002 == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = zb0002.MarshalMsg(o)
+ }
+ }
+ // string "ProposalTracker"
+ o = append(o, 0xaf, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72)
+ o = (*z).ProposalTracker.MarshalMsg(o)
+ // string "ProposalTrackerContract"
+ o = append(o, 0xb7, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74)
+ o = (*z).ProposalTrackerContract.MarshalMsg(o)
+ // string "VoteTrackerPeriod"
+ o = append(o, 0xb1, 0x56, 0x6f, 0x74, 0x65, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64)
+ o = (*z).VoteTrackerPeriod.MarshalMsg(o)
+ return
+}
+
+func (_ *periodRouter) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*periodRouter)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *periodRouter) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).ProposalTracker.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProposalTracker")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).VoteTrackerPeriod.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerPeriod")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).ProposalTrackerContract.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProposalTrackerContract")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children")
+ return
+ }
+ if zb0006 {
+ (*z).Children = nil
+ } else if (*z).Children == nil {
+ (*z).Children = make(map[step]*stepRouter, zb0005)
+ }
+ for zb0005 > 0 {
+ var zb0001 step
+ var zb0002 *stepRouter
+ zb0005--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ zb0002 = nil
+ } else {
+ if zb0002 == nil {
+ zb0002 = new(stepRouter)
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children", zb0001)
+ return
+ }
+ }
+ (*z).Children[zb0001] = zb0002
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = periodRouter{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "ProposalTracker":
+ bts, err = (*z).ProposalTracker.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ProposalTracker")
+ return
+ }
+ case "VoteTrackerPeriod":
+ bts, err = (*z).VoteTrackerPeriod.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerPeriod")
+ return
+ }
+ case "ProposalTrackerContract":
+ bts, err = (*z).ProposalTrackerContract.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ProposalTrackerContract")
+ return
+ }
+ case "Children":
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children")
+ return
+ }
+ if zb0008 {
+ (*z).Children = nil
+ } else if (*z).Children == nil {
+ (*z).Children = make(map[step]*stepRouter, zb0007)
+ }
+ for zb0007 > 0 {
+ var zb0001 step
+ var zb0002 *stepRouter
+ zb0007--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ zb0002 = nil
+ } else {
+ if zb0002 == nil {
+ zb0002 = new(stepRouter)
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children", zb0001)
+ return
+ }
+ }
+ (*z).Children[zb0001] = zb0002
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *periodRouter) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*periodRouter)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *periodRouter) Msgsize() (s int) {
+ s = 1 + 16 + (*z).ProposalTracker.Msgsize() + 18 + (*z).VoteTrackerPeriod.Msgsize() + 24 + (*z).ProposalTrackerContract.Msgsize() + 9 + msgp.MapHeaderSize
+ if (*z).Children != nil {
+ for zb0001, zb0002 := range (*z).Children {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + zb0001.Msgsize()
+ if zb0002 == nil {
+ s += msgp.NilSize
+ } else {
+ s += zb0002.Msgsize()
+ }
+ }
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *periodRouter) MsgIsZero() bool {
+ return ((*z).ProposalTracker.MsgIsZero()) && ((*z).VoteTrackerPeriod.MsgIsZero()) && ((*z).ProposalTrackerContract.MsgIsZero()) && (len((*z).Children) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *player) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 8
+ // string "Deadline"
+ o = append(o, 0x88, 0xa8, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65)
+ o = msgp.AppendDuration(o, (*z).Deadline)
+ // string "FastRecoveryDeadline"
+ o = append(o, 0xb4, 0x46, 0x61, 0x73, 0x74, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65)
+ o = msgp.AppendDuration(o, (*z).FastRecoveryDeadline)
+ // string "LastConcluding"
+ o = append(o, 0xae, 0x4c, 0x61, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x69, 0x6e, 0x67)
+ o = msgp.AppendUint64(o, uint64((*z).LastConcluding))
+ // string "Napping"
+ o = append(o, 0xa7, 0x4e, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67)
+ o = msgp.AppendBool(o, (*z).Napping)
+ // string "Pending"
+ o = append(o, 0xa7, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67)
+ o = (*z).Pending.MarshalMsg(o)
+ // string "Period"
+ o = append(o, 0xa6, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64)
+ o = msgp.AppendUint64(o, uint64((*z).Period))
+ // string "Round"
+ o = append(o, 0xa5, 0x52, 0x6f, 0x75, 0x6e, 0x64)
+ o = (*z).Round.MarshalMsg(o)
+ // string "Step"
+ o = append(o, 0xa4, 0x53, 0x74, 0x65, 0x70)
+ o = msgp.AppendUint64(o, uint64((*z).Step))
+ return
+}
+
+func (_ *player) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*player)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *player) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Round.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Round")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0003 uint64
+ zb0003, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Period")
+ return
+ }
+ (*z).Period = period(zb0003)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0004 uint64
+ zb0004, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Step")
+ return
+ }
+ (*z).Step = step(zb0004)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0005 uint64
+ zb0005, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "LastConcluding")
+ return
+ }
+ (*z).LastConcluding = step(zb0005)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Deadline, bts, err = msgp.ReadDurationBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Deadline")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Napping, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Napping")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).FastRecoveryDeadline, bts, err = msgp.ReadDurationBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "FastRecoveryDeadline")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Pending.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Pending")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = player{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Round":
+ bts, err = (*z).Round.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Round")
+ return
+ }
+ case "Period":
+ {
+ var zb0006 uint64
+ zb0006, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Period")
+ return
+ }
+ (*z).Period = period(zb0006)
+ }
+ case "Step":
+ {
+ var zb0007 uint64
+ zb0007, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Step")
+ return
+ }
+ (*z).Step = step(zb0007)
+ }
+ case "LastConcluding":
+ {
+ var zb0008 uint64
+ zb0008, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LastConcluding")
+ return
+ }
+ (*z).LastConcluding = step(zb0008)
+ }
+ case "Deadline":
+ (*z).Deadline, bts, err = msgp.ReadDurationBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Deadline")
+ return
+ }
+ case "Napping":
+ (*z).Napping, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Napping")
+ return
+ }
+ case "FastRecoveryDeadline":
+ (*z).FastRecoveryDeadline, bts, err = msgp.ReadDurationBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "FastRecoveryDeadline")
+ return
+ }
+ case "Pending":
+ bts, err = (*z).Pending.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Pending")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *player) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*player)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *player) Msgsize() (s int) {
+ s = 1 + 6 + (*z).Round.Msgsize() + 7 + msgp.Uint64Size + 5 + msgp.Uint64Size + 15 + msgp.Uint64Size + 9 + msgp.DurationSize + 8 + msgp.BoolSize + 21 + msgp.DurationSize + 8 + (*z).Pending.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *player) MsgIsZero() bool {
+ return ((*z).Round.MsgIsZero()) && ((*z).Period == 0) && ((*z).Step == 0) && ((*z).LastConcluding == 0) && ((*z).Deadline == 0) && ((*z).Napping == false) && ((*z).FastRecoveryDeadline == 0) && ((*z).Pending.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *proposal) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -2243,6 +4677,1048 @@ func (z *proposal) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *proposalManager) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 0
+ o = append(o, 0x80)
+ return
+}
+
+func (_ *proposalManager) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalManager)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *proposalManager) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = proposalManager{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *proposalManager) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalManager)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *proposalManager) Msgsize() (s int) {
+ s = 1
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *proposalManager) MsgIsZero() bool {
+ return true
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *proposalSeeker) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 3
+ // string "Filled"
+ o = append(o, 0x83, 0xa6, 0x46, 0x69, 0x6c, 0x6c, 0x65, 0x64)
+ o = msgp.AppendBool(o, (*z).Filled)
+ // string "Frozen"
+ o = append(o, 0xa6, 0x46, 0x72, 0x6f, 0x7a, 0x65, 0x6e)
+ o = msgp.AppendBool(o, (*z).Frozen)
+ // string "Lowest"
+ o = append(o, 0xa6, 0x4c, 0x6f, 0x77, 0x65, 0x73, 0x74)
+ o = (*z).Lowest.MarshalMsg(o)
+ return
+}
+
+func (_ *proposalSeeker) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalSeeker)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *proposalSeeker) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Lowest.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Lowest")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Filled, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Filled")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Frozen, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Frozen")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = proposalSeeker{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Lowest":
+ bts, err = (*z).Lowest.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Lowest")
+ return
+ }
+ case "Filled":
+ (*z).Filled, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Filled")
+ return
+ }
+ case "Frozen":
+ (*z).Frozen, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Frozen")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *proposalSeeker) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalSeeker)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *proposalSeeker) Msgsize() (s int) {
+ s = 1 + 7 + (*z).Lowest.Msgsize() + 7 + msgp.BoolSize + 7 + msgp.BoolSize
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *proposalSeeker) MsgIsZero() bool {
+ return ((*z).Lowest.MsgIsZero()) && ((*z).Filled == false) && ((*z).Frozen == false)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *proposalStore) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 3
+ // string "Assemblers"
+ o = append(o, 0x83, 0xaa, 0x41, 0x73, 0x73, 0x65, 0x6d, 0x62, 0x6c, 0x65, 0x72, 0x73)
+ if (*z).Assemblers == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Assemblers)))
+ }
+ zb0003_keys := make([]proposalValue, 0, len((*z).Assemblers))
+ for zb0003 := range (*z).Assemblers {
+ zb0003_keys = append(zb0003_keys, zb0003)
+ }
+ sort.Sort(SortProposalValue(zb0003_keys))
+ for _, zb0003 := range zb0003_keys {
+ zb0004 := (*z).Assemblers[zb0003]
+ _ = zb0004
+ o = zb0003.MarshalMsg(o)
+ o = zb0004.MarshalMsg(o)
+ }
+ // string "Pinned"
+ o = append(o, 0xa6, 0x50, 0x69, 0x6e, 0x6e, 0x65, 0x64)
+ o = (*z).Pinned.MarshalMsg(o)
+ // string "Relevant"
+ o = append(o, 0xa8, 0x52, 0x65, 0x6c, 0x65, 0x76, 0x61, 0x6e, 0x74)
+ if (*z).Relevant == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Relevant)))
+ }
+ zb0001_keys := make([]period, 0, len((*z).Relevant))
+ for zb0001 := range (*z).Relevant {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortPeriod(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Relevant[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ o = zb0002.MarshalMsg(o)
+ }
+ return
+}
+
+func (_ *proposalStore) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalStore)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *proposalStore) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0005 > 0 {
+ zb0005--
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Relevant")
+ return
+ }
+ if zb0008 {
+ (*z).Relevant = nil
+ } else if (*z).Relevant == nil {
+ (*z).Relevant = make(map[period]proposalValue, zb0007)
+ }
+ for zb0007 > 0 {
+ var zb0001 period
+ var zb0002 proposalValue
+ zb0007--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Relevant")
+ return
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Relevant", zb0001)
+ return
+ }
+ (*z).Relevant[zb0001] = zb0002
+ }
+ }
+ if zb0005 > 0 {
+ zb0005--
+ bts, err = (*z).Pinned.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Pinned")
+ return
+ }
+ }
+ if zb0005 > 0 {
+ zb0005--
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Assemblers")
+ return
+ }
+ if zb0010 {
+ (*z).Assemblers = nil
+ } else if (*z).Assemblers == nil {
+ (*z).Assemblers = make(map[proposalValue]blockAssembler, zb0009)
+ }
+ for zb0009 > 0 {
+ var zb0003 proposalValue
+ var zb0004 blockAssembler
+ zb0009--
+ bts, err = zb0003.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Assemblers")
+ return
+ }
+ bts, err = zb0004.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Assemblers", zb0003)
+ return
+ }
+ (*z).Assemblers[zb0003] = zb0004
+ }
+ }
+ if zb0005 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0005)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0006 {
+ (*z) = proposalStore{}
+ }
+ for zb0005 > 0 {
+ zb0005--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Relevant":
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Relevant")
+ return
+ }
+ if zb0012 {
+ (*z).Relevant = nil
+ } else if (*z).Relevant == nil {
+ (*z).Relevant = make(map[period]proposalValue, zb0011)
+ }
+ for zb0011 > 0 {
+ var zb0001 period
+ var zb0002 proposalValue
+ zb0011--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Relevant")
+ return
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Relevant", zb0001)
+ return
+ }
+ (*z).Relevant[zb0001] = zb0002
+ }
+ case "Pinned":
+ bts, err = (*z).Pinned.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Pinned")
+ return
+ }
+ case "Assemblers":
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Assemblers")
+ return
+ }
+ if zb0014 {
+ (*z).Assemblers = nil
+ } else if (*z).Assemblers == nil {
+ (*z).Assemblers = make(map[proposalValue]blockAssembler, zb0013)
+ }
+ for zb0013 > 0 {
+ var zb0003 proposalValue
+ var zb0004 blockAssembler
+ zb0013--
+ bts, err = zb0003.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Assemblers")
+ return
+ }
+ bts, err = zb0004.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Assemblers", zb0003)
+ return
+ }
+ (*z).Assemblers[zb0003] = zb0004
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *proposalStore) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalStore)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *proposalStore) Msgsize() (s int) {
+ s = 1 + 9 + msgp.MapHeaderSize
+ if (*z).Relevant != nil {
+ for zb0001, zb0002 := range (*z).Relevant {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
+ }
+ }
+ s += 7 + (*z).Pinned.Msgsize() + 11 + msgp.MapHeaderSize
+ if (*z).Assemblers != nil {
+ for zb0003, zb0004 := range (*z).Assemblers {
+ _ = zb0003
+ _ = zb0004
+ s += 0 + zb0003.Msgsize() + zb0004.Msgsize()
+ }
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *proposalStore) MsgIsZero() bool {
+ return (len((*z).Relevant) == 0) && ((*z).Pinned.MsgIsZero()) && (len((*z).Assemblers) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *proposalTable) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0003Len := uint32(2)
+ var zb0003Mask uint8 /* 3 bits */
+ if len((*z).Pending) == 0 {
+ zb0003Len--
+ zb0003Mask |= 0x1
+ }
+ if (*z).PendingNext == 0 {
+ zb0003Len--
+ zb0003Mask |= 0x2
+ }
+ // variable map header, size zb0003Len
+ o = append(o, 0x80|uint8(zb0003Len))
+ if zb0003Len != 0 {
+ if (zb0003Mask & 0x1) == 0 { // if not empty
+ // string "Pending"
+ o = append(o, 0xa7, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67)
+ if (*z).Pending == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Pending)))
+ }
+ zb0001_keys := make([]uint64, 0, len((*z).Pending))
+ for zb0001 := range (*z).Pending {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortUint64(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Pending[zb0001]
+ _ = zb0002
+ o = msgp.AppendUint64(o, zb0001)
+ if zb0002 == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = zb0002.MarshalMsg(o)
+ }
+ }
+ }
+ if (zb0003Mask & 0x2) == 0 { // if not empty
+ // string "PendingNext"
+ o = append(o, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4e, 0x65, 0x78, 0x74)
+ o = msgp.AppendUint64(o, (*z).PendingNext)
+ }
+ }
+ return
+}
+
+func (_ *proposalTable) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalTable)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *proposalTable) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Pending")
+ return
+ }
+ if zb0006 {
+ (*z).Pending = nil
+ } else if (*z).Pending == nil {
+ (*z).Pending = make(map[uint64]*messageEvent, zb0005)
+ }
+ for zb0005 > 0 {
+ var zb0001 uint64
+ var zb0002 *messageEvent
+ zb0005--
+ zb0001, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Pending")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ zb0002 = nil
+ } else {
+ if zb0002 == nil {
+ zb0002 = new(messageEvent)
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Pending", zb0001)
+ return
+ }
+ }
+ (*z).Pending[zb0001] = zb0002
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).PendingNext, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "PendingNext")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = proposalTable{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Pending":
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Pending")
+ return
+ }
+ if zb0008 {
+ (*z).Pending = nil
+ } else if (*z).Pending == nil {
+ (*z).Pending = make(map[uint64]*messageEvent, zb0007)
+ }
+ for zb0007 > 0 {
+ var zb0001 uint64
+ var zb0002 *messageEvent
+ zb0007--
+ zb0001, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Pending")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ zb0002 = nil
+ } else {
+ if zb0002 == nil {
+ zb0002 = new(messageEvent)
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Pending", zb0001)
+ return
+ }
+ }
+ (*z).Pending[zb0001] = zb0002
+ }
+ case "PendingNext":
+ (*z).PendingNext, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "PendingNext")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *proposalTable) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalTable)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *proposalTable) Msgsize() (s int) {
+ s = 1 + 8 + msgp.MapHeaderSize
+ if (*z).Pending != nil {
+ for zb0001, zb0002 := range (*z).Pending {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + msgp.Uint64Size
+ if zb0002 == nil {
+ s += msgp.NilSize
+ } else {
+ s += zb0002.Msgsize()
+ }
+ }
+ }
+ s += 12 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *proposalTable) MsgIsZero() bool {
+ return (len((*z).Pending) == 0) && ((*z).PendingNext == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *proposalTracker) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 3
+ // string "Duplicate"
+ o = append(o, 0x83, 0xa9, 0x44, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65)
+ if (*z).Duplicate == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Duplicate)))
+ }
+ zb0001_keys := make([]basics.Address, 0, len((*z).Duplicate))
+ for zb0001 := range (*z).Duplicate {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortAddress(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Duplicate[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ o = msgp.AppendBool(o, zb0002)
+ }
+ // string "Freezer"
+ o = append(o, 0xa7, 0x46, 0x72, 0x65, 0x65, 0x7a, 0x65, 0x72)
+ o = (*z).Freezer.MarshalMsg(o)
+ // string "Staging"
+ o = append(o, 0xa7, 0x53, 0x74, 0x61, 0x67, 0x69, 0x6e, 0x67)
+ o = (*z).Staging.MarshalMsg(o)
+ return
+}
+
+func (_ *proposalTracker) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalTracker)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *proposalTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Duplicate")
+ return
+ }
+ if zb0006 {
+ (*z).Duplicate = nil
+ } else if (*z).Duplicate == nil {
+ (*z).Duplicate = make(map[basics.Address]bool, zb0005)
+ }
+ for zb0005 > 0 {
+ var zb0001 basics.Address
+ var zb0002 bool
+ zb0005--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Duplicate")
+ return
+ }
+ zb0002, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Duplicate", zb0001)
+ return
+ }
+ (*z).Duplicate[zb0001] = zb0002
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).Freezer.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Freezer")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).Staging.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Staging")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = proposalTracker{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Duplicate":
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Duplicate")
+ return
+ }
+ if zb0008 {
+ (*z).Duplicate = nil
+ } else if (*z).Duplicate == nil {
+ (*z).Duplicate = make(map[basics.Address]bool, zb0007)
+ }
+ for zb0007 > 0 {
+ var zb0001 basics.Address
+ var zb0002 bool
+ zb0007--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Duplicate")
+ return
+ }
+ zb0002, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Duplicate", zb0001)
+ return
+ }
+ (*z).Duplicate[zb0001] = zb0002
+ }
+ case "Freezer":
+ bts, err = (*z).Freezer.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Freezer")
+ return
+ }
+ case "Staging":
+ bts, err = (*z).Staging.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Staging")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *proposalTracker) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalTracker)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *proposalTracker) Msgsize() (s int) {
+ s = 1 + 10 + msgp.MapHeaderSize
+ if (*z).Duplicate != nil {
+ for zb0001, zb0002 := range (*z).Duplicate {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + zb0001.Msgsize() + msgp.BoolSize
+ }
+ }
+ s += 8 + (*z).Freezer.Msgsize() + 8 + (*z).Staging.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *proposalTracker) MsgIsZero() bool {
+ return (len((*z).Duplicate) == 0) && ((*z).Freezer.MsgIsZero()) && ((*z).Staging.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *proposalTrackerContract) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 4
+ // string "Froze"
+ o = append(o, 0x84, 0xa5, 0x46, 0x72, 0x6f, 0x7a, 0x65)
+ o = msgp.AppendBool(o, (*z).Froze)
+ // string "SawCertThreshold"
+ o = append(o, 0xb0, 0x53, 0x61, 0x77, 0x43, 0x65, 0x72, 0x74, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64)
+ o = msgp.AppendBool(o, (*z).SawCertThreshold)
+ // string "SawOneVote"
+ o = append(o, 0xaa, 0x53, 0x61, 0x77, 0x4f, 0x6e, 0x65, 0x56, 0x6f, 0x74, 0x65)
+ o = msgp.AppendBool(o, (*z).SawOneVote)
+ // string "SawSoftThreshold"
+ o = append(o, 0xb0, 0x53, 0x61, 0x77, 0x53, 0x6f, 0x66, 0x74, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64)
+ o = msgp.AppendBool(o, (*z).SawSoftThreshold)
+ return
+}
+
+func (_ *proposalTrackerContract) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalTrackerContract)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *proposalTrackerContract) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).SawOneVote, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "SawOneVote")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Froze, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Froze")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).SawSoftThreshold, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "SawSoftThreshold")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).SawCertThreshold, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "SawCertThreshold")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = proposalTrackerContract{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "SawOneVote":
+ (*z).SawOneVote, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "SawOneVote")
+ return
+ }
+ case "Froze":
+ (*z).Froze, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Froze")
+ return
+ }
+ case "SawSoftThreshold":
+ (*z).SawSoftThreshold, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "SawSoftThreshold")
+ return
+ }
+ case "SawCertThreshold":
+ (*z).SawCertThreshold, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "SawCertThreshold")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *proposalTrackerContract) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalTrackerContract)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *proposalTrackerContract) Msgsize() (s int) {
+ s = 1 + 11 + msgp.BoolSize + 6 + msgp.BoolSize + 17 + msgp.BoolSize + 17 + msgp.BoolSize
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *proposalTrackerContract) MsgIsZero() bool {
+ return ((*z).SawOneVote == false) && ((*z).Froze == false) && ((*z).SawSoftThreshold == false) && ((*z).SawCertThreshold == false)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *proposalValue) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -2426,6 +5902,185 @@ func (z *proposalValue) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *proposalVoteCounter) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "Count"
+ o = append(o, 0x82, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74)
+ o = msgp.AppendUint64(o, (*z).Count)
+ // string "Votes"
+ o = append(o, 0xa5, 0x56, 0x6f, 0x74, 0x65, 0x73)
+ if (*z).Votes == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Votes)))
+ }
+ zb0001_keys := make([]basics.Address, 0, len((*z).Votes))
+ for zb0001 := range (*z).Votes {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortAddress(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Votes[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ o = zb0002.MarshalMsg(o)
+ }
+ return
+}
+
+func (_ *proposalVoteCounter) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalVoteCounter)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *proposalVoteCounter) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).Count, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Count")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Votes")
+ return
+ }
+ if zb0006 {
+ (*z).Votes = nil
+ } else if (*z).Votes == nil {
+ (*z).Votes = make(map[basics.Address]vote, zb0005)
+ }
+ for zb0005 > 0 {
+ var zb0001 basics.Address
+ var zb0002 vote
+ zb0005--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Votes")
+ return
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Votes", zb0001)
+ return
+ }
+ (*z).Votes[zb0001] = zb0002
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = proposalVoteCounter{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Count":
+ (*z).Count, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Count")
+ return
+ }
+ case "Votes":
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Votes")
+ return
+ }
+ if zb0008 {
+ (*z).Votes = nil
+ } else if (*z).Votes == nil {
+ (*z).Votes = make(map[basics.Address]vote, zb0007)
+ }
+ for zb0007 > 0 {
+ var zb0001 basics.Address
+ var zb0002 vote
+ zb0007--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Votes")
+ return
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Votes", zb0001)
+ return
+ }
+ (*z).Votes[zb0001] = zb0002
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *proposalVoteCounter) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalVoteCounter)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *proposalVoteCounter) Msgsize() (s int) {
+ s = 1 + 6 + msgp.Uint64Size + 6 + msgp.MapHeaderSize
+ if (*z).Votes != nil {
+ for zb0001, zb0002 := range (*z).Votes {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
+ }
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *proposalVoteCounter) MsgIsZero() bool {
+ return ((*z).Count == 0) && (len((*z).Votes) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *proposerSeed) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
@@ -2751,6 +6406,740 @@ func (z *rawVote) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *rootRouter) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 3
+ // string "Children"
+ o = append(o, 0x83, 0xa8, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e)
+ if (*z).Children == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Children)))
+ }
+ zb0001_keys := make([]round, 0, len((*z).Children))
+ for zb0001 := range (*z).Children {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortRound(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Children[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ if zb0002 == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = zb0002.MarshalMsg(o)
+ }
+ }
+ // string "ProposalManager"
+ o = append(o, 0xaf, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72)
+ // map header, size 0
+ o = append(o, 0x80)
+ // string "VoteAggregator"
+ o = append(o, 0xae, 0x56, 0x6f, 0x74, 0x65, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x6f, 0x72)
+ // map header, size 0
+ o = append(o, 0x80)
+ return
+}
+
+func (_ *rootRouter) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*rootRouter)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *rootRouter) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProposalManager")
+ return
+ }
+ if zb0005 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0005)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProposalManager", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProposalManager")
+ return
+ }
+ if zb0006 {
+ (*z).ProposalManager = proposalManager{}
+ }
+ for zb0005 > 0 {
+ zb0005--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProposalManager")
+ return
+ }
+ switch string(field) {
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProposalManager")
+ return
+ }
+ }
+ }
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteAggregator")
+ return
+ }
+ if zb0007 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0007)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteAggregator", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteAggregator")
+ return
+ }
+ if zb0008 {
+ (*z).VoteAggregator = voteAggregator{}
+ }
+ for zb0007 > 0 {
+ zb0007--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteAggregator")
+ return
+ }
+ switch string(field) {
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteAggregator")
+ return
+ }
+ }
+ }
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children")
+ return
+ }
+ if zb0010 {
+ (*z).Children = nil
+ } else if (*z).Children == nil {
+ (*z).Children = make(map[round]*roundRouter, zb0009)
+ }
+ for zb0009 > 0 {
+ var zb0001 round
+ var zb0002 *roundRouter
+ zb0009--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ zb0002 = nil
+ } else {
+ if zb0002 == nil {
+ zb0002 = new(roundRouter)
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children", zb0001)
+ return
+ }
+ }
+ (*z).Children[zb0001] = zb0002
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = rootRouter{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "ProposalManager":
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0011, zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ProposalManager")
+ return
+ }
+ if zb0011 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0011)
+ if err != nil {
+ err = msgp.WrapError(err, "ProposalManager", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "ProposalManager")
+ return
+ }
+ if zb0012 {
+ (*z).ProposalManager = proposalManager{}
+ }
+ for zb0011 > 0 {
+ zb0011--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ProposalManager")
+ return
+ }
+ switch string(field) {
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "ProposalManager")
+ return
+ }
+ }
+ }
+ }
+ case "VoteAggregator":
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteAggregator")
+ return
+ }
+ if zb0013 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0013)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteAggregator", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "VoteAggregator")
+ return
+ }
+ if zb0014 {
+ (*z).VoteAggregator = voteAggregator{}
+ }
+ for zb0013 > 0 {
+ zb0013--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteAggregator")
+ return
+ }
+ switch string(field) {
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "VoteAggregator")
+ return
+ }
+ }
+ }
+ }
+ case "Children":
+ var zb0015 int
+ var zb0016 bool
+ zb0015, zb0016, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children")
+ return
+ }
+ if zb0016 {
+ (*z).Children = nil
+ } else if (*z).Children == nil {
+ (*z).Children = make(map[round]*roundRouter, zb0015)
+ }
+ for zb0015 > 0 {
+ var zb0001 round
+ var zb0002 *roundRouter
+ zb0015--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ zb0002 = nil
+ } else {
+ if zb0002 == nil {
+ zb0002 = new(roundRouter)
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children", zb0001)
+ return
+ }
+ }
+ (*z).Children[zb0001] = zb0002
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *rootRouter) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*rootRouter)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *rootRouter) Msgsize() (s int) {
+ s = 1 + 16 + 1 + 15 + 1 + 9 + msgp.MapHeaderSize
+ if (*z).Children != nil {
+ for zb0001, zb0002 := range (*z).Children {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + zb0001.Msgsize()
+ if zb0002 == nil {
+ s += msgp.NilSize
+ } else {
+ s += zb0002.Msgsize()
+ }
+ }
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *rootRouter) MsgIsZero() bool {
+ return (len((*z).Children) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *roundRouter) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 3
+ // string "Children"
+ o = append(o, 0x83, 0xa8, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e)
+ if (*z).Children == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Children)))
+ }
+ zb0001_keys := make([]period, 0, len((*z).Children))
+ for zb0001 := range (*z).Children {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortPeriod(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Children[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ if zb0002 == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = zb0002.MarshalMsg(o)
+ }
+ }
+ // string "ProposalStore"
+ o = append(o, 0xad, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x65)
+ o = (*z).ProposalStore.MarshalMsg(o)
+ // string "VoteTrackerRound"
+ o = append(o, 0xb0, 0x56, 0x6f, 0x74, 0x65, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x6e, 0x64)
+ // map header, size 2
+ // string "Freshest"
+ o = append(o, 0x82, 0xa8, 0x46, 0x72, 0x65, 0x73, 0x68, 0x65, 0x73, 0x74)
+ o = (*z).VoteTrackerRound.Freshest.MarshalMsg(o)
+ // string "Ok"
+ o = append(o, 0xa2, 0x4f, 0x6b)
+ o = msgp.AppendBool(o, (*z).VoteTrackerRound.Ok)
+ return
+}
+
+func (_ *roundRouter) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*roundRouter)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *roundRouter) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).ProposalStore.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProposalStore")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound")
+ return
+ }
+ if zb0005 > 0 {
+ zb0005--
+ bts, err = (*z).VoteTrackerRound.Freshest.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound", "struct-from-array", "Freshest")
+ return
+ }
+ }
+ if zb0005 > 0 {
+ zb0005--
+ (*z).VoteTrackerRound.Ok, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound", "struct-from-array", "Ok")
+ return
+ }
+ }
+ if zb0005 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0005)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound")
+ return
+ }
+ if zb0006 {
+ (*z).VoteTrackerRound = voteTrackerRound{}
+ }
+ for zb0005 > 0 {
+ zb0005--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound")
+ return
+ }
+ switch string(field) {
+ case "Freshest":
+ bts, err = (*z).VoteTrackerRound.Freshest.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound", "Freshest")
+ return
+ }
+ case "Ok":
+ (*z).VoteTrackerRound.Ok, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound", "Ok")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound")
+ return
+ }
+ }
+ }
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children")
+ return
+ }
+ if zb0008 {
+ (*z).Children = nil
+ } else if (*z).Children == nil {
+ (*z).Children = make(map[period]*periodRouter, zb0007)
+ }
+ for zb0007 > 0 {
+ var zb0001 period
+ var zb0002 *periodRouter
+ zb0007--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ zb0002 = nil
+ } else {
+ if zb0002 == nil {
+ zb0002 = new(periodRouter)
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children", zb0001)
+ return
+ }
+ }
+ (*z).Children[zb0001] = zb0002
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = roundRouter{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "ProposalStore":
+ bts, err = (*z).ProposalStore.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ProposalStore")
+ return
+ }
+ case "VoteTrackerRound":
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound")
+ return
+ }
+ if zb0009 > 0 {
+ zb0009--
+ bts, err = (*z).VoteTrackerRound.Freshest.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound", "struct-from-array", "Freshest")
+ return
+ }
+ }
+ if zb0009 > 0 {
+ zb0009--
+ (*z).VoteTrackerRound.Ok, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound", "struct-from-array", "Ok")
+ return
+ }
+ }
+ if zb0009 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0009)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound")
+ return
+ }
+ if zb0010 {
+ (*z).VoteTrackerRound = voteTrackerRound{}
+ }
+ for zb0009 > 0 {
+ zb0009--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound")
+ return
+ }
+ switch string(field) {
+ case "Freshest":
+ bts, err = (*z).VoteTrackerRound.Freshest.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound", "Freshest")
+ return
+ }
+ case "Ok":
+ (*z).VoteTrackerRound.Ok, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound", "Ok")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound")
+ return
+ }
+ }
+ }
+ }
+ case "Children":
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children")
+ return
+ }
+ if zb0012 {
+ (*z).Children = nil
+ } else if (*z).Children == nil {
+ (*z).Children = make(map[period]*periodRouter, zb0011)
+ }
+ for zb0011 > 0 {
+ var zb0001 period
+ var zb0002 *periodRouter
+ zb0011--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ zb0002 = nil
+ } else {
+ if zb0002 == nil {
+ zb0002 = new(periodRouter)
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children", zb0001)
+ return
+ }
+ }
+ (*z).Children[zb0001] = zb0002
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *roundRouter) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*roundRouter)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *roundRouter) Msgsize() (s int) {
+ s = 1 + 14 + (*z).ProposalStore.Msgsize() + 17 + 1 + 9 + (*z).VoteTrackerRound.Freshest.Msgsize() + 3 + msgp.BoolSize + 9 + msgp.MapHeaderSize
+ if (*z).Children != nil {
+ for zb0001, zb0002 := range (*z).Children {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + zb0001.Msgsize()
+ if zb0002 == nil {
+ s += msgp.NilSize
+ } else {
+ s += zb0002.Msgsize()
+ }
+ }
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *roundRouter) MsgIsZero() bool {
+ return ((*z).ProposalStore.MsgIsZero()) && (((*z).VoteTrackerRound.Freshest.MsgIsZero()) && ((*z).VoteTrackerRound.Ok == false)) && (len((*z).Children) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *seedInput) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
@@ -3023,22 +7412,22 @@ func (z *selector) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
-func (z serializableErrorUnderlying) MarshalMsg(b []byte) (o []byte) {
+func (z serializableError) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
return
}
-func (_ serializableErrorUnderlying) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(serializableErrorUnderlying)
+func (_ serializableError) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(serializableError)
if !ok {
- _, ok = (z).(*serializableErrorUnderlying)
+ _, ok = (z).(*serializableError)
}
return ok
}
// UnmarshalMsg implements msgp.Unmarshaler
-func (z *serializableErrorUnderlying) UnmarshalMsg(bts []byte) (o []byte, err error) {
+func (z *serializableError) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts)
@@ -3046,25 +7435,25 @@ func (z *serializableErrorUnderlying) UnmarshalMsg(bts []byte) (o []byte, err er
err = msgp.WrapError(err)
return
}
- (*z) = serializableErrorUnderlying(zb0001)
+ (*z) = serializableError(zb0001)
}
o = bts
return
}
-func (_ *serializableErrorUnderlying) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*serializableErrorUnderlying)
+func (_ *serializableError) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*serializableError)
return ok
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z serializableErrorUnderlying) Msgsize() (s int) {
+func (z serializableError) Msgsize() (s int) {
s = msgp.StringPrefixSize + len(string(z))
return
}
// MsgIsZero returns whether this is a zero value
-func (z serializableErrorUnderlying) MsgIsZero() bool {
+func (z serializableError) MsgIsZero() bool {
return z == ""
}
@@ -3115,6 +7504,337 @@ func (z step) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *stepRouter) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "VoteTracker"
+ o = append(o, 0x82, 0xab, 0x56, 0x6f, 0x74, 0x65, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72)
+ o = (*z).VoteTracker.MarshalMsg(o)
+ // string "VoteTrackerContract"
+ o = append(o, 0xb3, 0x56, 0x6f, 0x74, 0x65, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74)
+ o = (*z).VoteTrackerContract.MarshalMsg(o)
+ return
+}
+
+func (_ *stepRouter) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*stepRouter)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *stepRouter) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).VoteTracker.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTracker")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).VoteTrackerContract.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerContract")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = stepRouter{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "VoteTracker":
+ bts, err = (*z).VoteTracker.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTracker")
+ return
+ }
+ case "VoteTrackerContract":
+ bts, err = (*z).VoteTrackerContract.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerContract")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *stepRouter) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*stepRouter)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *stepRouter) Msgsize() (s int) {
+ s = 1 + 12 + (*z).VoteTracker.Msgsize() + 20 + (*z).VoteTrackerContract.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *stepRouter) MsgIsZero() bool {
+ return ((*z).VoteTracker.MsgIsZero()) && ((*z).VoteTrackerContract.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *thresholdEvent) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 7
+ // string "Bundle"
+ o = append(o, 0x87, 0xa6, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65)
+ o = (*z).Bundle.MarshalMsg(o)
+ // string "Period"
+ o = append(o, 0xa6, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64)
+ o = msgp.AppendUint64(o, uint64((*z).Period))
+ // string "Proposal"
+ o = append(o, 0xa8, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c)
+ o = (*z).Proposal.MarshalMsg(o)
+ // string "Proto"
+ o = append(o, 0xa5, 0x50, 0x72, 0x6f, 0x74, 0x6f)
+ o = (*z).Proto.MarshalMsg(o)
+ // string "Round"
+ o = append(o, 0xa5, 0x52, 0x6f, 0x75, 0x6e, 0x64)
+ o = (*z).Round.MarshalMsg(o)
+ // string "Step"
+ o = append(o, 0xa4, 0x53, 0x74, 0x65, 0x70)
+ o = msgp.AppendUint64(o, uint64((*z).Step))
+ // string "T"
+ o = append(o, 0xa1, 0x54)
+ o = msgp.AppendUint8(o, uint8((*z).T))
+ return
+}
+
+func (_ *thresholdEvent) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*thresholdEvent)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *thresholdEvent) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0003 uint8
+ zb0003, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "T")
+ return
+ }
+ (*z).T = eventType(zb0003)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Round.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Round")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0004 uint64
+ zb0004, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Period")
+ return
+ }
+ (*z).Period = period(zb0004)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0005 uint64
+ zb0005, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Step")
+ return
+ }
+ (*z).Step = step(zb0005)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Proposal")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Bundle.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Bundle")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Proto.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Proto")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = thresholdEvent{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "T":
+ {
+ var zb0006 uint8
+ zb0006, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "T")
+ return
+ }
+ (*z).T = eventType(zb0006)
+ }
+ case "Round":
+ bts, err = (*z).Round.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Round")
+ return
+ }
+ case "Period":
+ {
+ var zb0007 uint64
+ zb0007, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Period")
+ return
+ }
+ (*z).Period = period(zb0007)
+ }
+ case "Step":
+ {
+ var zb0008 uint64
+ zb0008, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Step")
+ return
+ }
+ (*z).Step = step(zb0008)
+ }
+ case "Proposal":
+ bts, err = (*z).Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Proposal")
+ return
+ }
+ case "Bundle":
+ bts, err = (*z).Bundle.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Bundle")
+ return
+ }
+ case "Proto":
+ bts, err = (*z).Proto.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Proto")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *thresholdEvent) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*thresholdEvent)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *thresholdEvent) Msgsize() (s int) {
+ s = 1 + 2 + msgp.Uint8Size + 6 + (*z).Round.Msgsize() + 7 + msgp.Uint64Size + 5 + msgp.Uint64Size + 9 + (*z).Proposal.Msgsize() + 7 + (*z).Bundle.Msgsize() + 6 + (*z).Proto.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *thresholdEvent) MsgIsZero() bool {
+ return ((*z).T == 0) && ((*z).Round.MsgIsZero()) && ((*z).Period == 0) && ((*z).Step == 0) && ((*z).Proposal.MsgIsZero()) && ((*z).Bundle.MsgIsZero()) && ((*z).Proto.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *transmittedPayload) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -5885,6 +10605,84 @@ func (z *vote) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *voteAggregator) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 0
+ o = append(o, 0x80)
+ return
+}
+
+func (_ *voteAggregator) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteAggregator)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *voteAggregator) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = voteAggregator{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *voteAggregator) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteAggregator)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *voteAggregator) Msgsize() (s int) {
+ s = 1
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *voteAggregator) MsgIsZero() bool {
+ return true
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *voteAuthenticator) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -6023,3 +10821,830 @@ func (z *voteAuthenticator) Msgsize() (s int) {
func (z *voteAuthenticator) MsgIsZero() bool {
return ((*z).Sender.MsgIsZero()) && ((*z).Cred.MsgIsZero()) && ((*z).Sig.MsgIsZero())
}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *voteTracker) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 4
+ // string "Counts"
+ o = append(o, 0x84, 0xa6, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73)
+ if (*z).Counts == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Counts)))
+ }
+ zb0003_keys := make([]proposalValue, 0, len((*z).Counts))
+ for zb0003 := range (*z).Counts {
+ zb0003_keys = append(zb0003_keys, zb0003)
+ }
+ sort.Sort(SortProposalValue(zb0003_keys))
+ for _, zb0003 := range zb0003_keys {
+ zb0004 := (*z).Counts[zb0003]
+ _ = zb0004
+ o = zb0003.MarshalMsg(o)
+ o = zb0004.MarshalMsg(o)
+ }
+ // string "Equivocators"
+ o = append(o, 0xac, 0x45, 0x71, 0x75, 0x69, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73)
+ if (*z).Equivocators == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Equivocators)))
+ }
+ zb0005_keys := make([]basics.Address, 0, len((*z).Equivocators))
+ for zb0005 := range (*z).Equivocators {
+ zb0005_keys = append(zb0005_keys, zb0005)
+ }
+ sort.Sort(SortAddress(zb0005_keys))
+ for _, zb0005 := range zb0005_keys {
+ zb0006 := (*z).Equivocators[zb0005]
+ _ = zb0006
+ o = zb0005.MarshalMsg(o)
+ o = zb0006.MarshalMsg(o)
+ }
+ // string "EquivocatorsCount"
+ o = append(o, 0xb1, 0x45, 0x71, 0x75, 0x69, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
+ o = msgp.AppendUint64(o, (*z).EquivocatorsCount)
+ // string "Voters"
+ o = append(o, 0xa6, 0x56, 0x6f, 0x74, 0x65, 0x72, 0x73)
+ if (*z).Voters == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Voters)))
+ }
+ zb0001_keys := make([]basics.Address, 0, len((*z).Voters))
+ for zb0001 := range (*z).Voters {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortAddress(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Voters[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ o = zb0002.MarshalMsg(o)
+ }
+ return
+}
+
+func (_ *voteTracker) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteTracker)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *voteTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0007 > 0 {
+ zb0007--
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Voters")
+ return
+ }
+ if zb0010 {
+ (*z).Voters = nil
+ } else if (*z).Voters == nil {
+ (*z).Voters = make(map[basics.Address]vote, zb0009)
+ }
+ for zb0009 > 0 {
+ var zb0001 basics.Address
+ var zb0002 vote
+ zb0009--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Voters")
+ return
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Voters", zb0001)
+ return
+ }
+ (*z).Voters[zb0001] = zb0002
+ }
+ }
+ if zb0007 > 0 {
+ zb0007--
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Counts")
+ return
+ }
+ if zb0012 {
+ (*z).Counts = nil
+ } else if (*z).Counts == nil {
+ (*z).Counts = make(map[proposalValue]proposalVoteCounter, zb0011)
+ }
+ for zb0011 > 0 {
+ var zb0003 proposalValue
+ var zb0004 proposalVoteCounter
+ zb0011--
+ bts, err = zb0003.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Counts")
+ return
+ }
+ bts, err = zb0004.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Counts", zb0003)
+ return
+ }
+ (*z).Counts[zb0003] = zb0004
+ }
+ }
+ if zb0007 > 0 {
+ zb0007--
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Equivocators")
+ return
+ }
+ if zb0014 {
+ (*z).Equivocators = nil
+ } else if (*z).Equivocators == nil {
+ (*z).Equivocators = make(map[basics.Address]equivocationVote, zb0013)
+ }
+ for zb0013 > 0 {
+ var zb0005 basics.Address
+ var zb0006 equivocationVote
+ zb0013--
+ bts, err = zb0005.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Equivocators")
+ return
+ }
+ bts, err = zb0006.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Equivocators", zb0005)
+ return
+ }
+ (*z).Equivocators[zb0005] = zb0006
+ }
+ }
+ if zb0007 > 0 {
+ zb0007--
+ (*z).EquivocatorsCount, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "EquivocatorsCount")
+ return
+ }
+ }
+ if zb0007 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0007)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0008 {
+ (*z) = voteTracker{}
+ }
+ for zb0007 > 0 {
+ zb0007--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Voters":
+ var zb0015 int
+ var zb0016 bool
+ zb0015, zb0016, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Voters")
+ return
+ }
+ if zb0016 {
+ (*z).Voters = nil
+ } else if (*z).Voters == nil {
+ (*z).Voters = make(map[basics.Address]vote, zb0015)
+ }
+ for zb0015 > 0 {
+ var zb0001 basics.Address
+ var zb0002 vote
+ zb0015--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Voters")
+ return
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Voters", zb0001)
+ return
+ }
+ (*z).Voters[zb0001] = zb0002
+ }
+ case "Counts":
+ var zb0017 int
+ var zb0018 bool
+ zb0017, zb0018, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Counts")
+ return
+ }
+ if zb0018 {
+ (*z).Counts = nil
+ } else if (*z).Counts == nil {
+ (*z).Counts = make(map[proposalValue]proposalVoteCounter, zb0017)
+ }
+ for zb0017 > 0 {
+ var zb0003 proposalValue
+ var zb0004 proposalVoteCounter
+ zb0017--
+ bts, err = zb0003.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Counts")
+ return
+ }
+ bts, err = zb0004.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Counts", zb0003)
+ return
+ }
+ (*z).Counts[zb0003] = zb0004
+ }
+ case "Equivocators":
+ var zb0019 int
+ var zb0020 bool
+ zb0019, zb0020, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Equivocators")
+ return
+ }
+ if zb0020 {
+ (*z).Equivocators = nil
+ } else if (*z).Equivocators == nil {
+ (*z).Equivocators = make(map[basics.Address]equivocationVote, zb0019)
+ }
+ for zb0019 > 0 {
+ var zb0005 basics.Address
+ var zb0006 equivocationVote
+ zb0019--
+ bts, err = zb0005.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Equivocators")
+ return
+ }
+ bts, err = zb0006.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Equivocators", zb0005)
+ return
+ }
+ (*z).Equivocators[zb0005] = zb0006
+ }
+ case "EquivocatorsCount":
+ (*z).EquivocatorsCount, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "EquivocatorsCount")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *voteTracker) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteTracker)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *voteTracker) Msgsize() (s int) {
+ s = 1 + 7 + msgp.MapHeaderSize
+ if (*z).Voters != nil {
+ for zb0001, zb0002 := range (*z).Voters {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
+ }
+ }
+ s += 7 + msgp.MapHeaderSize
+ if (*z).Counts != nil {
+ for zb0003, zb0004 := range (*z).Counts {
+ _ = zb0003
+ _ = zb0004
+ s += 0 + zb0003.Msgsize() + zb0004.Msgsize()
+ }
+ }
+ s += 13 + msgp.MapHeaderSize
+ if (*z).Equivocators != nil {
+ for zb0005, zb0006 := range (*z).Equivocators {
+ _ = zb0005
+ _ = zb0006
+ s += 0 + zb0005.Msgsize() + zb0006.Msgsize()
+ }
+ }
+ s += 18 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *voteTracker) MsgIsZero() bool {
+ return (len((*z).Voters) == 0) && (len((*z).Counts) == 0) && (len((*z).Equivocators) == 0) && ((*z).EquivocatorsCount == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *voteTrackerContract) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 3
+ // string "Emitted"
+ o = append(o, 0x83, 0xa7, 0x45, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64)
+ o = msgp.AppendBool(o, (*z).Emitted)
+ // string "Step"
+ o = append(o, 0xa4, 0x53, 0x74, 0x65, 0x70)
+ o = msgp.AppendUint64(o, uint64((*z).Step))
+ // string "StepOk"
+ o = append(o, 0xa6, 0x53, 0x74, 0x65, 0x70, 0x4f, 0x6b)
+ o = msgp.AppendBool(o, (*z).StepOk)
+ return
+}
+
+func (_ *voteTrackerContract) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteTrackerContract)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *voteTrackerContract) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0003 uint64
+ zb0003, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Step")
+ return
+ }
+ (*z).Step = step(zb0003)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).StepOk, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "StepOk")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Emitted, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Emitted")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = voteTrackerContract{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Step":
+ {
+ var zb0004 uint64
+ zb0004, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Step")
+ return
+ }
+ (*z).Step = step(zb0004)
+ }
+ case "StepOk":
+ (*z).StepOk, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "StepOk")
+ return
+ }
+ case "Emitted":
+ (*z).Emitted, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Emitted")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *voteTrackerContract) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteTrackerContract)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *voteTrackerContract) Msgsize() (s int) {
+ s = 1 + 5 + msgp.Uint64Size + 7 + msgp.BoolSize + 8 + msgp.BoolSize
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *voteTrackerContract) MsgIsZero() bool {
+ return ((*z).Step == 0) && ((*z).StepOk == false) && ((*z).Emitted == false)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *voteTrackerPeriod) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 1
+ // string "Cached"
+ o = append(o, 0x81, 0xa6, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64)
+ // map header, size 2
+ // string "Bottom"
+ o = append(o, 0x82, 0xa6, 0x42, 0x6f, 0x74, 0x74, 0x6f, 0x6d)
+ o = msgp.AppendBool(o, (*z).Cached.Bottom)
+ // string "Proposal"
+ o = append(o, 0xa8, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c)
+ o = (*z).Cached.Proposal.MarshalMsg(o)
+ return
+}
+
+func (_ *voteTrackerPeriod) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteTrackerPeriod)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *voteTrackerPeriod) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached")
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).Cached.Bottom, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached", "struct-from-array", "Bottom")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).Cached.Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached", "struct-from-array", "Proposal")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached")
+ return
+ }
+ if zb0004 {
+ (*z).Cached = nextThresholdStatusEvent{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached")
+ return
+ }
+ switch string(field) {
+ case "Bottom":
+ (*z).Cached.Bottom, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached", "Bottom")
+ return
+ }
+ case "Proposal":
+ bts, err = (*z).Cached.Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached", "Proposal")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached")
+ return
+ }
+ }
+ }
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = voteTrackerPeriod{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Cached":
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Cached")
+ return
+ }
+ if zb0005 > 0 {
+ zb0005--
+ (*z).Cached.Bottom, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Cached", "struct-from-array", "Bottom")
+ return
+ }
+ }
+ if zb0005 > 0 {
+ zb0005--
+ bts, err = (*z).Cached.Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Cached", "struct-from-array", "Proposal")
+ return
+ }
+ }
+ if zb0005 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0005)
+ if err != nil {
+ err = msgp.WrapError(err, "Cached", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "Cached")
+ return
+ }
+ if zb0006 {
+ (*z).Cached = nextThresholdStatusEvent{}
+ }
+ for zb0005 > 0 {
+ zb0005--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Cached")
+ return
+ }
+ switch string(field) {
+ case "Bottom":
+ (*z).Cached.Bottom, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Cached", "Bottom")
+ return
+ }
+ case "Proposal":
+ bts, err = (*z).Cached.Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Cached", "Proposal")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "Cached")
+ return
+ }
+ }
+ }
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *voteTrackerPeriod) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteTrackerPeriod)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *voteTrackerPeriod) Msgsize() (s int) {
+ s = 1 + 7 + 1 + 7 + msgp.BoolSize + 9 + (*z).Cached.Proposal.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *voteTrackerPeriod) MsgIsZero() bool {
+ return (((*z).Cached.Bottom == false) && ((*z).Cached.Proposal.MsgIsZero()))
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *voteTrackerRound) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "Freshest"
+ o = append(o, 0x82, 0xa8, 0x46, 0x72, 0x65, 0x73, 0x68, 0x65, 0x73, 0x74)
+ o = (*z).Freshest.MarshalMsg(o)
+ // string "Ok"
+ o = append(o, 0xa2, 0x4f, 0x6b)
+ o = msgp.AppendBool(o, (*z).Ok)
+ return
+}
+
+func (_ *voteTrackerRound) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteTrackerRound)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *voteTrackerRound) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Freshest.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Freshest")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Ok, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Ok")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = voteTrackerRound{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Freshest":
+ bts, err = (*z).Freshest.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Freshest")
+ return
+ }
+ case "Ok":
+ (*z).Ok, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Ok")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *voteTrackerRound) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteTrackerRound)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *voteTrackerRound) Msgsize() (s int) {
+ s = 1 + 9 + (*z).Freshest.Msgsize() + 3 + msgp.BoolSize
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *voteTrackerRound) MsgIsZero() bool {
+ return ((*z).Freshest.MsgIsZero()) && ((*z).Ok == false)
+}
diff --git a/agreement/msgp_gen_test.go b/agreement/msgp_gen_test.go
index 0231cc28a..99053ca4c 100644
--- a/agreement/msgp_gen_test.go
+++ b/agreement/msgp_gen_test.go
@@ -74,6 +74,126 @@ func BenchmarkUnmarshalCertificate(b *testing.B) {
}
}
+func TestMarshalUnmarshalConsensusVersionView(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := ConsensusVersionView{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingConsensusVersionView(t *testing.T) {
+ protocol.RunEncodingTest(t, &ConsensusVersionView{})
+}
+
+func BenchmarkMarshalMsgConsensusVersionView(b *testing.B) {
+ v := ConsensusVersionView{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgConsensusVersionView(b *testing.B) {
+ v := ConsensusVersionView{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalConsensusVersionView(b *testing.B) {
+ v := ConsensusVersionView{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalblockAssembler(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := blockAssembler{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingblockAssembler(t *testing.T) {
+ protocol.RunEncodingTest(t, &blockAssembler{})
+}
+
+func BenchmarkMarshalMsgblockAssembler(b *testing.B) {
+ v := blockAssembler{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgblockAssembler(b *testing.B) {
+ v := blockAssembler{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalblockAssembler(b *testing.B) {
+ v := blockAssembler{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalbundle(t *testing.T) {
partitiontest.PartitionTest(t)
v := bundle{}
@@ -134,6 +254,126 @@ func BenchmarkUnmarshalbundle(b *testing.B) {
}
}
+func TestMarshalUnmarshalcompoundMessage(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := compoundMessage{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingcompoundMessage(t *testing.T) {
+ protocol.RunEncodingTest(t, &compoundMessage{})
+}
+
+func BenchmarkMarshalMsgcompoundMessage(b *testing.B) {
+ v := compoundMessage{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgcompoundMessage(b *testing.B) {
+ v := compoundMessage{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalcompoundMessage(b *testing.B) {
+ v := compoundMessage{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshaldiskState(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := diskState{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingdiskState(t *testing.T) {
+ protocol.RunEncodingTest(t, &diskState{})
+}
+
+func BenchmarkMarshalMsgdiskState(b *testing.B) {
+ v := diskState{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgdiskState(b *testing.B) {
+ v := diskState{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshaldiskState(b *testing.B) {
+ v := diskState{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalequivocationVote(t *testing.T) {
partitiontest.PartitionTest(t)
v := equivocationVote{}
@@ -254,6 +494,366 @@ func BenchmarkUnmarshalequivocationVoteAuthenticator(b *testing.B) {
}
}
+func TestMarshalUnmarshalfreshnessData(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := freshnessData{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingfreshnessData(t *testing.T) {
+ protocol.RunEncodingTest(t, &freshnessData{})
+}
+
+func BenchmarkMarshalMsgfreshnessData(b *testing.B) {
+ v := freshnessData{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgfreshnessData(b *testing.B) {
+ v := freshnessData{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalfreshnessData(b *testing.B) {
+ v := freshnessData{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalmessage(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := message{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingmessage(t *testing.T) {
+ protocol.RunEncodingTest(t, &message{})
+}
+
+func BenchmarkMarshalMsgmessage(b *testing.B) {
+ v := message{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgmessage(b *testing.B) {
+ v := message{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalmessage(b *testing.B) {
+ v := message{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalmessageEvent(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := messageEvent{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingmessageEvent(t *testing.T) {
+ protocol.RunEncodingTest(t, &messageEvent{})
+}
+
+func BenchmarkMarshalMsgmessageEvent(b *testing.B) {
+ v := messageEvent{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgmessageEvent(b *testing.B) {
+ v := messageEvent{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalmessageEvent(b *testing.B) {
+ v := messageEvent{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalnextThresholdStatusEvent(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := nextThresholdStatusEvent{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingnextThresholdStatusEvent(t *testing.T) {
+ protocol.RunEncodingTest(t, &nextThresholdStatusEvent{})
+}
+
+func BenchmarkMarshalMsgnextThresholdStatusEvent(b *testing.B) {
+ v := nextThresholdStatusEvent{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgnextThresholdStatusEvent(b *testing.B) {
+ v := nextThresholdStatusEvent{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalnextThresholdStatusEvent(b *testing.B) {
+ v := nextThresholdStatusEvent{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalperiodRouter(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := periodRouter{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingperiodRouter(t *testing.T) {
+ protocol.RunEncodingTest(t, &periodRouter{})
+}
+
+func BenchmarkMarshalMsgperiodRouter(b *testing.B) {
+ v := periodRouter{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgperiodRouter(b *testing.B) {
+ v := periodRouter{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalperiodRouter(b *testing.B) {
+ v := periodRouter{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalplayer(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := player{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingplayer(t *testing.T) {
+ protocol.RunEncodingTest(t, &player{})
+}
+
+func BenchmarkMarshalMsgplayer(b *testing.B) {
+ v := player{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgplayer(b *testing.B) {
+ v := player{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalplayer(b *testing.B) {
+ v := player{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalproposal(t *testing.T) {
partitiontest.PartitionTest(t)
v := proposal{}
@@ -314,6 +914,366 @@ func BenchmarkUnmarshalproposal(b *testing.B) {
}
}
+func TestMarshalUnmarshalproposalManager(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := proposalManager{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingproposalManager(t *testing.T) {
+ protocol.RunEncodingTest(t, &proposalManager{})
+}
+
+func BenchmarkMarshalMsgproposalManager(b *testing.B) {
+ v := proposalManager{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgproposalManager(b *testing.B) {
+ v := proposalManager{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalproposalManager(b *testing.B) {
+ v := proposalManager{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalproposalSeeker(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := proposalSeeker{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingproposalSeeker(t *testing.T) {
+ protocol.RunEncodingTest(t, &proposalSeeker{})
+}
+
+func BenchmarkMarshalMsgproposalSeeker(b *testing.B) {
+ v := proposalSeeker{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgproposalSeeker(b *testing.B) {
+ v := proposalSeeker{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalproposalSeeker(b *testing.B) {
+ v := proposalSeeker{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalproposalStore(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := proposalStore{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingproposalStore(t *testing.T) {
+ protocol.RunEncodingTest(t, &proposalStore{})
+}
+
+func BenchmarkMarshalMsgproposalStore(b *testing.B) {
+ v := proposalStore{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgproposalStore(b *testing.B) {
+ v := proposalStore{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalproposalStore(b *testing.B) {
+ v := proposalStore{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalproposalTable(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := proposalTable{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingproposalTable(t *testing.T) {
+ protocol.RunEncodingTest(t, &proposalTable{})
+}
+
+func BenchmarkMarshalMsgproposalTable(b *testing.B) {
+ v := proposalTable{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgproposalTable(b *testing.B) {
+ v := proposalTable{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalproposalTable(b *testing.B) {
+ v := proposalTable{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalproposalTracker(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := proposalTracker{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingproposalTracker(t *testing.T) {
+ protocol.RunEncodingTest(t, &proposalTracker{})
+}
+
+func BenchmarkMarshalMsgproposalTracker(b *testing.B) {
+ v := proposalTracker{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgproposalTracker(b *testing.B) {
+ v := proposalTracker{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalproposalTracker(b *testing.B) {
+ v := proposalTracker{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalproposalTrackerContract(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := proposalTrackerContract{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingproposalTrackerContract(t *testing.T) {
+ protocol.RunEncodingTest(t, &proposalTrackerContract{})
+}
+
+func BenchmarkMarshalMsgproposalTrackerContract(b *testing.B) {
+ v := proposalTrackerContract{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgproposalTrackerContract(b *testing.B) {
+ v := proposalTrackerContract{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalproposalTrackerContract(b *testing.B) {
+ v := proposalTrackerContract{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalproposalValue(t *testing.T) {
partitiontest.PartitionTest(t)
v := proposalValue{}
@@ -374,6 +1334,66 @@ func BenchmarkUnmarshalproposalValue(b *testing.B) {
}
}
+func TestMarshalUnmarshalproposalVoteCounter(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := proposalVoteCounter{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingproposalVoteCounter(t *testing.T) {
+ protocol.RunEncodingTest(t, &proposalVoteCounter{})
+}
+
+func BenchmarkMarshalMsgproposalVoteCounter(b *testing.B) {
+ v := proposalVoteCounter{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgproposalVoteCounter(b *testing.B) {
+ v := proposalVoteCounter{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalproposalVoteCounter(b *testing.B) {
+ v := proposalVoteCounter{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalproposerSeed(t *testing.T) {
partitiontest.PartitionTest(t)
v := proposerSeed{}
@@ -494,6 +1514,126 @@ func BenchmarkUnmarshalrawVote(b *testing.B) {
}
}
+func TestMarshalUnmarshalrootRouter(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := rootRouter{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingrootRouter(t *testing.T) {
+ protocol.RunEncodingTest(t, &rootRouter{})
+}
+
+func BenchmarkMarshalMsgrootRouter(b *testing.B) {
+ v := rootRouter{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgrootRouter(b *testing.B) {
+ v := rootRouter{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalrootRouter(b *testing.B) {
+ v := rootRouter{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalroundRouter(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := roundRouter{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingroundRouter(t *testing.T) {
+ protocol.RunEncodingTest(t, &roundRouter{})
+}
+
+func BenchmarkMarshalMsgroundRouter(b *testing.B) {
+ v := roundRouter{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgroundRouter(b *testing.B) {
+ v := roundRouter{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalroundRouter(b *testing.B) {
+ v := roundRouter{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalseedInput(t *testing.T) {
partitiontest.PartitionTest(t)
v := seedInput{}
@@ -614,6 +1754,126 @@ func BenchmarkUnmarshalselector(b *testing.B) {
}
}
+func TestMarshalUnmarshalstepRouter(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := stepRouter{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingstepRouter(t *testing.T) {
+ protocol.RunEncodingTest(t, &stepRouter{})
+}
+
+func BenchmarkMarshalMsgstepRouter(b *testing.B) {
+ v := stepRouter{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgstepRouter(b *testing.B) {
+ v := stepRouter{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalstepRouter(b *testing.B) {
+ v := stepRouter{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalthresholdEvent(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := thresholdEvent{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingthresholdEvent(t *testing.T) {
+ protocol.RunEncodingTest(t, &thresholdEvent{})
+}
+
+func BenchmarkMarshalMsgthresholdEvent(b *testing.B) {
+ v := thresholdEvent{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgthresholdEvent(b *testing.B) {
+ v := thresholdEvent{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalthresholdEvent(b *testing.B) {
+ v := thresholdEvent{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshaltransmittedPayload(t *testing.T) {
partitiontest.PartitionTest(t)
v := transmittedPayload{}
@@ -974,6 +2234,66 @@ func BenchmarkUnmarshalvote(b *testing.B) {
}
}
+func TestMarshalUnmarshalvoteAggregator(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := voteAggregator{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingvoteAggregator(t *testing.T) {
+ protocol.RunEncodingTest(t, &voteAggregator{})
+}
+
+func BenchmarkMarshalMsgvoteAggregator(b *testing.B) {
+ v := voteAggregator{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgvoteAggregator(b *testing.B) {
+ v := voteAggregator{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalvoteAggregator(b *testing.B) {
+ v := voteAggregator{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalvoteAuthenticator(t *testing.T) {
partitiontest.PartitionTest(t)
v := voteAuthenticator{}
@@ -1033,3 +2353,243 @@ func BenchmarkUnmarshalvoteAuthenticator(b *testing.B) {
}
}
}
+
+func TestMarshalUnmarshalvoteTracker(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := voteTracker{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingvoteTracker(t *testing.T) {
+ protocol.RunEncodingTest(t, &voteTracker{})
+}
+
+func BenchmarkMarshalMsgvoteTracker(b *testing.B) {
+ v := voteTracker{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgvoteTracker(b *testing.B) {
+ v := voteTracker{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalvoteTracker(b *testing.B) {
+ v := voteTracker{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalvoteTrackerContract(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := voteTrackerContract{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingvoteTrackerContract(t *testing.T) {
+ protocol.RunEncodingTest(t, &voteTrackerContract{})
+}
+
+func BenchmarkMarshalMsgvoteTrackerContract(b *testing.B) {
+ v := voteTrackerContract{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgvoteTrackerContract(b *testing.B) {
+ v := voteTrackerContract{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalvoteTrackerContract(b *testing.B) {
+ v := voteTrackerContract{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalvoteTrackerPeriod(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := voteTrackerPeriod{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingvoteTrackerPeriod(t *testing.T) {
+ protocol.RunEncodingTest(t, &voteTrackerPeriod{})
+}
+
+func BenchmarkMarshalMsgvoteTrackerPeriod(b *testing.B) {
+ v := voteTrackerPeriod{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgvoteTrackerPeriod(b *testing.B) {
+ v := voteTrackerPeriod{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalvoteTrackerPeriod(b *testing.B) {
+ v := voteTrackerPeriod{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalvoteTrackerRound(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := voteTrackerRound{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingvoteTrackerRound(t *testing.T) {
+ protocol.RunEncodingTest(t, &voteTrackerRound{})
+}
+
+func BenchmarkMarshalMsgvoteTrackerRound(b *testing.B) {
+ v := voteTrackerRound{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgvoteTrackerRound(b *testing.B) {
+ v := voteTrackerRound{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalvoteTrackerRound(b *testing.B) {
+ v := voteTrackerRound{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/agreement/persistence.go b/agreement/persistence.go
index aef2a0f60..497e4b9af 100644
--- a/agreement/persistence.go
+++ b/agreement/persistence.go
@@ -33,10 +33,14 @@ import (
// diskState represents the state required by the agreement protocol to be persistent.
type diskState struct {
- Router, Player, Clock []byte
+ _struct struct{} `codec:","`
- ActionTypes []actionType
- Actions [][]byte
+ Router []byte
+ Player []byte
+ Clock []byte
+
+ ActionTypes []actionType `codec:"ActionTypes,allocbound=-"`
+ Actions [][]byte `codec:"Actions,allocbound=-"`
}
func persistent(as []action) bool {
@@ -49,17 +53,30 @@ func persistent(as []action) bool {
}
// encode serializes the current state into a byte array.
-func encode(t timers.Clock, rr rootRouter, p player, a []action) []byte {
+func encode(t timers.Clock, rr rootRouter, p player, a []action, reflect bool) (raw []byte) {
var s diskState
- s.Router = protocol.EncodeReflect(rr)
- s.Player = protocol.EncodeReflect(p)
+ if reflect {
+ s.Router = protocol.EncodeReflect(rr)
+ s.Player = protocol.EncodeReflect(p)
+ } else {
+ s.Router = protocol.Encode(&rr)
+ s.Player = protocol.Encode(&p)
+ }
s.Clock = t.Encode()
- for _, act := range a {
- s.ActionTypes = append(s.ActionTypes, act.t())
- s.Actions = append(s.Actions, protocol.EncodeReflect(act))
+ s.ActionTypes = make([]actionType, len(a))
+ s.Actions = make([][]byte, len(a))
+ for i, act := range a {
+ s.ActionTypes[i] = act.t()
+
+ // still use reflection for actions since action is an interface and we can't define marshaller methods on it
+ s.Actions[i] = protocol.EncodeReflect(act)
}
- raw := protocol.EncodeReflect(s)
- return raw
+ if reflect {
+ raw = protocol.EncodeReflect(s)
+ } else {
+ raw = protocol.Encode(&s)
+ }
+ return
}
// persist atomically writes state to the crash database.
@@ -177,17 +194,28 @@ func restore(log logging.Logger, crash db.Accessor) (raw []byte, err error) {
// decode process the incoming raw bytes array and attempt to reconstruct the agreement state objects.
//
// In all decoding errors, it returns the error code in err
-func decode(raw []byte, t0 timers.Clock, log serviceLogger) (t timers.Clock, rr rootRouter, p player, a []action, err error) {
+func decode(raw []byte, t0 timers.Clock, log serviceLogger, reflect bool) (t timers.Clock, rr rootRouter, p player, a []action, err error) {
var t2 timers.Clock
var rr2 rootRouter
var p2 player
a2 := []action{}
var s diskState
-
- err = protocol.DecodeReflect(raw, &s)
- if err != nil {
- log.Errorf("decode (agreement): error decoding retrieved state (len = %v): %v", len(raw), err)
- return
+ if reflect {
+ err = protocol.DecodeReflect(raw, &s)
+ if err != nil {
+ log.Errorf("decode (agreement): error decoding retrieved state (len = %v): %v", len(raw), err)
+ return
+ }
+ } else {
+ err = protocol.Decode(raw, &s)
+ if err != nil {
+ log.Warnf("decode (agreement): error decoding retrieved state using msgp (len = %v): %v. Trying reflection", len(raw), err)
+ err = protocol.DecodeReflect(raw, &s)
+ if err != nil {
+ log.Errorf("decode (agreement): error decoding using either reflection or msgp: %v", err)
+ return
+ }
+ }
}
t2, err = t0.Decode(s.Clock)
@@ -195,19 +223,43 @@ func decode(raw []byte, t0 timers.Clock, log serviceLogger) (t timers.Clock, rr
return
}
- err = protocol.DecodeReflect(s.Player, &p2)
- if err != nil {
- return
- }
+ if reflect {
+ err = protocol.DecodeReflect(s.Player, &p2)
+ if err != nil {
+ return
+ }
- rr2 = makeRootRouter(p2)
- err = protocol.DecodeReflect(s.Router, &rr2)
- if err != nil {
- return
+ rr2 = makeRootRouter(p2)
+ err = protocol.DecodeReflect(s.Router, &rr2)
+ if err != nil {
+ return
+ }
+ } else {
+ err = protocol.Decode(s.Player, &p2)
+ if err != nil {
+ log.Warnf("decode (agreement): failed to decode Player using msgp (len = %v): %v. Trying reflection", len(s.Player), err)
+ err = protocol.DecodeReflect(s.Player, &p2)
+ if err != nil {
+ log.Errorf("decode (agreement): failed to decode Player using either reflection or msgp: %v", err)
+ return
+ }
+ }
+ rr2 = makeRootRouter(p2)
+ err = protocol.Decode(s.Router, &rr2)
+ if err != nil {
+ log.Warnf("decode (agreement): failed to decode Router using msgp (len = %v): %v. Trying reflection", len(s.Router), err)
+ rr2 = makeRootRouter(p2)
+ err = protocol.DecodeReflect(s.Router, &rr2)
+ if err != nil {
+ log.Errorf("decode (agreement): failed to decode Router using either reflection or msgp: %v", err)
+ return
+ }
+ }
}
for i := range s.Actions {
act := zeroAction(s.ActionTypes[i])
+ // always use reflection for actions since action is an interface and we can't define unmarshaller methods on it
err = protocol.DecodeReflect(s.Actions[i], &act)
if err != nil {
return
@@ -308,7 +360,7 @@ func (p *asyncPersistenceLoop) loop(ctx context.Context) {
// sanity check; we check it after the fact, since it's not expected to ever happen.
// performance-wise, it takes approximitly 300000ns to execute, and we don't want it to
// block the persist operation.
- _, _, _, _, derr := decode(s.raw, s.clock, p.log)
+ _, _, _, _, derr := decode(s.raw, s.clock, p.log, false)
if derr != nil {
p.log.Errorf("could not decode own encoded disk state: %v", derr)
}
diff --git a/agreement/persistence_test.go b/agreement/persistence_test.go
index 94221f7be..7a4ec3db4 100644
--- a/agreement/persistence_test.go
+++ b/agreement/persistence_test.go
@@ -25,7 +25,9 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-algorand/util/db"
"github.com/algorand/go-algorand/util/timers"
@@ -34,17 +36,17 @@ import (
func TestAgreementSerialization(t *testing.T) {
partitiontest.PartitionTest(t)
- // todo : we need to deserialize some more meaningfull state.
+ // todo : we need to deserialize some more meaningful state.
clock := timers.MakeMonotonicClock(time.Date(2015, 1, 2, 5, 6, 7, 8, time.UTC))
status := player{Round: 350, Step: soft, Deadline: time.Duration(23) * time.Second}
router := makeRootRouter(status)
- a := []action{}
+ a := []action{checkpointAction{}, disconnectAction(messageEvent{}, nil)}
- encodedBytes := encode(clock, router, status, a)
+ encodedBytes := encode(clock, router, status, a, false)
t0 := timers.MakeMonotonicClock(time.Date(2000, 0, 0, 0, 0, 0, 0, time.UTC))
log := makeServiceLogger(logging.Base())
- clock2, router2, status2, a2, err := decode(encodedBytes, t0, log)
+ clock2, router2, status2, a2, err := decode(encodedBytes, t0, log, false)
require.NoError(t, err)
require.Equalf(t, clock, clock2, "Clock wasn't serialized/deserialized correctly")
require.Equalf(t, router, router2, "Router wasn't serialized/deserialized correctly")
@@ -53,7 +55,7 @@ func TestAgreementSerialization(t *testing.T) {
}
func BenchmarkAgreementSerialization(b *testing.B) {
- // todo : we need to deserialize some more meaningfull state.
+ // todo : we need to deserialize some more meaningful state.
b.SkipNow()
clock := timers.MakeMonotonicClock(time.Date(2015, 1, 2, 5, 6, 7, 8, time.UTC))
@@ -63,12 +65,12 @@ func BenchmarkAgreementSerialization(b *testing.B) {
b.ResetTimer()
for n := 0; n < b.N; n++ {
- encode(clock, router, status, a)
+ encode(clock, router, status, a, false)
}
}
func BenchmarkAgreementDeserialization(b *testing.B) {
- // todo : we need to deserialize some more meaningfull state.
+ // todo : we need to deserialize some more meaningful state.
b.SkipNow()
clock := timers.MakeMonotonicClock(time.Date(2015, 1, 2, 5, 6, 7, 8, time.UTC))
@@ -76,12 +78,12 @@ func BenchmarkAgreementDeserialization(b *testing.B) {
router := makeRootRouter(status)
a := []action{}
- encodedBytes := encode(clock, router, status, a)
+ encodedBytes := encode(clock, router, status, a, false)
t0 := timers.MakeMonotonicClock(time.Date(2000, 0, 0, 0, 0, 0, 0, time.UTC))
log := makeServiceLogger(logging.Base())
b.ResetTimer()
for n := 0; n < b.N; n++ {
- decode(encodedBytes, t0, log)
+ decode(encodedBytes, t0, log, false)
}
}
@@ -163,3 +165,124 @@ func BenchmarkAgreementPersistenceRecovery(b *testing.B) {
restore(serviceLogger{Logger: logging.Base()}, accessor)
}
}
+
+func randomizeDiskState() (rr rootRouter, p player) {
+ p2, err := protocol.RandomizeObject(&player{})
+ if err != nil {
+ return
+ }
+ rr2, err := protocol.RandomizeObject(&rootRouter{})
+ if err != nil {
+ return
+ }
+ p = *(p2.(*player))
+ rr = *(rr2.(*rootRouter))
+ return
+}
+
+func TestRandomizedEncodingFullDiskState(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ for i := 0; i < 5000; i++ {
+ router, player := randomizeDiskState()
+ a := []action{}
+ clock := timers.MakeMonotonicClock(time.Date(2015, 1, 2, 5, 6, 7, 8, time.UTC))
+ log := makeServiceLogger(logging.Base())
+ e1 := encode(clock, router, player, a, true)
+ e2 := encode(clock, router, player, a, false)
+ require.Equalf(t, e1, e2, "msgp and go-codec encodings differ: len(msgp)=%v, len(reflect)=%v", len(e1), len(e2))
+ _, rr1, p1, _, err1 := decode(e1, clock, log, true)
+ _, rr2, p2, _, err2 := decode(e1, clock, log, false)
+ require.NoErrorf(t, err1, "reflect decoding failed")
+ require.NoErrorf(t, err2, "msgp decoding failed")
+ require.Equalf(t, rr1, rr2, "rootRouters decoded differently")
+ require.Equalf(t, p1, p2, "players decoded differently")
+ }
+
+}
+
+func BenchmarkRandomizedEncode(b *testing.B) {
+ clock := timers.MakeMonotonicClock(time.Date(2015, 1, 2, 5, 6, 7, 8, time.UTC))
+ router, player := randomizeDiskState()
+ a := []action{}
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ encode(clock, router, player, a, false)
+ }
+}
+
+func BenchmarkRandomizedDecode(b *testing.B) {
+ clock := timers.MakeMonotonicClock(time.Date(2015, 1, 2, 5, 6, 7, 8, time.UTC))
+ router, player := randomizeDiskState()
+ a := []action{}
+ ds := encode(clock, router, player, a, false)
+ log := makeServiceLogger(logging.Base())
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ decode(ds, clock, log, false)
+ }
+}
+
+func TestEmptyMapDeserialization(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var rr, rr1 rootRouter
+ rr.Children = make(map[basics.Round]*roundRouter)
+ e := protocol.Encode(&rr)
+ err := protocol.Decode(e, &rr1)
+ require.NoError(t, err)
+ require.NotNil(t, rr1.Children)
+
+ var v, v1 voteTracker
+ v.Equivocators = make(map[basics.Address]equivocationVote)
+ ve := protocol.Encode(&v)
+ err = protocol.Decode(ve, &v1)
+ require.NoError(t, err)
+ require.NotNil(t, v1.Equivocators)
+}
+
+func TestDecodeFailures(t *testing.T) {
+ clock := timers.MakeMonotonicClock(time.Date(2015, 1, 2, 5, 6, 7, 8, time.UTC))
+ ce := clock.Encode()
+ log := makeServiceLogger(logging.Base())
+ player := player{Round: 350, Step: soft, Deadline: time.Duration(23) * time.Second}
+ router := makeRootRouter(player)
+ pe := protocol.Encode(&player)
+ re := protocol.Encode(&router)
+
+ // diskState decoding failure
+ {
+ type diskState struct {
+ UnexpectedDiskField int64
+ }
+ uds := diskState{UnexpectedDiskField: 5}
+ udse := protocol.EncodeReflect(uds)
+ _, _, _, _, err := decode(udse, clock, log, false)
+ require.ErrorContains(t, err, "UnexpectedDiskField")
+
+ }
+
+ // player decoding failure
+ {
+ type player struct {
+ UnexpectedPlayerField int64
+ }
+ p := player{UnexpectedPlayerField: 3}
+ pe := protocol.EncodeReflect(p)
+ ds := diskState{Player: pe, Router: re, Clock: ce}
+ dse := protocol.EncodeReflect(ds)
+ _, _, _, _, err := decode(dse, clock, log, false)
+ require.ErrorContains(t, err, "UnexpectedPlayerField")
+ }
+
+ // router decoding failure
+ {
+ type rootRouter struct {
+ UnexpectedRouterField int64
+ }
+ router := rootRouter{UnexpectedRouterField: 5}
+ re := protocol.EncodeReflect(router)
+ ds := diskState{Player: pe, Router: re, Clock: ce}
+ dse := protocol.EncodeReflect(ds)
+ _, _, _, _, err := decode(dse, clock, log, false)
+ require.ErrorContains(t, err, "UnexpectedRouterField")
+ }
+}
diff --git a/agreement/player.go b/agreement/player.go
index 2add5711e..cc29240aa 100644
--- a/agreement/player.go
+++ b/agreement/player.go
@@ -26,6 +26,7 @@ import (
// The player implements the top-level state machine functionality of the
// agreement protocol.
type player struct {
+ _struct struct{} `codec:","`
// Round, Period, and Step hold the current round, period, and step of
// the player state machine.
Round round
@@ -391,7 +392,7 @@ func (p *player) enterRound(r routerHandle, source event, target round) []action
if e.t() == payloadPipelined {
e := e.(payloadProcessedEvent)
- msg := message{MessageHandle: 0, Tag: protocol.ProposalPayloadTag, UnauthenticatedProposal: e.UnauthenticatedPayload} // TODO do we want to keep around the original handle?
+ msg := message{messageHandle: 0, Tag: protocol.ProposalPayloadTag, UnauthenticatedProposal: e.UnauthenticatedPayload} // TODO do we want to keep around the original handle?
a := verifyPayloadAction(messageEvent{T: payloadPresent, Input: msg}, p.Round, e.Period, e.Pinned)
actions = append(actions, a)
}
@@ -570,7 +571,7 @@ func (p *player) handleMessageEvent(r routerHandle, e messageEvent) (actions []a
}
// relay as the proposer
- if e.Input.MessageHandle == nil {
+ if e.Input.messageHandle == nil {
var uv unauthenticatedVote
switch ef.t() {
case payloadPipelined, payloadAccepted:
diff --git a/agreement/player_permutation_test.go b/agreement/player_permutation_test.go
index 251a27622..d7dcf9add 100644
--- a/agreement/player_permutation_test.go
+++ b/agreement/player_permutation_test.go
@@ -69,7 +69,7 @@ func getPlayerPermutation(t *testing.T, n int) (plyr *player, pMachine ioAutomat
plyr.Pending.push(&messageEvent{
T: payloadPresent,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
UnauthenticatedProposal: payload.u(),
},
})
@@ -161,7 +161,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: voteVerified,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
Vote: vvote,
UnauthenticatedVote: vvote.u(),
},
@@ -172,7 +172,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: votePresent,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
UnauthenticatedVote: vvote.u(),
},
Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
@@ -182,7 +182,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: voteVerified,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
Vote: vvote,
UnauthenticatedVote: vvote.u(),
},
@@ -193,7 +193,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: voteVerified,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
Vote: vvote,
UnauthenticatedVote: vvote.u(),
},
@@ -205,7 +205,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: votePresent,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
UnauthenticatedVote: vvote.u(),
},
Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
@@ -214,7 +214,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: payloadPresent,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
UnauthenticatedProposal: payload.u(),
},
}
@@ -222,7 +222,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: payloadVerified,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
UnauthenticatedProposal: payload.u(),
Proposal: *payload,
},
@@ -278,7 +278,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: voteVerified,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
Vote: vvote,
UnauthenticatedVote: vvote.u(),
},
@@ -290,7 +290,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: voteVerified,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
Vote: vvote,
UnauthenticatedVote: vvote.u(),
},
@@ -303,7 +303,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
Input: message{
Bundle: bundle{},
UnauthenticatedBundle: unauthenticatedBundle{},
- MessageHandle: "uniquemalformedBundle",
+ messageHandle: "uniquemalformedBundle",
},
Err: errTestVerifyFailed,
}
diff --git a/agreement/player_test.go b/agreement/player_test.go
index ecd871bc1..3e3cf8167 100644
--- a/agreement/player_test.go
+++ b/agreement/player_test.go
@@ -1877,7 +1877,7 @@ func TestPlayerPropagatesProposalPayload(t *testing.T) {
require.NoError(t, panicErr)
m := message{
- MessageHandle: "msghandle",
+ messageHandle: "msghandle",
UnauthenticatedProposal: payload.u(),
}
inMsg = messageEvent{
@@ -1952,7 +1952,7 @@ func TestPlayerPropagatesProposalPayloadFutureRound(t *testing.T) {
require.NoError(t, panicErr)
m := message{
- MessageHandle: "msghandle",
+ messageHandle: "msghandle",
UnauthenticatedProposal: payload.u(),
}
inMsg = messageEvent{
@@ -2269,7 +2269,7 @@ func TestPlayerDisconnectsFromMalformedProposalVote(t *testing.T) {
return false
}
act := wrapper.action.(networkAction)
- if act.T == disconnect && act.h == m.MessageHandle && act.Err != nil {
+ if act.T == disconnect && act.h == m.messageHandle && act.Err != nil {
return true
}
return false
@@ -2287,7 +2287,7 @@ func TestPlayerIgnoresMalformedPayload(t *testing.T) {
// check ignore on malformed payloads
m := message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
Proposal: proposal{},
UnauthenticatedProposal: unauthenticatedProposal{},
}
@@ -2308,7 +2308,7 @@ func TestPlayerIgnoresMalformedPayload(t *testing.T) {
return false
}
act := wrapper.action.(networkAction)
- if act.T == ignore && act.h == m.MessageHandle && act.Err != nil {
+ if act.T == ignore && act.h == m.messageHandle && act.Err != nil {
return true
}
return false
@@ -2329,7 +2329,7 @@ func TestPlayerDisconnectsFromMalformedVotes(t *testing.T) {
m := message{
Vote: vv,
UnauthenticatedVote: vv.u(),
- MessageHandle: "uniquemalformedvote",
+ messageHandle: "uniquemalformedvote",
}
inMsg := messageEvent{
T: voteVerified,
@@ -2348,7 +2348,7 @@ func TestPlayerDisconnectsFromMalformedVotes(t *testing.T) {
return false
}
act := wrapper.action.(networkAction)
- if act.T == disconnect && act.h == m.MessageHandle && act.Err != nil {
+ if act.T == disconnect && act.h == m.messageHandle && act.Err != nil {
return true
}
return false
@@ -2368,7 +2368,7 @@ func TestPlayerDisconnectsFromMalformedBundles(t *testing.T) {
m := message{
Bundle: bundle{},
UnauthenticatedBundle: unauthenticatedBundle{},
- MessageHandle: "uniquemalformedBundle",
+ messageHandle: "uniquemalformedBundle",
}
inMsg := messageEvent{
Err: verifyError,
@@ -2387,7 +2387,7 @@ func TestPlayerDisconnectsFromMalformedBundles(t *testing.T) {
return false
}
act := wrapper.action.(networkAction)
- if act.T == disconnect && act.h == m.MessageHandle && act.Err != nil {
+ if act.T == disconnect && act.h == m.messageHandle && act.Err != nil {
return true
}
return false
@@ -2524,7 +2524,7 @@ func TestPlayerRequestsPipelinedPayloadVerification(t *testing.T) {
require.NoError(t, panicErr)
m := message{
UnauthenticatedProposal: payloadTwo.u(),
- MessageHandle: "r2",
+ messageHandle: "r2",
}
inMsg = messageEvent{
T: payloadPresent,
diff --git a/agreement/proposalManager.go b/agreement/proposalManager.go
index 8cf03b32f..5abe5f052 100644
--- a/agreement/proposalManager.go
+++ b/agreement/proposalManager.go
@@ -28,7 +28,10 @@ import (
// payload{Present,Verified}, roundInterruption, {soft,cert,next}Threshold.
// It returns the following type(s) of event: none, vote{Filtered,Malformed},
// payload{Pipelined,Rejected,Accepted}, and proposal{Accepted,Committable}.
-type proposalManager struct{}
+
+type proposalManager struct {
+ _struct struct{} `codec:","`
+}
func (m *proposalManager) T() stateMachineTag {
return proposalMachine
diff --git a/agreement/proposalStore.go b/agreement/proposalStore.go
index e375ef92f..841dc91b9 100644
--- a/agreement/proposalStore.go
+++ b/agreement/proposalStore.go
@@ -37,6 +37,7 @@ var proposalAlreadyAssembledCounter = metrics.MakeCounter(
// Once a proposal is successfully validated, it is stored by the
// blockAssembler.
type blockAssembler struct {
+ _struct struct{} `codec:","`
// Pipeline contains a proposal which has not yet been validated. The
// proposal might be inside the cryptoVerifier, or it might be a
// pipelined proposal from the next round.
@@ -53,7 +54,7 @@ type blockAssembler struct {
// for a given proposal-value. When a proposal payload is relayed by
// the state machine, a matching can be concatenated with the vote to
// ensure that peers do not drop the proposal payload.
- Authenticators []vote
+ Authenticators []vote `codec:"Authenticators,allocbound=-"`
}
// pipeline adds the given unvalidated proposal to the blockAssembler, returning
@@ -120,11 +121,12 @@ func (a blockAssembler) trim(p period) blockAssembler {
// It returns the following type(s) of event: none, voteFiltered,
// proposal{Accepted,Committable}, and payload{Pipelined,Rejected}.
type proposalStore struct {
+ _struct struct{} `codec:","`
// Relevant contains a current collection of important proposal-values
// in the round. Relevant is indexed by period, and the proposalValue is
// the last one reported by the corresponding proposalMachinePeriod.
// Each corresponding proposal is tracked in Assemblers.
- Relevant map[period]proposalValue
+ Relevant map[period]proposalValue `codec:"Relevant,allocbound=-"`
// Pinned contains the extra proposal-value, not tracked in Relevant,
// for which a certificate may have formed (i.e., vbar in the spec).
// The proposal corresponding to Pinned is tracked in Assemblers.
@@ -132,7 +134,7 @@ type proposalStore struct {
// Assemblers contains the set of proposal-values currently tracked and
// held by the proposalStore.
- Assemblers map[proposalValue]blockAssembler
+ Assemblers map[proposalValue]blockAssembler `codec:"Assemblers,allocbound=-"`
}
func (store *proposalStore) T() stateMachineTag {
diff --git a/agreement/proposalTable.go b/agreement/proposalTable.go
index 79448f403..b3ef71ac6 100644
--- a/agreement/proposalTable.go
+++ b/agreement/proposalTable.go
@@ -19,22 +19,24 @@ package agreement
// A proposalTable stores proposals which need to be authenticated
// after their prior votes have been processed.
type proposalTable struct {
- Pending map[int]*messageEvent
- PendingNext int
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Pending map[uint64]*messageEvent `codec:"Pending,allocbound=-"`
+ PendingNext uint64
}
// push adds a proposal to the proposalTable.
-func (t *proposalTable) push(e *messageEvent) int {
+func (t *proposalTable) push(e *messageEvent) uint64 {
t.PendingNext++
if t.Pending == nil {
- t.Pending = make(map[int]*messageEvent)
+ t.Pending = make(map[uint64]*messageEvent)
}
t.Pending[t.PendingNext] = e
return t.PendingNext
}
// pop takes a proposal from the proposalTable.
-func (t *proposalTable) pop(taskIndex int) *messageEvent {
+func (t *proposalTable) pop(taskIndex uint64) *messageEvent {
res := t.Pending[taskIndex]
delete(t.Pending, taskIndex)
return res
diff --git a/agreement/proposalTable_test.go b/agreement/proposalTable_test.go
new file mode 100644
index 000000000..af81305fb
--- /dev/null
+++ b/agreement/proposalTable_test.go
@@ -0,0 +1,69 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package agreement
+
+import (
+ "encoding/base64"
+ "testing"
+
+ "github.com/algorand/go-algorand/network"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+// This test is only necessary for transition to msgp encoding
+// of the player state machine for agreement persistence
+func TestProposalTableMsgpEncoding(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ type messageMetadata struct {
+ raw network.IncomingMessage
+ }
+ encoded, err := base64.StdEncoding.DecodeString("gqdQZW5kaW5ngQGHqUNhbmNlbGxlZMKjRXJywKVJbnB1dImmQnVuZGxlgK9Db21wb3VuZE1lc3NhZ2WCqFByb3Bvc2FsgKRWb3RlgK1NZXNzYWdlSGFuZGxlgKhQcm9wb3NhbICjVGFnolBQtVVuYXV0aGVudGljYXRlZEJ1bmRsZYC3VW5hdXRoZW50aWNhdGVkUHJvcG9zYWyAs1VuYXV0aGVudGljYXRlZFZvdGWApFZvdGWApVByb3RvgqNFcnLAp1ZlcnNpb26goVQApFRhaWzAqVRhc2tJbmRleD+rUGVuZGluZ05leHQB")
+ require.NoError(t, err)
+
+ // run on master a3e90ad to get the encoded data for above
+ // pt := proposalTable{}
+ // msg := messageEvent{
+ // Input: message{
+ // Tag: protocol.ProposalPayloadTag,
+ // MessageHandle: &messageMetadata{raw: network.IncomingMessage{Tag: protocol.Tag("mytag"), Data: []byte("some data")}},
+ // },
+ // TaskIndex: 63}
+ // pt.push(&msg)
+ // result := protocol.EncodeReflect(&pt)
+ // fmt.Println(base64.StdEncoding.EncodeToString(result))
+
+ var ptMsgp, ptReflect proposalTable
+ err = protocol.Decode(encoded, &ptMsgp)
+ require.NoError(t, err)
+ err = protocol.DecodeReflect(encoded, &ptReflect)
+ require.NoError(t, err)
+
+ msgMsgp := ptMsgp.pop(ptMsgp.PendingNext)
+ msgReflect := ptReflect.pop(ptReflect.PendingNext)
+
+ // After setting MessageHandle to nil they should be the same
+ msgMsgp.Input.MessageHandle = nil
+ msgReflect.Input.MessageHandle = nil
+ require.Equal(t, msgMsgp, msgReflect)
+ // Check that the other fields we have manually set are still the same
+ require.Equal(t, msgMsgp.Input.Tag, protocol.ProposalPayloadTag)
+ require.Equal(t, msgMsgp.TaskIndex, uint64(63))
+
+}
diff --git a/agreement/proposalTracker.go b/agreement/proposalTracker.go
index c76c5c9fd..e3efcd372 100644
--- a/agreement/proposalTracker.go
+++ b/agreement/proposalTracker.go
@@ -25,6 +25,7 @@ import (
// A proposalSeeker finds the vote with the lowest credential until freeze() is
// called.
type proposalSeeker struct {
+ _struct struct{} `codec:","`
// Lowest contains the vote with the lowest credential seen so far.
Lowest vote
// Filled is set if any vote has been seen.
@@ -66,10 +67,11 @@ func (s proposalSeeker) freeze() proposalSeeker {
// It returns the following type(s) of event: voteFiltered, proposalAccepted, readStaging,
// and proposalFrozen.
type proposalTracker struct {
+ _struct struct{} `codec:","`
// Duplicate holds the set of senders which has been seen by the
// proposalTracker. A duplicate proposal-vote or an equivocating
// proposal-vote is dropped by a proposalTracker.
- Duplicate map[basics.Address]bool
+ Duplicate map[basics.Address]bool `codec:"Duplicate,allocbound=-"`
// Freezer holds a proposalSeeker, which seeks the proposal-vote with
// the lowest credential seen by the proposalTracker.
Freezer proposalSeeker
diff --git a/agreement/proposalTrackerContract.go b/agreement/proposalTrackerContract.go
index c33feb841..bbe4911c2 100644
--- a/agreement/proposalTrackerContract.go
+++ b/agreement/proposalTrackerContract.go
@@ -21,6 +21,8 @@ import (
)
type proposalTrackerContract struct {
+ _struct struct{} `codec:","`
+
SawOneVote bool
Froze bool
SawSoftThreshold bool
diff --git a/agreement/pseudonode.go b/agreement/pseudonode.go
index 06a91b210..6075e8ebf 100644
--- a/agreement/pseudonode.go
+++ b/agreement/pseudonode.go
@@ -387,7 +387,7 @@ func (t pseudonodeVotesTask) execute(verifier *AsyncVoteVerifier, quit chan stru
asyncVerifyingVotes := len(unverifiedVotes)
for i, uv := range unverifiedVotes {
msg := message{Tag: protocol.AgreementVoteTag, UnauthenticatedVote: uv}
- err := verifier.verifyVote(context.TODO(), t.node.ledger, uv, i, msg, results)
+ err := verifier.verifyVote(context.TODO(), t.node.ledger, uv, uint64(i), msg, results)
if err != nil {
orderedResults[i].err = err
t.node.log.Infof("pseudonode.makeVotes: failed to enqueue vote verification for (%d, %d): %v", t.round, t.period, err)
@@ -515,7 +515,7 @@ func (t pseudonodeProposalsTask) execute(verifier *AsyncVoteVerifier, quit chan
asyncVerifyingVotes := len(votes)
for i, uv := range votes {
msg := message{Tag: protocol.AgreementVoteTag, UnauthenticatedVote: uv}
- err := verifier.verifyVote(context.TODO(), t.node.ledger, uv, i, msg, results)
+ err := verifier.verifyVote(context.TODO(), t.node.ledger, uv, uint64(i), msg, results)
if err != nil {
cryptoOutputs[i].err = err
t.node.log.Infof("pseudonode.makeProposals: failed to enqueue vote verification for (%d, %d): %v", t.round, t.period, err)
diff --git a/agreement/router.go b/agreement/router.go
index fd0638a01..f6f0ea010 100644
--- a/agreement/router.go
+++ b/agreement/router.go
@@ -66,6 +66,8 @@ type router interface {
}
type rootRouter struct {
+ _struct struct{} `codec:","`
+
root actor // playerMachine (not restored: explicitly set on construction)
proposalRoot listener // proposalMachine
voteRoot listener // voteMachine
@@ -73,20 +75,24 @@ type rootRouter struct {
ProposalManager proposalManager
VoteAggregator voteAggregator
- Children map[round]*roundRouter
+ Children map[round]*roundRouter `codec:"Children,allocbound=-"`
}
type roundRouter struct {
+ _struct struct{} `codec:","`
+
proposalRoot listener // proposalMachineRound
voteRoot listener // voteMachineRound
ProposalStore proposalStore
VoteTrackerRound voteTrackerRound
- Children map[period]*periodRouter
+ Children map[period]*periodRouter `codec:"Children,allocbound=-"`
}
type periodRouter struct {
+ _struct struct{} `codec:","`
+
proposalRoot listener // proposalMachinePeriod
voteRoot listener // voteMachinePeriod
@@ -95,10 +101,11 @@ type periodRouter struct {
ProposalTrackerContract proposalTrackerContract
- Children map[step]*stepRouter
+ Children map[step]*stepRouter `codec:"Children,allocbound=-"`
}
type stepRouter struct {
+ _struct struct{} `codec:","`
voteRoot listener // voteMachineStep
VoteTracker voteTracker
diff --git a/agreement/service.go b/agreement/service.go
index 346234950..00b192b5c 100644
--- a/agreement/service.go
+++ b/agreement/service.go
@@ -191,7 +191,7 @@ func (s *Service) mainLoop(input <-chan externalEvent, output chan<- []action, r
var err error
raw, err := restore(s.log, s.Accessor)
if err == nil {
- clock, router, status, a, err = decode(raw, s.Clock, s.log)
+ clock, router, status, a, err = decode(raw, s.Clock, s.log, false)
if err != nil {
reset(s.log, s.Accessor)
} else {
@@ -246,7 +246,7 @@ func (s *Service) mainLoop(input <-chan externalEvent, output chan<- []action, r
// usage semantics : caller should ensure to call this function only when we have participation
// keys for the given voting round.
func (s *Service) persistState(done chan error) (events <-chan externalEvent) {
- raw := encode(s.Clock, s.persistRouter, s.persistStatus, s.persistActions)
+ raw := encode(s.Clock, s.persistRouter, s.persistStatus, s.persistActions, false)
return s.persistenceLoop.Enqueue(s.Clock, s.persistStatus.Round, s.persistStatus.Period, s.persistStatus.Step, raw, done)
}
diff --git a/agreement/sort.go b/agreement/sort.go
new file mode 100644
index 000000000..21fa3fc27
--- /dev/null
+++ b/agreement/sort.go
@@ -0,0 +1,84 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package agreement
+
+import (
+ "bytes"
+
+ "github.com/algorand/go-algorand/data/basics"
+)
+
+// These types are defined to satisfy the sort interface used by msgp
+// for canonical encoding of maps in msgpack format.
+
+// SortAddress is re-exported from basics.Address since the interface is already defined there
+//msgp:sort basics.Address SortAddress
+type SortAddress = basics.SortAddress
+
+// SortUint64 is re-exported from basics since the interface is already defined there
+type SortUint64 = basics.SortUint64
+
+// SortStep defines SortInterface used by msgp to consistently sort maps with this type as key.
+//msgp:ignore SortStep
+//msgp:sort step SortStep
+type SortStep []step
+
+func (a SortStep) Len() int { return len(a) }
+func (a SortStep) Less(i, j int) bool { return a[i] < a[j] }
+func (a SortStep) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// SortPeriod defines SortInterface used by msgp to consistently sort maps with this type as key.
+//msgp:ignore SortPeriod
+//msgp:sort period SortPeriod
+type SortPeriod []period
+
+func (a SortPeriod) Len() int { return len(a) }
+func (a SortPeriod) Less(i, j int) bool { return a[i] < a[j] }
+func (a SortPeriod) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// SortRound defines SortInterface used by msgp to consistently sort maps with this type as key.
+//msgp:ignore SortRound
+//msgp:sort round SortRound
+type SortRound []round
+
+func (a SortRound) Len() int { return len(a) }
+func (a SortRound) Less(i, j int) bool { return a[i] < a[j] }
+func (a SortRound) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// SortProposalValue defines SortInterface used by msgp to consistently sort maps with this type as key.
+//msgp:ignore SortProposalValue
+//msgp:sort proposalValue SortProposalValue
+type SortProposalValue []proposalValue
+
+func (a SortProposalValue) Len() int { return len(a) }
+func (a SortProposalValue) Less(i, j int) bool {
+ if a[i].OriginalPeriod != a[j].OriginalPeriod {
+ return a[i].OriginalPeriod < a[j].OriginalPeriod
+ }
+ cmp := bytes.Compare(a[i].OriginalProposer[:], a[j].OriginalProposer[:])
+ if cmp != 0 {
+ return cmp < 0
+ }
+ cmp = bytes.Compare(a[i].BlockDigest[:], a[j].BlockDigest[:])
+ if cmp != 0 {
+ return cmp < 0
+ }
+ cmp = bytes.Compare(a[i].EncodingDigest[:], a[j].EncodingDigest[:])
+ return cmp < 0
+}
+
+func (a SortProposalValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
diff --git a/agreement/sort_test.go b/agreement/sort_test.go
new file mode 100644
index 000000000..8240e5eff
--- /dev/null
+++ b/agreement/sort_test.go
@@ -0,0 +1,60 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package agreement
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSortProposalValueLess(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // initialize a new digest with all bytes being 'a'
+ d1 := new(crypto.Digest)
+ for i := range d1 {
+ d1[i] = byte('a')
+ }
+ p1 := proposalValue{
+ OriginalPeriod: 1,
+ OriginalProposer: basics.Address(*d1),
+ BlockDigest: *d1,
+ EncodingDigest: *d1,
+ }
+ sp := SortProposalValue{p1, p1}
+ // They are both equal so Less should return false regardless of order
+ require.Falsef(t, sp.Less(0, 1), "%v < %v is true for equal values", sp[0], sp[1])
+ require.Falsef(t, sp.Less(1, 0), "%v < %v is true for equal values", sp[1], sp[0])
+
+ // working our way backwards from the order of checks in sortProposalValue.Less()
+ // the test is tied to the implementation because it defines what the canonical order of checks is
+ sp[1].EncodingDigest[3] = byte('b')
+ require.Truef(t, sp.Less(0, 1), "expected %v < % v", sp[0], sp[1])
+ sp[0].BlockDigest[3] = byte('b')
+ require.Falsef(t, sp.Less(0, 1), "expected %v >= %v", sp[0], sp[1])
+ sp[1].BlockDigest[3] = byte('c')
+ require.Truef(t, sp.Less(0, 1), "expected %v < %v", sp[0], sp[1])
+ sp[0].OriginalProposer[3] = byte('b')
+ require.Falsef(t, sp.Less(0, 1), "expected %v >= %v", sp[0], sp[1])
+ sp[1].OriginalProposer[3] = byte('c')
+ require.Truef(t, sp.Less(0, 1), "expected %v < %v", sp[0], sp[1])
+ sp[0].OriginalPeriod = 2
+ require.Falsef(t, sp.Less(0, 1), "expected %v >= %v", sp[0], sp[1])
+}
diff --git a/agreement/voteAggregator.go b/agreement/voteAggregator.go
index 057f9d14f..196e91a3b 100644
--- a/agreement/voteAggregator.go
+++ b/agreement/voteAggregator.go
@@ -29,7 +29,9 @@ import (
// bundlePresent, and bundleVerified.
// It returns the following type(s) of event: none, vote{Filtered,Malformed},
// bundle{Filtered,Malformed}, and {soft,cert,next}Threshold.
-type voteAggregator struct{}
+type voteAggregator struct {
+ _struct struct{} `codec:","`
+}
func (agg *voteAggregator) T() stateMachineTag {
return voteMachine
diff --git a/agreement/voteAggregator_test.go b/agreement/voteAggregator_test.go
index f23bd4d56..8795a0b36 100644
--- a/agreement/voteAggregator_test.go
+++ b/agreement/voteAggregator_test.go
@@ -900,7 +900,7 @@ func TestVoteAggregatorOldVote(t *testing.T) {
results := make(chan asyncVerifyVoteResponse, len(uvs))
for i, uv := range uvs {
- avv.verifyVote(context.Background(), ledger, uv, i, message{}, results)
+ avv.verifyVote(context.Background(), ledger, uv, uint64(i), message{}, results)
result := <-results
require.True(t, result.cancelled)
}
diff --git a/agreement/voteAuxiliary.go b/agreement/voteAuxiliary.go
index 0c6e85c47..d99a01139 100644
--- a/agreement/voteAuxiliary.go
+++ b/agreement/voteAuxiliary.go
@@ -19,6 +19,7 @@ package agreement
// A voteTrackerPeriod is a voteMachinePeriod which indicates whether a
// next-threshold of votes was observed for some value in a period.
type voteTrackerPeriod struct {
+ _struct struct{} `codec:","`
// Make it explicit that we are serializing player fields for crash recovery;
// we should probably adopt this convention over the rest of player at some point.
Cached nextThresholdStatusEvent
@@ -99,6 +100,7 @@ func (t *voteTrackerPeriod) handle(r routerHandle, p player, e event) event {
// It returns the following type(s) of event: none and
// {soft,cert,next}Threshold, and freshestBundle
type voteTrackerRound struct {
+ _struct struct{} `codec:","`
// Freshest holds the freshest thresholdEvent seen this round.
Freshest thresholdEvent
// Ok is set if any thresholdEvent has been seen.
diff --git a/agreement/voteTracker.go b/agreement/voteTracker.go
index d0f717abd..394584015 100644
--- a/agreement/voteTracker.go
+++ b/agreement/voteTracker.go
@@ -27,8 +27,10 @@ import (
)
type proposalVoteCounter struct {
+ _struct struct{} `codec:","`
+
Count uint64
- Votes map[basics.Address]vote
+ Votes map[basics.Address]vote `codec:"Votes,allocbound=-"`
}
// A voteTracker is a voteMachineStep which handles duplication and
@@ -40,20 +42,21 @@ type proposalVoteCounter struct {
// It returns the following type(s) of event: none and
// {soft,cert,next}Threshold.
type voteTracker struct {
+ _struct struct{} `codec:","`
// Voters holds the set of voters which have voted in the current step.
// It is used to track whether a voter has equivocated.
- Voters map[basics.Address]vote
+ Voters map[basics.Address]vote `codec:"Voters,allocbound=-"`
// Counts holds the weighted sum of the votes for a given proposal.
// It also holds the individual votes.
// preconditions :
// Any proposalValue in Counts is guaranteed to contain at least one vote
- Counts map[proposalValue]proposalVoteCounter
+ Counts map[proposalValue]proposalVoteCounter `codec:"Counts,allocbound=-"`
// Equivocators holds the set of voters which have already equivocated
// once. Future votes from these voters are dropped and not
// propagated.
- Equivocators map[basics.Address]equivocationVote
+ Equivocators map[basics.Address]equivocationVote `codec:"Equivocators,allocbound=-"`
// EquivocatorsCount holds the number of equivocating votes which count
// for any proposal-value.
diff --git a/agreement/voteTrackerContract.go b/agreement/voteTrackerContract.go
index ca90a74a8..ad1585e6d 100644
--- a/agreement/voteTrackerContract.go
+++ b/agreement/voteTrackerContract.go
@@ -42,6 +42,8 @@ import (
// Trace properties
// - voteFilterRequest is idempotent
type voteTrackerContract struct {
+ _struct struct{} `codec:","`
+
Step step
StepOk bool
diff --git a/buildnumber.dat b/buildnumber.dat
index 00750edc0..0cfbf0888 100644
--- a/buildnumber.dat
+++ b/buildnumber.dat
@@ -1 +1 @@
-3
+2
diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go
index c55b3ea8d..41c75172e 100644
--- a/catchup/catchpointService.go
+++ b/catchup/catchpointService.go
@@ -53,11 +53,15 @@ type CatchpointCatchupStats struct {
TotalAccounts uint64
ProcessedAccounts uint64
VerifiedAccounts uint64
+ TotalKVs uint64
+ ProcessedKVs uint64
+ VerifiedKVs uint64
TotalBlocks uint64
AcquiredBlocks uint64
VerifiedBlocks uint64
ProcessedBytes uint64
TotalAccountHashes uint64
+ TotalKVHashes uint64
StartTime time.Time
}
@@ -303,7 +307,7 @@ func (cs *CatchpointCatchupService) processStageLedgerDownload() (err error) {
if err == nil {
cs.log.Infof("ledger downloaded in %d seconds", time.Since(start)/time.Second)
start = time.Now()
- err = cs.ledgerAccessor.BuildMerkleTrie(cs.ctx, cs.updateVerifiedAccounts)
+ err = cs.ledgerAccessor.BuildMerkleTrie(cs.ctx, cs.updateVerifiedCounts)
if err == nil {
cs.log.Infof("built merkle trie in %d seconds", time.Since(start)/time.Second)
break
@@ -335,12 +339,17 @@ func (cs *CatchpointCatchupService) processStageLedgerDownload() (err error) {
return nil
}
-// updateVerifiedAccounts update the user's statistics for the given verified accounts
-func (cs *CatchpointCatchupService) updateVerifiedAccounts(addedTrieHashes uint64) {
+// updateVerifiedCounts updates the user's statistics for the given verified hashes
+func (cs *CatchpointCatchupService) updateVerifiedCounts(accountCount, kvCount uint64) {
cs.statsMu.Lock()
defer cs.statsMu.Unlock()
+
if cs.stats.TotalAccountHashes > 0 {
- cs.stats.VerifiedAccounts = cs.stats.TotalAccounts * addedTrieHashes / cs.stats.TotalAccountHashes
+ cs.stats.VerifiedAccounts = cs.stats.TotalAccounts * accountCount / cs.stats.TotalAccountHashes
+ }
+
+ if cs.stats.TotalKVs > 0 {
+ cs.stats.VerifiedKVs = kvCount
}
}
@@ -756,6 +765,8 @@ func (cs *CatchpointCatchupService) updateLedgerFetcherProgress(fetcherStats *le
defer cs.statsMu.Unlock()
cs.stats.TotalAccounts = fetcherStats.TotalAccounts
cs.stats.ProcessedAccounts = fetcherStats.ProcessedAccounts
+ cs.stats.TotalKVs = fetcherStats.TotalKVs
+ cs.stats.ProcessedKVs = fetcherStats.ProcessedKVs
cs.stats.ProcessedBytes = fetcherStats.ProcessedBytes
cs.stats.TotalAccountHashes = fetcherStats.TotalAccountHashes
}
diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go
index 44e9ab19f..3c83baae0 100644
--- a/catchup/fetcher_test.go
+++ b/catchup/fetcher_test.go
@@ -148,7 +148,7 @@ func (b *basicRPCNode) RegisterHandlers(dispatch []network.TaggedMessageHandler)
func (b *basicRPCNode) start() bool {
var err error
- b.listener, err = net.Listen("tcp", "")
+ b.listener, err = net.Listen("tcp", "127.0.0.1:")
if err != nil {
logging.Base().Error("tcp listen", err)
return false
diff --git a/catchup/ledgerFetcher.go b/catchup/ledgerFetcher.go
index fa965a154..30c5ccb3b 100644
--- a/catchup/ledgerFetcher.go
+++ b/catchup/ledgerFetcher.go
@@ -39,8 +39,8 @@ import (
var errNoLedgerForRound = errors.New("no ledger available for given round")
const (
- // maxCatchpointFileChunkSize is a rough estimate for the worst-case scenario we're going to have of all the accounts data per a single catchpoint file chunk.
- maxCatchpointFileChunkSize = ledger.BalancesPerCatchpointFileChunk * basics.MaxEncodedAccountDataSize
+ // maxCatchpointFileChunkSize is a rough estimate for the worst-case scenario we're going to have of all the accounts data per a single catchpoint file chunk and one account with max resources.
+ maxCatchpointFileChunkSize = ledger.BalancesPerCatchpointFileChunk*(ledger.MaxEncodedBaseAccountDataSize+ledger.MaxEncodedKVDataSize) + ledger.ResourcesPerCatchpointFileChunk*ledger.MaxEncodedBaseResourceDataSize
// defaultMinCatchpointFileDownloadBytesPerSecond defines the worst-case scenario download speed we expect to get while downloading a catchpoint file
defaultMinCatchpointFileDownloadBytesPerSecond = 20 * 1024
// catchpointFileStreamReadSize defines the number of bytes we would attempt to read at each iteration from the incoming http data stream
@@ -146,10 +146,12 @@ func (lf *ledgerFetcher) getPeerLedger(ctx context.Context, peer network.HTTPPee
"writing balances to disk took %d seconds, "+
"writing creatables to disk took %d seconds, "+
"writing hashes to disk took %d seconds, "+
+ "writing kv pairs to disk took %d seconds, "+
"total duration is %d seconds",
downloadProgress.BalancesWriteDuration/time.Second,
downloadProgress.CreatablesWriteDuration/time.Second,
downloadProgress.HashesWriteDuration/time.Second,
+ downloadProgress.KVWriteDuration/time.Second,
writeDuration/time.Second)
}
@@ -191,5 +193,5 @@ func (lf *ledgerFetcher) getPeerLedger(ctx context.Context, peer network.HTTPPee
}
func (lf *ledgerFetcher) processBalancesBlock(ctx context.Context, sectionName string, bytes []byte, downloadProgress *ledger.CatchpointCatchupAccessorProgress) error {
- return lf.accessor.ProgressStagingBalances(ctx, sectionName, bytes, downloadProgress)
+ return lf.accessor.ProcessStagingBalances(ctx, sectionName, bytes, downloadProgress)
}
diff --git a/catchup/service.go b/catchup/service.go
index 1ebaf0fd3..1043718fe 100644
--- a/catchup/service.go
+++ b/catchup/service.go
@@ -124,6 +124,7 @@ func MakeService(log logging.Logger, config config.Local, net network.GossipNode
func (s *Service) Start() {
s.done = make(chan struct{})
s.ctx, s.cancel = context.WithCancel(context.Background())
+ atomic.StoreUint32(&s.initialSyncNotified, 0)
s.InitialSyncDone = make(chan struct{})
go s.periodicSync()
}
diff --git a/cmd/algoh/blockWatcher.go b/cmd/algoh/blockWatcher.go
index cc55b2d49..43da894d0 100644
--- a/cmd/algoh/blockWatcher.go
+++ b/cmd/algoh/blockWatcher.go
@@ -20,15 +20,16 @@ import (
"sync"
"time"
- "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/rpcs"
)
var log = logging.Base()
type blockListener interface {
init(uint64)
- onBlock(v1.Block)
+ onBlock(rpcs.EncodedBlockCert)
}
type blockWatcher struct {
@@ -75,7 +76,8 @@ func (bw *blockWatcher) run(watchers []blockListener, stallDetect time.Duration,
for {
// Inner loop needed during catchup.
for {
- block, err := bw.client.Block(curBlock)
+ // Get the raw block from the client, then parse the block so we can get the bookkeeping block and certificate for proposer address.
+ resp, err := bw.client.RawBlock(curBlock)
// Generally this error will be due to the new block not being ready. In the case of a stall we will
// return, causing the loop to restart and handle any possible stall/catchup.
@@ -89,6 +91,13 @@ func (bw *blockWatcher) run(watchers []blockListener, stallDetect time.Duration,
break
}
+ // Parse the raw block
+ var block rpcs.EncodedBlockCert
+ err = protocol.DecodeReflect(resp, &block)
+ if err != nil {
+ return false
+ }
+
curBlock++
for _, watcher := range watchers {
watcher.onBlock(block)
diff --git a/cmd/algoh/blockWatcher_test.go b/cmd/algoh/blockWatcher_test.go
index f3efd1aaf..1e4db0573 100644
--- a/cmd/algoh/blockWatcher_test.go
+++ b/cmd/algoh/blockWatcher_test.go
@@ -22,7 +22,7 @@ import (
"testing"
"time"
- "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ "github.com/algorand/go-algorand/rpcs"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/stretchr/testify/require"
)
@@ -110,7 +110,7 @@ func (l *testlistener) init(block uint64) {
atomic.AddUint32(&(l.initCount), 1)
}
-func (l *testlistener) onBlock(block v1.Block) {
+func (l *testlistener) onBlock(rpcs.EncodedBlockCert) {
atomic.AddUint32(&(l.blockCount), 1)
}
diff --git a/cmd/algoh/blockstats.go b/cmd/algoh/blockstats.go
index 74a629d32..590cfb435 100644
--- a/cmd/algoh/blockstats.go
+++ b/cmd/algoh/blockstats.go
@@ -19,8 +19,8 @@ package main
import (
"time"
- "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/logging/telemetryspec"
+ "github.com/algorand/go-algorand/rpcs"
)
const downtimeLimit time.Duration = 5 * time.Minute
@@ -34,20 +34,21 @@ type blockstats struct {
func (stats *blockstats) init(block uint64) {
}
-func (stats *blockstats) onBlock(block v1.Block) {
+func (stats *blockstats) onBlock(block rpcs.EncodedBlockCert) {
now := time.Now()
+ blockHeader := block.Block.BlockHeader
// Ensure we only create stats from consecutive blocks.
- if stats.lastBlock+1 != block.Round {
- stats.lastBlock = block.Round
+ if stats.lastBlock+1 != uint64(blockHeader.Round) {
+ stats.lastBlock = uint64(blockHeader.Round)
stats.lastBlockTime = now
return
}
// Grab unique users.
users := make(map[string]bool)
- for _, tx := range block.Transactions.Transactions {
- users[tx.From] = true
+ for _, tx := range block.Block.Payset {
+ users[tx.Txn.Sender.String()] = true
}
duration := now.Sub(stats.lastBlockTime)
@@ -57,15 +58,15 @@ func (stats *blockstats) onBlock(block v1.Block) {
}
stats.log.EventWithDetails(telemetryspec.Agreement, telemetryspec.BlockStatsEvent, telemetryspec.BlockStatsEventDetails{
- Hash: block.Hash,
- OriginalProposer: block.Proposer,
- Round: block.Round,
- Transactions: uint64(len(block.Transactions.Transactions)),
+ Hash: block.Block.Hash().String(),
+ OriginalProposer: block.Certificate.Proposal.OriginalProposer.String(),
+ Round: uint64(blockHeader.Round),
+ Transactions: uint64(len(block.Block.Payset)),
ActiveUsers: uint64(len(users)),
AgreementDurationMs: uint64(duration.Nanoseconds() / 1000 / 1000),
NetworkDowntimeMs: uint64(downtime.Nanoseconds() / 1000 / 1000),
})
- stats.lastBlock = block.Round
+ stats.lastBlock = uint64(blockHeader.Round)
stats.lastBlockTime = now
}
diff --git a/cmd/algoh/blockstats_test.go b/cmd/algoh/blockstats_test.go
index d0cafaf3f..0398404dd 100644
--- a/cmd/algoh/blockstats_test.go
+++ b/cmd/algoh/blockstats_test.go
@@ -20,8 +20,11 @@ import (
"testing"
"time"
- "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging/telemetryspec"
+ "github.com/algorand/go-algorand/rpcs"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/stretchr/testify/require"
)
@@ -31,7 +34,7 @@ type event struct {
identifier telemetryspec.Event
- details interface{}
+ details telemetryspec.BlockStatsEventDetails
}
type MockEventSender struct {
@@ -39,7 +42,12 @@ type MockEventSender struct {
}
func (mes *MockEventSender) EventWithDetails(category telemetryspec.Category, identifier telemetryspec.Event, details interface{}) {
- mes.events = append(mes.events, event{category: category, identifier: identifier, details: details})
+ mes.events = append(mes.events, event{category: category, identifier: identifier, details: details.(telemetryspec.BlockStatsEventDetails)})
+}
+
+// Helper method to create an EncodedBlockCert for the block handler.
+func makeTestBlock(round uint64) rpcs.EncodedBlockCert {
+ return rpcs.EncodedBlockCert{Block: bookkeeping.Block{BlockHeader: bookkeeping.BlockHeader{Round: basics.Round(round)}}}
}
func TestConsecutiveBlocks(t *testing.T) {
@@ -47,17 +55,57 @@ func TestConsecutiveBlocks(t *testing.T) {
sender := MockEventSender{}
bs := blockstats{log: &sender}
- bs.onBlock(v1.Block{Round: 300})
+ bs.onBlock(makeTestBlock(300))
// first consecutive block
- bs.onBlock(v1.Block{Round: 301})
+ bs.onBlock(makeTestBlock(301))
// reset
- bs.onBlock(v1.Block{Round: 303})
+ bs.onBlock(makeTestBlock(303))
// second consecutive block
- bs.onBlock(v1.Block{Round: 304})
+ bs.onBlock(makeTestBlock(304))
require.Equal(t, 2, len(sender.events))
}
+func TestEventWithDetails(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ sender := MockEventSender{}
+ bs := blockstats{log: &sender}
+
+ // Create blocks with some senders in the payload.
+ makeStxnWithAddr := func(addr basics.Address) transactions.SignedTxnInBlock {
+ return transactions.SignedTxnInBlock{SignedTxnWithAD: transactions.SignedTxnWithAD{SignedTxn: transactions.SignedTxn{Txn: transactions.Transaction{Header: transactions.Header{Sender: addr}}}}}
+ }
+ addr := basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+ otherAddr := basics.Address{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21}
+ // Check that only unique addrs are returned by ActiveUsers.
+ stxn1 := makeStxnWithAddr(addr)
+ stxn2 := makeStxnWithAddr(otherAddr)
+ stxn3 := makeStxnWithAddr(addr)
+ // Make block with some transactions.
+ testBlock := makeTestBlock(300)
+ testBlock.Block.Payset = transactions.Payset{stxn1, stxn2, stxn3}
+
+ bs.onBlock(makeTestBlock(299))
+ bs.onBlock(testBlock)
+ bs.onBlock(makeTestBlock(301))
+
+ testCases := []struct {
+ round uint64
+ activeUsers uint64
+ txns uint64
+ }{
+ {uint64(300), uint64(2), uint64(3)},
+ {uint64(301), uint64(0), uint64(0)},
+ }
+
+ require.Equal(t, 2, len(sender.events))
+ for i, event := range sender.events {
+ require.Equal(t, testCases[i].round, event.details.Round)
+ require.Equal(t, testCases[i].activeUsers, event.details.ActiveUsers)
+ require.Equal(t, testCases[i].txns, event.details.Transactions)
+ }
+}
+
func TestAgreementTime(t *testing.T) {
partitiontest.PartitionTest(t)
sleepTime := 50 * time.Millisecond
@@ -70,13 +118,13 @@ func TestAgreementTime(t *testing.T) {
bs := blockstats{log: &sender}
start := time.Now()
- bs.onBlock(v1.Block{Round: 300})
+ bs.onBlock(makeTestBlock(300))
time.Sleep(sleepTime)
- bs.onBlock(v1.Block{Round: 301})
+ bs.onBlock(makeTestBlock(301))
end := time.Now()
require.Equal(t, 1, len(sender.events))
- details := sender.events[0].details.(telemetryspec.BlockStatsEventDetails)
+ details := sender.events[0].details
// Test to see that the wait duration is at least the amount of time we slept
require.True(t, int(details.AgreementDurationMs) >= int(sleepTime)/int(time.Millisecond))
diff --git a/cmd/algoh/client.go b/cmd/algoh/client.go
index 89a7c27a3..6d25759ec 100644
--- a/cmd/algoh/client.go
+++ b/cmd/algoh/client.go
@@ -19,14 +19,13 @@ package main
import (
"context"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
- "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
)
// Client is a minimal interface for the RestClient
type Client interface {
- Status() (generatedV2.NodeStatusResponse, error)
- Block(round uint64) (v1.Block, error)
+ Status() (model.NodeStatusResponse, error)
+ RawBlock(round uint64) ([]byte, error)
GetGoRoutines(ctx context.Context) (string, error)
HealthCheck() error
}
diff --git a/cmd/algoh/deadman.go b/cmd/algoh/deadman.go
index 1c302e880..a15046f07 100644
--- a/cmd/algoh/deadman.go
+++ b/cmd/algoh/deadman.go
@@ -23,8 +23,8 @@ import (
"time"
"github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/logging/telemetryspec"
+ "github.com/algorand/go-algorand/rpcs"
)
type deadManWatcher struct {
@@ -88,8 +88,8 @@ func (w deadManWatcher) run(initBlock uint64) {
}
}
-func (w deadManWatcher) onBlock(block v1.Block) {
- w.newBlockChan <- block.Round
+func (w deadManWatcher) onBlock(block rpcs.EncodedBlockCert) {
+ w.newBlockChan <- uint64(block.Block.BlockHeader.Round)
}
func (w deadManWatcher) reportDeadManTimeout(curBlock uint64) (err error) {
diff --git a/cmd/algoh/mockClient.go b/cmd/algoh/mockClient.go
index d9d81387e..ce532a4f4 100644
--- a/cmd/algoh/mockClient.go
+++ b/cmd/algoh/mockClient.go
@@ -20,26 +20,29 @@ import (
"context"
"fmt"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
- "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/rpcs"
)
//////////////////////////////////////
// Helpers to initialize mockClient //
//////////////////////////////////////
-func makeNodeStatuses(blocks ...uint64) (ret []generatedV2.NodeStatusResponse) {
- ret = make([]generatedV2.NodeStatusResponse, 0, len(blocks))
+func makeNodeStatuses(blocks ...uint64) (ret []model.NodeStatusResponse) {
+ ret = make([]model.NodeStatusResponse, 0, len(blocks))
for _, block := range blocks {
- ret = append(ret, generatedV2.NodeStatusResponse{LastRound: block})
+ ret = append(ret, model.NodeStatusResponse{LastRound: block})
}
return ret
}
-func makeBlocks(blocks ...uint64) (ret map[uint64]v1.Block) {
- ret = map[uint64]v1.Block{}
+func makeBlocks(blocks ...uint64) (ret map[uint64]rpcs.EncodedBlockCert) {
+ ret = map[uint64]rpcs.EncodedBlockCert{}
for _, block := range blocks {
- ret[block] = v1.Block{Round: block}
+ ret[block] = rpcs.EncodedBlockCert{Block: bookkeeping.Block{BlockHeader: bookkeeping.BlockHeader{Round: basics.Round(block)}}}
}
return ret
}
@@ -52,12 +55,12 @@ type mockClient struct {
GetGoRoutinesCalls int
HealthCheckCalls int
error []error
- status []generatedV2.NodeStatusResponse
+ status []model.NodeStatusResponse
routine []string
- block map[uint64]v1.Block
+ block map[uint64]rpcs.EncodedBlockCert
}
-func makeMockClient(error []error, status []generatedV2.NodeStatusResponse, block map[uint64]v1.Block, routine []string) mockClient {
+func makeMockClient(error []error, status []model.NodeStatusResponse, block map[uint64]rpcs.EncodedBlockCert, routine []string) mockClient {
return mockClient{
BlockCalls: make(map[uint64]int),
error: error,
@@ -79,7 +82,7 @@ func (c *mockClient) nextError() (e error) {
return
}
-func (c *mockClient) Status() (s generatedV2.NodeStatusResponse, e error) {
+func (c *mockClient) Status() (s model.NodeStatusResponse, e error) {
c.StatusCalls++
s = c.status[0]
// Repeat last status...
@@ -90,15 +93,16 @@ func (c *mockClient) Status() (s generatedV2.NodeStatusResponse, e error) {
return
}
-func (c *mockClient) Block(block uint64) (b v1.Block, e error) {
+func (c *mockClient) RawBlock(block uint64) (b []byte, e error) {
c.BlockCalls[block]++
e = c.nextError()
- b, ok := c.block[block]
+ bl, ok := c.block[block]
if !ok {
if e == nil {
e = fmt.Errorf("test is missing block %d", block)
}
}
+ b = protocol.EncodeReflect(bl)
return
}
diff --git a/cmd/catchpointdump/database.go b/cmd/catchpointdump/database.go
index c1ec554e2..dfbe7bdf5 100644
--- a/cmd/catchpointdump/database.go
+++ b/cmd/catchpointdump/database.go
@@ -30,13 +30,16 @@ import (
)
var ledgerTrackerFilename string
+var ledgerTrackerStaging bool
func init() {
databaseCmd.Flags().StringVarP(&ledgerTrackerFilename, "tracker", "t", "", "Specify the ledger tracker file name ( i.e. ./ledger.tracker.sqlite )")
databaseCmd.Flags().StringVarP(&outFileName, "output", "o", "", "Specify an outfile for the dump ( i.e. ledger.dump.txt )")
+ databaseCmd.Flags().BoolVarP(&ledgerTrackerStaging, "staging", "s", false, "Specify whether to look in the catchpoint staging or regular tables. (default false)")
databaseCmd.AddCommand(checkCmd)
checkCmd.Flags().StringVarP(&ledgerTrackerFilename, "tracker", "t", "", "Specify the ledger tracker file name ( i.e. ./ledger.tracker.sqlite )")
+ checkCmd.Flags().BoolVarP(&ledgerTrackerStaging, "staging", "s", false, "Specify whether to look in the catchpoint staging or regular tables. (default false)")
}
var databaseCmd = &cobra.Command{
@@ -58,10 +61,14 @@ var databaseCmd = &cobra.Command{
}
defer outFile.Close()
}
- err = printAccountsDatabase(ledgerTrackerFilename, ledger.CatchpointFileHeader{}, outFile, nil)
+ err = printAccountsDatabase(ledgerTrackerFilename, ledgerTrackerStaging, ledger.CatchpointFileHeader{}, outFile, nil)
if err != nil {
reportErrorf("Unable to print account database : %v", err)
}
+ err = printKeyValueStore(ledgerTrackerFilename, ledgerTrackerStaging, outFile)
+ if err != nil {
+ reportErrorf("Unable to print key value store : %v", err)
+ }
},
}
@@ -99,7 +106,7 @@ func checkDatabase(databaseName string, outFile *os.File) error {
var stats merkletrie.Stats
err = dbAccessor.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- committer, err := ledger.MakeMerkleCommitter(tx, false)
+ committer, err := ledger.MakeMerkleCommitter(tx, ledgerTrackerStaging)
if err != nil {
return err
}
@@ -107,6 +114,11 @@ func checkDatabase(databaseName string, outFile *os.File) error {
if err != nil {
return err
}
+ root, err := trie.RootHash()
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(outFile, " Root: %s\n", root)
stats, err = trie.GetStats()
if err != nil {
return err
diff --git a/cmd/catchpointdump/file.go b/cmd/catchpointdump/file.go
index d0e181670..aed2dfc1f 100644
--- a/cmd/catchpointdump/file.go
+++ b/cmd/catchpointdump/file.go
@@ -19,8 +19,10 @@ package main
import (
"archive/tar"
"bufio"
+ "compress/gzip"
"context"
"database/sql"
+ "encoding/base64"
"encoding/json"
"fmt"
"io"
@@ -34,6 +36,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
@@ -41,12 +44,12 @@ import (
"github.com/algorand/go-algorand/util/db"
)
-var tarFile string
+var catchpointFile string
var outFileName string
var excludedFields *cmdutil.CobraStringSliceValue = cmdutil.MakeCobraStringSliceValue(nil, []string{"version", "catchpoint"})
func init() {
- fileCmd.Flags().StringVarP(&tarFile, "tar", "t", "", "Specify the tar file to process")
+ fileCmd.Flags().StringVarP(&catchpointFile, "tar", "t", "", "Specify the catchpoint file (either .tar or .tar.gz) to process")
fileCmd.Flags().StringVarP(&outFileName, "output", "o", "", "Specify an outfile for the dump ( i.e. tracker.dump.txt )")
fileCmd.Flags().BoolVarP(&loadOnly, "load", "l", false, "Load only, do not dump")
fileCmd.Flags().VarP(excludedFields, "exclude-fields", "e", "List of fields to exclude from the dump: ["+excludedFields.AllowedString()+"]")
@@ -58,18 +61,18 @@ var fileCmd = &cobra.Command{
Long: "Specify a file to dump",
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
- if tarFile == "" {
+ if catchpointFile == "" {
cmd.HelpFunc()(cmd, args)
return
}
- stats, err := os.Stat(tarFile)
+ stats, err := os.Stat(catchpointFile)
if err != nil {
- reportErrorf("Unable to stat '%s' : %v", tarFile, err)
+ reportErrorf("Unable to stat '%s' : %v", catchpointFile, err)
}
- tarSize := stats.Size()
- if tarSize == 0 {
- reportErrorf("Empty file '%s' : %v", tarFile, err)
+ catchpointSize := stats.Size()
+ if catchpointSize == 0 {
+ reportErrorf("Empty file '%s' : %v", catchpointFile, err)
}
// TODO: store CurrentProtocol in catchpoint file header.
// As a temporary workaround use a current protocol version.
@@ -103,13 +106,13 @@ var fileCmd = &cobra.Command{
}
var fileHeader ledger.CatchpointFileHeader
- reader, err := os.Open(tarFile)
+ reader, err := os.Open(catchpointFile)
if err != nil {
- reportErrorf("Unable to read '%s' : %v", tarFile, err)
+ reportErrorf("Unable to read '%s' : %v", catchpointFile, err)
}
defer reader.Close()
- fileHeader, err = loadCatchpointIntoDatabase(context.Background(), catchupAccessor, reader, tarSize)
+ fileHeader, err = loadCatchpointIntoDatabase(context.Background(), catchupAccessor, reader, catchpointSize)
if err != nil {
reportErrorf("Unable to load catchpoint file into in-memory database : %v", err)
}
@@ -124,10 +127,14 @@ var fileCmd = &cobra.Command{
defer outFile.Close()
}
- err = printAccountsDatabase("./ledger.tracker.sqlite", fileHeader, outFile, excludedFields.GetSlice())
+ err = printAccountsDatabase("./ledger.tracker.sqlite", true, fileHeader, outFile, excludedFields.GetSlice())
if err != nil {
reportErrorf("Unable to print account database : %v", err)
}
+ err = printKeyValueStore("./ledger.tracker.sqlite", true, outFile)
+ if err != nil {
+ reportErrorf("Unable to print key value store : %v", err)
+ }
}
},
}
@@ -142,14 +149,49 @@ func printLoadCatchpointProgressLine(progress int, barLength int, dld int64) {
fmt.Printf(escapeCursorUp+escapeDeleteLine+outString+" %s\n", formatSize(dld))
}
-func loadCatchpointIntoDatabase(ctx context.Context, catchupAccessor ledger.CatchpointCatchupAccessor, tarFile io.Reader, tarSize int64) (fileHeader ledger.CatchpointFileHeader, err error) {
+func isGzipCompressed(catchpointReader *bufio.Reader, catchpointFileSize int64) bool {
+ const gzipPrefixSize = 2
+ const gzipPrefix = "\x1F\x8B"
+
+ if catchpointFileSize < gzipPrefixSize {
+ return false
+ }
+
+ prefixBytes, err := catchpointReader.Peek(gzipPrefixSize)
+
+ if err != nil {
+ return false
+ }
+
+ return prefixBytes[0] == gzipPrefix[0] && prefixBytes[1] == gzipPrefix[1]
+}
+
+func getCatchpointTarReader(catchpointReader *bufio.Reader, catchpointFileSize int64) (*tar.Reader, error) {
+ if isGzipCompressed(catchpointReader, catchpointFileSize) {
+ gzipReader, err := gzip.NewReader(catchpointReader)
+ if err != nil {
+ return nil, err
+ }
+
+ return tar.NewReader(gzipReader), nil
+ }
+
+ return tar.NewReader(catchpointReader), nil
+}
+
+func loadCatchpointIntoDatabase(ctx context.Context, catchupAccessor ledger.CatchpointCatchupAccessor, catchpointFile io.Reader, catchpointFileSize int64) (fileHeader ledger.CatchpointFileHeader, err error) {
fmt.Printf("\n")
printLoadCatchpointProgressLine(0, 50, 0)
lastProgressUpdate := time.Now()
progress := uint64(0)
defer printLoadCatchpointProgressLine(0, 0, 0)
- tarReader := tar.NewReader(tarFile)
+ catchpointReader := bufio.NewReader(catchpointFile)
+ tarReader, err := getCatchpointTarReader(catchpointReader, catchpointFileSize)
+ if err != nil {
+ return fileHeader, err
+ }
+
var downloadProgress ledger.CatchpointCatchupAccessorProgress
for {
header, err := tarReader.Next()
@@ -176,7 +218,7 @@ func loadCatchpointIntoDatabase(ctx context.Context, catchupAccessor ledger.Catc
return fileHeader, err
}
}
- err = catchupAccessor.ProgressStagingBalances(ctx, header.Name, balancesBlockBytes, &downloadProgress)
+ err = catchupAccessor.ProcessStagingBalances(ctx, header.Name, balancesBlockBytes, &downloadProgress)
if err != nil {
return fileHeader, err
}
@@ -184,9 +226,9 @@ func loadCatchpointIntoDatabase(ctx context.Context, catchupAccessor ledger.Catc
// we already know it's valid, since we validated that above.
protocol.Decode(balancesBlockBytes, &fileHeader)
}
- if time.Since(lastProgressUpdate) > 50*time.Millisecond && tarSize > 0 {
+ if time.Since(lastProgressUpdate) > 50*time.Millisecond && catchpointFileSize > 0 {
lastProgressUpdate = time.Now()
- printLoadCatchpointProgressLine(int(float64(progress)*50.0/float64(tarSize)), 50, int64(progress))
+ printLoadCatchpointProgressLine(int(float64(progress)*50.0/float64(catchpointFileSize)), 50, int64(progress))
}
}
}
@@ -204,7 +246,7 @@ func printDumpingCatchpointProgressLine(progress int, barLength int, dld int64)
fmt.Printf(escapeCursorUp + escapeDeleteLine + outString + "\n")
}
-func printAccountsDatabase(databaseName string, fileHeader ledger.CatchpointFileHeader, outFile *os.File, excludeFields []string) error {
+func printAccountsDatabase(databaseName string, stagingTables bool, fileHeader ledger.CatchpointFileHeader, outFile *os.File, excludeFields []string) error {
lastProgressUpdate := time.Now()
progress := uint64(0)
defer printDumpingCatchpointProgressLine(0, 0, 0)
@@ -224,6 +266,7 @@ func printAccountsDatabase(databaseName string, fileHeader ledger.CatchpointFile
"Block Header Digest: %s",
"Catchpoint: %s",
"Total Accounts: %d",
+ "Total KVs: %d",
"Total Chunks: %d",
}
var headerValues = []interface{}{
@@ -233,6 +276,7 @@ func printAccountsDatabase(databaseName string, fileHeader ledger.CatchpointFile
fileHeader.BlockHeaderDigest.String(),
fileHeader.Catchpoint,
fileHeader.TotalAccounts,
+ fileHeader.TotalKVs,
fileHeader.TotalChunks,
}
// safety check
@@ -280,6 +324,9 @@ func printAccountsDatabase(databaseName string, fileHeader ledger.CatchpointFile
if fileHeader.Version == 0 {
var totals ledgercore.AccountTotals
id := ""
+ if stagingTables {
+ id = "catchpointStaging"
+ }
row := tx.QueryRow("SELECT online, onlinerewardunits, offline, offlinerewardunits, notparticipating, notparticipatingrewardunits, rewardslevel FROM accounttotals WHERE id=?", id)
err = row.Scan(&totals.Online.Money.Raw, &totals.Online.RewardUnits,
&totals.Offline.Money.Raw, &totals.Offline.RewardUnits,
@@ -297,7 +344,7 @@ func printAccountsDatabase(databaseName string, fileHeader ledger.CatchpointFile
balancesTable := "accountbase"
resourcesTable := "resources"
- if fileHeader.Version != 0 {
+ if stagingTables {
balancesTable = "catchpointbalances"
resourcesTable = "catchpointresources"
}
@@ -323,7 +370,7 @@ func printAccountsDatabase(databaseName string, fileHeader ledger.CatchpointFile
return nil
}
- if fileHeader.Version < ledger.CatchpointFileVersionV6 {
+ if fileHeader.Version != 0 && fileHeader.Version < ledger.CatchpointFileVersionV6 {
var rows *sql.Rows
rows, err = tx.Query(fmt.Sprintf("SELECT address, data FROM %s order by address", balancesTable))
if err != nil {
@@ -378,9 +425,71 @@ func printAccountsDatabase(databaseName string, fileHeader ledger.CatchpointFile
return fmt.Errorf("expected %d accounts but got only %d", rowsCount, acctCount)
}
}
-
// increase the deadline warning to disable the warning message.
- db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(5*time.Second))
+ _, _ = db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(5*time.Second))
+ return err
+ })
+}
+
+func printKeyValue(writer *bufio.Writer, key, value []byte) {
+ var pretty string
+ ai, rest, err := logic.SplitBoxKey(string(key))
+ if err == nil {
+ pretty = fmt.Sprintf("box(%d, %s)", ai, base64.StdEncoding.EncodeToString([]byte(rest)))
+ } else {
+ pretty = base64.StdEncoding.EncodeToString(key)
+ }
+
+ fmt.Fprintf(writer, "%s : %v\n", pretty, base64.StdEncoding.EncodeToString(value))
+}
+
+func printKeyValueStore(databaseName string, stagingTables bool, outFile *os.File) error {
+ fmt.Printf("\n")
+ printDumpingCatchpointProgressLine(0, 50, 0)
+ lastProgressUpdate := time.Now()
+ progress := uint64(0)
+ defer printDumpingCatchpointProgressLine(0, 0, 0)
+
+ fileWriter := bufio.NewWriterSize(outFile, 1024*1024)
+ defer fileWriter.Flush()
+
+ dbAccessor, err := db.MakeAccessor(databaseName, true, false)
+ if err != nil || dbAccessor.Handle == nil {
return err
+ }
+
+ kvTable := "kvstore"
+ if stagingTables {
+ kvTable = "catchpointkvstore"
+ }
+
+ return dbAccessor.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ var rowsCount int64
+ err := tx.QueryRow(fmt.Sprintf("SELECT count(*) from %s", kvTable)).Scan(&rowsCount)
+ if err != nil {
+ return err
+ }
+
+ // ordered to make dumps more "diffable"
+ rows, err := tx.Query(fmt.Sprintf("SELECT key, value FROM %s order by key", kvTable))
+ if err != nil {
+ return err
+ }
+ defer rows.Close()
+ for rows.Next() {
+ progress++
+ var key []byte
+ var value []byte
+ err := rows.Scan(&key, &value)
+ if err != nil {
+ return err
+ }
+ printKeyValue(fileWriter, key, value)
+ if time.Since(lastProgressUpdate) > 50*time.Millisecond {
+ lastProgressUpdate = time.Now()
+ printDumpingCatchpointProgressLine(int(float64(progress)*50.0/float64(rowsCount)), 50, int64(progress))
+ }
+ }
+ return nil
})
}
diff --git a/cmd/catchpointdump/net.go b/cmd/catchpointdump/net.go
index 9073ece66..e63afb187 100644
--- a/cmd/catchpointdump/net.go
+++ b/cmd/catchpointdump/net.go
@@ -353,10 +353,15 @@ func loadAndDump(addr string, tarFile string, genesisInitState ledgercore.InitSt
return err
}
defer outFile.Close()
- err = printAccountsDatabase("./ledger.tracker.sqlite", fileHeader, outFile, excludedFields.GetSlice())
+ err = printAccountsDatabase("./ledger.tracker.sqlite", true, fileHeader, outFile, excludedFields.GetSlice())
if err != nil {
return err
}
+ err = printKeyValueStore("./ledger.tracker.sqlite", true, outFile)
+ if err != nil {
+ return err
+ }
+
}
return nil
}
diff --git a/cmd/dispenser/index.html.tpl b/cmd/dispenser/index.html.tpl
new file mode 100644
index 000000000..ec53973b8
--- /dev/null
+++ b/cmd/dispenser/index.html.tpl
@@ -0,0 +1,80 @@
+<!DOCTYPE html>
+ <head>
+ <title>Algorand dispenser</title>
+ <script src='https://www.google.com/recaptcha/api.js'>
+ </script>
+ <script src="https://code.jquery.com/jquery-3.3.1.min.js"
+ integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8="
+ crossorigin="anonymous">
+ </script>
+ <script>
+ var ADDRESS_REGEX = /[A-Z0-9]{58}/
+
+ function sanitize(string) {
+ const entityMap = {
+ '&': '&amp;',
+ '<': '&lt;',
+ '>': '&gt;',
+ '"': '&quot;',
+ "'": '&#39;',
+ '/': '&#x2F;',
+ '`': '&#x60;',
+ '=': '&#x3D;'
+ };
+ return String(string).replace(/[&<>"'`=\/]/g, function (s) {
+ return entityMap[s];
+ });
+ }
+
+ function loadparam() {
+ const queryString = window.location.search;
+ const urlParams = new URLSearchParams(queryString);
+ const account = sanitize(urlParams.get('account'))
+
+ if (ADDRESS_REGEX.test(account)) {
+ $('#target').val(account);
+ }
+ }
+
+ function onload() {
+ loadparam();
+ $('#dispense').click(function(e) {
+ var recaptcha = grecaptcha.getResponse();
+ var target = sanitize($('#target').val());
+
+ if (ADDRESS_REGEX.test(target)) {
+ $('#status').html('Sending request..');
+ var req = $.post('/dispense', {
+ recaptcha: recaptcha,
+ target: target,
+ }, function(data) {
+ $('#status').text('Code ' + req.status + ' ' + req.statusText + ': ' + req.responseText);
+ }).fail(function() {
+ $('#status').text('Code ' + req.status + ' ' + req.statusText + ': ' + req.responseText);
+ });
+ }
+ else {
+ $('#status').text('Please enter a valid Algorand address')
+ }
+ });
+ }
+ </script>
+ </head>
+ <body onload="onload()">
+ <h1>Algorand dispenser</h1>
+ <div class="g-recaptcha" data-sitekey="{{.RecaptchaSiteKey}}">
+ </div>
+ <div>
+ <p>The dispensed Algos have no monetary value and should only be used to test applications.</p>
+ <p>This service is gracefully provided to enable development on the Algorand blockchain test networks.</p>
+ <p>Please do not abuse it by requesting more Algos than needed.</p>
+ </div>
+ <div>
+ <input id="target" placeholder="target address" size="80">
+ <button id="dispense">Dispense</button>
+ </div>
+ <div>
+ Status: <span id="status"></span>
+ </div>
+ </body>
+</html>
diff --git a/cmd/dispenser/server.go b/cmd/dispenser/server.go
index d4ec0b5b8..7890ab8f4 100644
--- a/cmd/dispenser/server.go
+++ b/cmd/dispenser/server.go
@@ -17,6 +17,9 @@
package main
import (
+ _ "embed"
+ "html"
+
// "bytes"
"encoding/json"
"flag"
@@ -63,61 +66,8 @@ type dispenserSiteConfig struct {
topPage string
}
-const topPageTemplate = `
-<html>
- <head>
- <title>Algorand dispenser</title>
- <script src='https://www.google.com/recaptcha/api.js'>
- </script>
- <script src="https://code.jquery.com/jquery-3.3.1.min.js"
- integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8="
- crossorigin="anonymous">
- </script>
- <script>
- function loadparam() {
- const queryString = window.location.search;
- const urlParams = new URLSearchParams(queryString);
- $('#target').val(urlParams.get('account'));
- }
-
- function onload() {
- loadparam();
- $('#dispense').click(function(e) {
- var recaptcha = grecaptcha.getResponse();
- var target = $('#target').val();
-
- $('#status').html('Sending request..');
- var req = $.post('/dispense', {
- recaptcha: recaptcha,
- target: target,
- }, function(data) {
- $('#status').html('Code ' + req.status + ' ' + req.statusText + ': ' + req.responseText);
- }).fail(function() {
- $('#status').html('Code ' + req.status + ' ' + req.statusText + ': ' + req.responseText);
- });
- });
- }
- </script>
- </head>
- <body onload="onload()">
- <h1>Algorand dispenser</h1>
- <div class="g-recaptcha" data-sitekey="{{.RecaptchaSiteKey}}">
- </div>
- <div>
- <p>The dispensed Algos have no monetary value and should only be used to test applications.</p>
- <p>This service is gracefully provided to enable development on the Algorand blockchain test networks.</p>
- <p>Please do not abuse it by requesting more Algos than needed.</p>
- </div>
- <div>
- <input id="target" placeholder="target address" size="80">
- <button id="dispense">Dispense</button>
- </div>
- <div>
- Status: <span id="status"></span>
- </div>
- </body>
-</html>
-`
+//go:embed index.html.tpl
+var topPageTemplate string
func getConfig(r *http.Request) dispenserSiteConfig {
return configMap[r.Host]
@@ -190,7 +140,7 @@ func dispense(w http.ResponseWriter, r *http.Request) {
return
}
- target := targets[0]
+ target := html.EscapeString(targets[0])
c, ok := client[r.Host]
if !ok {
diff --git a/cmd/goal/README.md b/cmd/goal/README.md
new file mode 100644
index 000000000..2ecebf65b
--- /dev/null
+++ b/cmd/goal/README.md
@@ -0,0 +1,147 @@
+# Example `goal` Snippets
+
+Unless otherwise noted, it is assumed that the working directory
+begins at the top level of the `go-algorand` repo.
+
+It is also assumed that the main README's installation instructions have been followed and `make install` run so that the `goal` executable has been rebuilt from the same source as this example and is available in the shell environment.
+You can run `ls -l $(which goal)` after your `make install` and look at the installation time as a sanity check.
+
+Finally, all the `goal` commands assume that `${ALGORAND_DATA}` has been set. See the first Q/A for how this is done.
+
+## Starting a Single Node Dev Network
+
+### Q: Having just completed a new build in go-algorand, how do I get a single node dev network up, with algos in an easily accessible wallet from goal?
+
+### A:
+
+```sh
+# set this to where you want to keep the network files (and data dirs will go beneath)
+NETWORKS=~/networks
+
+# create a networks directory if you don't already have it
+mkdir -p ${NETWORKS}
+
+# set this to "name" your network
+NAME=niftynetwork
+
+# assuming here that we are currently working out of the root directory of the go-algorand repo
+goal network create -n ${NAME} -r ${NETWORKS}/${NAME} -t ./test/testdata/nettemplates/OneNodeFuture.json
+
+# after the next command and for the rest of the README, we assume that `${ALGORAND_DATA}` is set
+export ALGORAND_DATA=${NETWORKS}/${NAME}/Primary
+echo $ALGORAND_DATA
+
+# start the network
+goal node start
+
+# see if it worked (run a few times, note block increasing)
+goal node status
+sleep 4 # assuming you're copy/pasting this entire block
+goal node status
+sleep 4
+goal node status
+
+# find the account with all the money
+goal account list
+
+# put it in a variable
+ACCOUNT=`goal account list | awk '{print $2}'`
+echo $ACCOUNT
+
+# send some money from the account to itself
+goal clerk send --to ${ACCOUNT} --from ${ACCOUNT} --amount 10
+```
+
+## Creating Applications
+
+### Q: How do I use goal to create an app?
+
+### A:
+Here's an example with the following assumptions:
+* all the setup is as in the first question
+* the approval program (which tests box functionality) has relative path `cmd/goal/examples/boxes.teal`
+* the clear program has relative path `cmd/goal/examples/clear.teal`
+* there are no local or global storage requirements
+
+```sh
+TEALDIR=cmd/goal/examples
+echo $TEALDIR
+
+# create the app and TAKE NOTE of its "app index"
+goal app create --creator ${ACCOUNT} --approval-prog ${TEALDIR}/boxes.teal --clear-prog ${TEALDIR}/clear.teal --global-byteslices 0 --global-ints 0 --local-byteslices 0 --local-ints 0
+```
+
+For the following questions, you'll need to use the app index. That will be shown in the last line printed. E.g.:
+
+```sh
+Attempting to create app (approval size 125, hash RKWO3VXBKQXF77PC6EHRLFXD4YTJYTJTGPTPWQ46YH5ESGPZ5JIA; clear size 3, hash IS4FW6ZCRMQRTDIINAVAQHD2GK6DXUNQHQ52IQGZEVPP4OEU56QA)
+Issued transaction from account ECRQFXZ7P3PLNK6QLIEVX7AXU6NTVQZHFUSEXTXMBKKOA2NTIV4PCX7XNY, txid SZK3U7AARMPQSZUICZIGYRLC7UDXJCVPV34JCBN5LIBXMF635UKA (fee 1000)
+Transaction SZK3U7AARMPQSZUICZIGYRLC7UDXJCVPV34JCBN5LIBXMF635UKA still pending as of round 12
+Transaction SZK3U7AARMPQSZUICZIGYRLC7UDXJCVPV34JCBN5LIBXMF635UKA still pending as of round 13
+Transaction SZK3U7AARMPQSZUICZIGYRLC7UDXJCVPV34JCBN5LIBXMF635UKA committed in round 14
+Created app with app index 2
+```
+
+## Funding App-Accounts
+
+### Q: How do I fund the app account so that it can satisfy its boxes min-balance requirement and allow for box creation?
+
+### A:
+Assuming you followed the previous step, and that the _app index_ is 2:
+
+```sh
+# store the app index for later usage
+APPID=2
+echo $APPID
+
+# store the app's account address into a variable
+APP_ACCOUNT=`goal app info --app-id ${APPID} | grep "Application account" | awk '{print $3}'`
+echo $APP_ACCOUNT
+
+# fund the app's account (here we're being very conservative and sending 10 algos)
+goal clerk send --to ${APP_ACCOUNT} --from ${ACCOUNT} --amount 10000000
+
+# verify the balance of the app's account
+goal account balance --address ${APP_ACCOUNT}
+```
+
+## Application Boxes in `goal`
+
+### Q: How do I use boxes in goal? In particular, I'd like to make a goal app call which:
+* accesses a particular box for a particular app
+* stores an ABI type as its contents
+
+### A:
+Here's an example with the following assumptions:
+
+* the caller's account is given by `${ACCOUNT}` (see first answer)
+* the program used is `boxes.teal` referenced above. In particular:
+ * it routes to box subroutines using the app argument at index 0 as the method signifier
+* the app id has been stored in `${APPID}` (see the previous answer)
+* the box referenced in the first non-create app-call has name `greatBox`
+* another referenced box is named `an_ABI_box`
+ * this second box is provided contents `[2,3,5]` of ABI-type `(uint8,uint8,uint8)`
+
+```sh
+# create a box with a simple non-ABI name. Note how the `--box` flag needs to be set so as to refer to the box being touched
+goal app call --from $ACCOUNT --app-id ${APPID} --box "str:greatBox" --app-arg "str:create" --app-arg "str:greatBox"
+
+# create another box
+goal app call --from ${ACCOUNT} --app-id ${APPID} --box "str:an_ABI_box" --app-arg "str:create" --app-arg "str:an_ABI_box"
+
+# set the contents to ABI type `(uint8,uint8,uint8)` with value `[2,3,5]`
+goal app call --from ${ACCOUNT} --app-id ${APPID} --box "str:an_ABI_box" --app-arg "str:set" --app-arg "str:an_ABI_box" --app-arg "abi:(uint8,uint8,uint8):[2,3,5]"
+```
+
+### Q: How do I search for boxes in goal?
+
+### A:
+Assuming you followed the previous step to create `greatBox` and `an_ABI_box`:
+
+```sh
+# get all boxes for a given app
+goal app box list --app-id ${APPID}
+
+# get the box details for a given box
+goal app box info --app-id ${APPID} --name "str:an_ABI_box"
+```
diff --git a/cmd/goal/account.go b/cmd/goal/account.go
index 62f1b08c8..5cb3f416c 100644
--- a/cmd/goal/account.go
+++ b/cmd/goal/account.go
@@ -31,7 +31,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/passphrase"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
algodAcct "github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
@@ -59,7 +59,6 @@ var (
partKeyOutDir string
partKeyFile string
partKeyDeleteInput bool
- partkeyCompat bool
importDefault bool
mnemonic string
dumpOutFile string
@@ -169,12 +168,6 @@ func init() {
installParticipationKeyCmd.MarkFlagRequired("partkey")
installParticipationKeyCmd.Flags().BoolVar(&partKeyDeleteInput, "delete-input", false, "Acknowledge that installpartkey will delete the input key file")
- // listpartkey flags
- listParticipationKeysCmd.Flags().BoolVarP(&partkeyCompat, "compatibility", "c", false, "Print output in compatibility mode. This option will be removed in a future release, please use REST API for tooling.")
-
- // partkeyinfo flags
- partkeyInfoCmd.Flags().BoolVarP(&partkeyCompat, "compatibility", "c", false, "Print output in compatibility mode. This option will be removed in a future release, please use REST API for tooling.")
-
// import flags
importCmd.Flags().BoolVarP(&importDefault, "default", "f", false, "Set this account as the default one")
importCmd.Flags().StringVarP(&mnemonic, "mnemonic", "m", "", "Mnemonic to import (will prompt otherwise)")
@@ -487,7 +480,7 @@ var listCmd = &cobra.Command{
// For each address, request information about it from algod
for _, addr := range addrs {
- response, _ := client.AccountInformationV2(addr.Addr, true)
+ response, _ := client.AccountInformation(addr.Addr, true)
// it's okay to proceed without algod info
// Display this information to the user
@@ -523,7 +516,7 @@ var infoCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
dataDir := ensureSingleDataDir()
client := ensureAlgodClient(dataDir)
- response, err := client.AccountInformationV2(accountAddress, true)
+ response, err := client.AccountInformation(accountAddress, true)
if err != nil {
reportErrorf(errorRequestFail, err)
}
@@ -535,37 +528,37 @@ var infoCmd = &cobra.Command{
},
}
-func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bool, account generatedV2.Account) bool {
- var createdAssets []generatedV2.Asset
+func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bool, account model.Account) bool {
+ var createdAssets []model.Asset
if account.CreatedAssets != nil {
- createdAssets = make([]generatedV2.Asset, len(*account.CreatedAssets))
+ createdAssets = make([]model.Asset, len(*account.CreatedAssets))
copy(createdAssets, *account.CreatedAssets)
sort.Slice(createdAssets, func(i, j int) bool {
return createdAssets[i].Index < createdAssets[j].Index
})
}
- var heldAssets []generatedV2.AssetHolding
+ var heldAssets []model.AssetHolding
if account.Assets != nil {
- heldAssets = make([]generatedV2.AssetHolding, len(*account.Assets))
+ heldAssets = make([]model.AssetHolding, len(*account.Assets))
copy(heldAssets, *account.Assets)
sort.Slice(heldAssets, func(i, j int) bool {
- return heldAssets[i].AssetId < heldAssets[j].AssetId
+ return heldAssets[i].AssetID < heldAssets[j].AssetID
})
}
- var createdApps []generatedV2.Application
+ var createdApps []model.Application
if account.CreatedApps != nil {
- createdApps = make([]generatedV2.Application, len(*account.CreatedApps))
+ createdApps = make([]model.Application, len(*account.CreatedApps))
copy(createdApps, *account.CreatedApps)
sort.Slice(createdApps, func(i, j int) bool {
return createdApps[i].Id < createdApps[j].Id
})
}
- var optedInApps []generatedV2.ApplicationLocalState
+ var optedInApps []model.ApplicationLocalState
if account.AppsLocalState != nil {
- optedInApps = make([]generatedV2.ApplicationLocalState, len(*account.AppsLocalState))
+ optedInApps = make([]model.ApplicationLocalState, len(*account.AppsLocalState))
copy(optedInApps, *account.AppsLocalState)
sort.Slice(optedInApps, func(i, j int) bool {
return optedInApps[i].Id < optedInApps[j].Id
@@ -591,7 +584,7 @@ func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bo
_, units = unicodePrintable(*createdAsset.Params.UnitName)
}
- total := assetDecimalsFmt(createdAsset.Params.Total, uint32(createdAsset.Params.Decimals))
+ total := assetDecimalsFmt(createdAsset.Params.Total, createdAsset.Params.Decimals)
url := ""
if createdAsset.Params.Url != nil {
@@ -608,17 +601,17 @@ func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bo
}
for _, assetHolding := range heldAssets {
if onlyShowAssetIds {
- fmt.Fprintf(report, "\tID %d\n", assetHolding.AssetId)
+ fmt.Fprintf(report, "\tID %d\n", assetHolding.AssetID)
continue
}
- assetParams, err := client.AssetInformationV2(assetHolding.AssetId)
+ assetParams, err := client.AssetInformation(assetHolding.AssetID)
if err != nil {
hasError = true
- fmt.Fprintf(errorReport, "Error: Unable to retrieve asset information for asset %d referred to by account %s: %v\n", assetHolding.AssetId, address, err)
- fmt.Fprintf(report, "\tID %d, error\n", assetHolding.AssetId)
+ fmt.Fprintf(errorReport, "Error: Unable to retrieve asset information for asset %d referred to by account %s: %v\n", assetHolding.AssetID, address, err)
+ fmt.Fprintf(report, "\tID %d, error\n", assetHolding.AssetID)
}
- amount := assetDecimalsFmt(assetHolding.Amount, uint32(assetParams.Params.Decimals))
+ amount := assetDecimalsFmt(assetHolding.Amount, assetParams.Params.Decimals)
assetName := "<unnamed>"
if assetParams.Params.Name != nil {
@@ -635,7 +628,7 @@ func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bo
frozen = " (frozen)"
}
- fmt.Fprintf(report, "\tID %d, %s, balance %s %s%s\n", assetHolding.AssetId, assetName, amount, unitName, frozen)
+ fmt.Fprintf(report, "\tID %d, %s, balance %s %s%s\n", assetHolding.AssetID, assetName, amount, unitName, frozen)
}
fmt.Fprintln(report, "Created Apps:")
@@ -713,7 +706,7 @@ var balanceCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
dataDir := ensureSingleDataDir()
client := ensureAlgodClient(dataDir)
- response, err := client.AccountInformationV2(accountAddress, false)
+ response, err := client.AccountInformation(accountAddress, false)
if err != nil {
reportErrorf(errorRequestFail, err)
}
@@ -758,7 +751,7 @@ var rewardsCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
dataDir := ensureSingleDataDir()
client := ensureAlgodClient(dataDir)
- response, err := client.AccountInformationV2(accountAddress, false)
+ response, err := client.AccountInformation(accountAddress, false)
if err != nil {
reportErrorf(errorRequestFail, err)
}
@@ -1046,7 +1039,7 @@ func renewPartKeysInDir(dataDir string, lastValidRound uint64, fee uint64, lease
if err != nil {
return fmt.Errorf(errorRequestFail, err)
}
- renewAccounts := make(map[string]generatedV2.ParticipationKey)
+ renewAccounts := make(map[string]model.ParticipationKey)
for _, part := range parts {
if existing, has := renewAccounts[part.Address]; has {
if existing.Key.VoteFirstValid >= part.Key.VoteLastValid {
@@ -1120,10 +1113,6 @@ var listParticipationKeysCmd = &cobra.Command{
Long: `List all participation keys tracked by algod along with summary of additional information. For detailed key information use 'partkeyinfo'.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
- if partkeyCompat {
- legacyListParticipationKeysCommand()
- return
- }
dataDir := ensureSingleDataDir()
client := ensureGoalClient(dataDir, libgoal.DynamicClient)
@@ -1137,15 +1126,15 @@ var listParticipationKeysCmd = &cobra.Command{
fmt.Printf(rowFormat, "Registered", "Account", "ParticipationID", "Last Used", "First round", "Last round")
for _, part := range parts {
onlineInfoStr := "unknown"
- onlineAccountInfo, err := client.AccountInformation(part.Address)
+ onlineAccountInfo, err := client.AccountInformation(part.Address, false)
if err == nil {
votingBytes := part.Key.VoteParticipationKey
vrfBytes := part.Key.SelectionParticipationKey
if onlineAccountInfo.Participation != nil &&
- (string(onlineAccountInfo.Participation.ParticipationPK) == string(votingBytes[:])) &&
- (string(onlineAccountInfo.Participation.VRFPK) == string(vrfBytes[:])) &&
- (onlineAccountInfo.Participation.VoteFirst == part.Key.VoteFirstValid) &&
- (onlineAccountInfo.Participation.VoteLast == part.Key.VoteLastValid) &&
+ (string(onlineAccountInfo.Participation.VoteParticipationKey) == string(votingBytes[:])) &&
+ (string(onlineAccountInfo.Participation.SelectionParticipationKey) == string(vrfBytes[:])) &&
+ (onlineAccountInfo.Participation.VoteFirstValid == part.Key.VoteFirstValid) &&
+ (onlineAccountInfo.Participation.VoteLastValid == part.Key.VoteLastValid) &&
(onlineAccountInfo.Participation.VoteKeyDilution == part.Key.VoteKeyDilution) {
onlineInfoStr = "yes"
} else {
@@ -1372,11 +1361,6 @@ var partkeyInfoCmd = &cobra.Command{
Long: `Output details about all available part keys in the specified data directory(ies), such as key validity period.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
- if partkeyCompat {
- legacyPartkeyInfoCommand()
- return
- }
-
onDataDirs(func(dataDir string) {
fmt.Printf("Dumping participation key info from %s...\n", dataDir)
client := ensureAlgodClient(dataDir)
@@ -1462,138 +1446,3 @@ var markNonparticipatingCmd = &cobra.Command{
}
},
}
-
-// listParticipationKeyFiles returns the available participation keys,
-// as a map from database filename to Participation key object.
-// DEPRECATED
-func listParticipationKeyFiles(c *libgoal.Client) (partKeyFiles map[string]algodAcct.Participation, err error) {
- genID, err := c.GenesisID()
- if err != nil {
- return
- }
-
- // Get a list of files in the participation keys directory
- keyDir := filepath.Join(c.DataDir(), genID)
- files, err := os.ReadDir(keyDir)
- if err != nil {
- return
- }
-
- partKeyFiles = make(map[string]algodAcct.Participation)
- for _, file := range files {
- // If it can't be a participation key database, skip it
- if !config.IsPartKeyFilename(file.Name()) {
- continue
- }
-
- filename := file.Name()
-
- // Fetch a handle to this database
- handle, err := db.MakeErasableAccessor(filepath.Join(keyDir, filename))
- if err != nil {
- // Couldn't open it, skip it
- continue
- }
-
- // Fetch an account.Participation from the database
- part, err := algodAcct.RestoreParticipation(handle)
- if err != nil {
- // Couldn't read it, skip it
- handle.Close()
- continue
- }
-
- partKeyFiles[filename] = part.Participation
- part.Close()
- }
-
- return
-}
-
-// legacyListParticipationKeysCommand prints key information in the same
-// format as earlier versions of goal. Some users are using this information
-// in scripts and need some extra time to migrate to the REST API.
-// DEPRECATED
-func legacyListParticipationKeysCommand() {
- dataDir := ensureSingleDataDir()
-
- client := ensureGoalClient(dataDir, libgoal.DynamicClient)
- parts, err := listParticipationKeyFiles(&client)
- if err != nil {
- reportErrorf(errorRequestFail, err)
- }
-
- var filenames []string
- for fn := range parts {
- filenames = append(filenames, fn)
- }
- sort.Strings(filenames)
-
- rowFormat := "%-10s\t%-80s\t%-60s\t%12s\t%12s\t%12s\n"
- fmt.Printf(rowFormat, "Registered", "Filename", "Parent address", "First round", "Last round", "First key")
- for _, fn := range filenames {
- onlineInfoStr := "unknown"
- onlineAccountInfo, err := client.AccountInformation(parts[fn].Address().GetUserAddress())
- if err == nil {
- votingBytes := parts[fn].Voting.OneTimeSignatureVerifier
- vrfBytes := parts[fn].VRF.PK
- if onlineAccountInfo.Participation != nil &&
- (string(onlineAccountInfo.Participation.ParticipationPK) == string(votingBytes[:])) &&
- (string(onlineAccountInfo.Participation.VRFPK) == string(vrfBytes[:])) &&
- (onlineAccountInfo.Participation.VoteFirst == uint64(parts[fn].FirstValid)) &&
- (onlineAccountInfo.Participation.VoteLast == uint64(parts[fn].LastValid)) &&
- (onlineAccountInfo.Participation.VoteKeyDilution == parts[fn].KeyDilution) {
- onlineInfoStr = "yes"
- } else {
- onlineInfoStr = "no"
- }
- }
- // it's okay to proceed without algod info
- first, last := parts[fn].ValidInterval()
- fmt.Printf(rowFormat, onlineInfoStr, fn, parts[fn].Address().GetUserAddress(),
- fmt.Sprintf("%d", first),
- fmt.Sprintf("%d", last),
- fmt.Sprintf("%d.%d", parts[fn].Voting.FirstBatch, parts[fn].Voting.FirstOffset))
- }
-}
-
-// legacyPartkeyInfoCommand prints key information in the same
-// format as earlier versions of goal. Some users are using this information
-// in scripts and need some extra time to migrate to alternatives.
-// DEPRECATED
-func legacyPartkeyInfoCommand() {
- type partkeyInfo struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
- Address string `codec:"acct"`
- FirstValid basics.Round `codec:"first"`
- LastValid basics.Round `codec:"last"`
- VoteID crypto.OneTimeSignatureVerifier `codec:"vote"`
- SelectionID crypto.VRFVerifier `codec:"sel"`
- VoteKeyDilution uint64 `codec:"voteKD"`
- }
-
- onDataDirs(func(dataDir string) {
- fmt.Printf("Dumping participation key info from %s...\n", dataDir)
- client := ensureGoalClient(dataDir, libgoal.DynamicClient)
-
- // Make sure we don't already have a partkey valid for (or after) specified roundLastValid
- parts, err := listParticipationKeyFiles(&client)
- if err != nil {
- reportErrorf(errorRequestFail, err)
- }
-
- for filename, part := range parts {
- fmt.Println(strings.Repeat("-", 40))
- info := partkeyInfo{
- Address: part.Address().String(),
- FirstValid: part.FirstValid,
- LastValid: part.LastValid,
- VoteID: part.VotingSecrets().OneTimeSignatureVerifier,
- SelectionID: part.VRFSecrets().PK,
- VoteKeyDilution: part.KeyDilution,
- }
- infoString := protocol.EncodeJSON(&info)
- fmt.Printf("File: %s\n%s\n", filename, string(infoString))
- }
- })
-}
diff --git a/cmd/goal/accountsList.go b/cmd/goal/accountsList.go
index dc646ffb0..366a132c6 100644
--- a/cmd/goal/accountsList.go
+++ b/cmd/goal/accountsList.go
@@ -24,7 +24,7 @@ import (
"path/filepath"
"strings"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/libgoal"
)
@@ -204,7 +204,7 @@ func (accountList *AccountsList) loadList() {
}
}
-func (accountList *AccountsList) outputAccount(addr string, acctInfo generatedV2.Account, multisigInfo *libgoal.MultisigInfo) {
+func (accountList *AccountsList) outputAccount(addr string, acctInfo model.Account, multisigInfo *libgoal.MultisigInfo) {
if acctInfo.Address == "" {
fmt.Printf("[n/a]\t%s\t%s\t[n/a] microAlgos", accountList.getNameByAddress(addr), addr)
} else {
diff --git a/cmd/goal/application.go b/cmd/goal/application.go
index 58ac78f75..e58598bcc 100644
--- a/cmd/goal/application.go
+++ b/cmd/goal/application.go
@@ -19,9 +19,6 @@ package main
import (
"bytes"
"crypto/sha512"
- "encoding/base32"
- "encoding/base64"
- "encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
@@ -72,6 +69,7 @@ var (
// platform seems not so far-fetched?
foreignApps []string
foreignAssets []string
+ appBoxes []string // parse these as we do app args, with optional number and comma in front
appStrAccounts []string
appArgs []string
@@ -98,8 +96,9 @@ func init() {
appCmd.PersistentFlags().StringArrayVar(&appArgs, "app-arg", nil, "Args to encode for application transactions (all will be encoded to a byte slice). For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.")
appCmd.PersistentFlags().StringSliceVar(&foreignApps, "foreign-app", nil, "Indexes of other apps whose global state is read in this transaction")
appCmd.PersistentFlags().StringSliceVar(&foreignAssets, "foreign-asset", nil, "Indexes of assets whose parameters are read in this transaction")
+ appCmd.PersistentFlags().StringArrayVar(&appBoxes, "box", nil, "Boxes that may be accessed by this transaction. Use the same form as app-arg to name the box, preceded by an optional app-id and comma. No app-id indicates the box is accessible by the app being called.")
appCmd.PersistentFlags().StringSliceVar(&appStrAccounts, "app-account", nil, "Accounts that may be accessed from application logic")
- appCmd.PersistentFlags().StringVarP(&appInputFilename, "app-input", "i", "", "JSON file containing encoded arguments and inputs (mutually exclusive with app-arg-b64 and app-account)")
+ appCmd.PersistentFlags().StringVarP(&appInputFilename, "app-input", "i", "", "JSON file containing encoded arguments and inputs (mutually exclusive with app-arg, app-account, foreign-app, foreign-asset, and box)")
appCmd.PersistentFlags().StringVar(&approvalProgFile, "approval-prog", "", "(Uncompiled) TEAL assembly program filename for approving/rejecting transactions")
appCmd.PersistentFlags().StringVar(&clearProgFile, "clear-prog", "", "(Uncompiled) TEAL assembly program filename for updating application state when a user clears their local state")
@@ -199,16 +198,51 @@ func panicIfErr(err error) {
}
}
-type appCallArg struct {
- Encoding string `codec:"encoding"`
- Value string `codec:"value"`
+func newAppCallBytes(arg string) logic.AppCallBytes {
+ appBytes, err := logic.NewAppCallBytes(arg)
+ if err != nil {
+ reportErrorf(err.Error())
+ }
+ return appBytes
}
type appCallInputs struct {
- Accounts []string `codec:"accounts"`
- ForeignApps []uint64 `codec:"foreignapps"`
- ForeignAssets []uint64 `codec:"foreignassets"`
- Args []appCallArg `codec:"args"`
+ Accounts []string `codec:"accounts"`
+ ForeignApps []uint64 `codec:"foreignapps"`
+ ForeignAssets []uint64 `codec:"foreignassets"`
+ Boxes []boxRef `codec:"boxes"`
+ Args []logic.AppCallBytes `codec:"args"`
+}
+
+type boxRef struct {
+ appID uint64 `codec:"app"`
+ name logic.AppCallBytes `codec:"name"`
+}
+
+// newBoxRef parses a command-line box ref, which is an optional appId, a comma,
+// and then the same format as an app call arg.
+func newBoxRef(arg string) boxRef {
+ parts := strings.SplitN(arg, ":", 2)
+ if len(parts) != 2 {
+ reportErrorf("box refs should be of the form '[<app>,]encoding:value'")
+ }
+ encoding := parts[0] // tentative, may be <app>,<encoding>
+ value := parts[1]
+ parts = strings.SplitN(encoding, ",", 2)
+ appID := uint64(0)
+ if len(parts) == 2 {
+ // There was a comma in the part before the ":"
+ encoding = parts[1]
+ var err error
+ appID, err = strconv.ParseUint(parts[0], 10, 64)
+ if err != nil {
+ reportErrorf("Could not parse app id in box ref: %v", err)
+ }
+ }
+ return boxRef{
+ appID: appID,
+ name: newAppCallBytes(encoding + ":" + value),
+ }
}
func stringsToUint64(strs []string) []uint64 {
@@ -223,78 +257,60 @@ func stringsToUint64(strs []string) []uint64 {
return out
}
-func getForeignAssets() []uint64 {
- return stringsToUint64(foreignAssets)
-}
-
-func getForeignApps() []uint64 {
- return stringsToUint64(foreignApps)
+func stringsToBoxRefs(strs []string) []boxRef {
+ out := make([]boxRef, len(strs))
+ for i, brstr := range strs {
+ out[i] = newBoxRef(brstr)
+ }
+ return out
}
-func parseAppArg(arg appCallArg) (rawValue []byte, parseErr error) {
- switch arg.Encoding {
- case "str", "string":
- rawValue = []byte(arg.Value)
- case "int", "integer":
- num, err := strconv.ParseUint(arg.Value, 10, 64)
- if err != nil {
- parseErr = fmt.Errorf("Could not parse uint64 from string (%s): %v", arg.Value, err)
- return
- }
- ibytes := make([]byte, 8)
- binary.BigEndian.PutUint64(ibytes, num)
- rawValue = ibytes
- case "addr", "address":
- addr, err := basics.UnmarshalChecksumAddress(arg.Value)
- if err != nil {
- parseErr = fmt.Errorf("Could not unmarshal checksummed address from string (%s): %v", arg.Value, err)
- return
- }
- rawValue = addr[:]
- case "b32", "base32", "byte base32":
- data, err := base32.StdEncoding.DecodeString(arg.Value)
- if err != nil {
- parseErr = fmt.Errorf("Could not decode base32-encoded string (%s): %v", arg.Value, err)
- return
- }
- rawValue = data
- case "b64", "base64", "byte base64":
- data, err := base64.StdEncoding.DecodeString(arg.Value)
+func translateBoxRefs(input []boxRef, foreignApps []uint64) []transactions.BoxRef {
+ output := make([]transactions.BoxRef, len(input))
+ for i, tbr := range input {
+ rawName, err := tbr.name.Raw()
if err != nil {
- parseErr = fmt.Errorf("Could not decode base64-encoded string (%s): %v", arg.Value, err)
- return
- }
- rawValue = data
- case "abi":
- typeAndValue := strings.SplitN(arg.Value, ":", 2)
- if len(typeAndValue) != 2 {
- parseErr = fmt.Errorf("Could not decode abi string (%s): should split abi-type and abi-value with colon", arg.Value)
- return
+ reportErrorf("Could not decode box name %s: %v", tbr.name, err)
}
- abiType, err := abi.TypeOf(typeAndValue[0])
- if err != nil {
- parseErr = fmt.Errorf("Could not decode abi type string (%s): %v", typeAndValue[0], err)
- return
+
+ index := uint64(0)
+ if tbr.appID != 0 {
+ found := false
+ for a, id := range foreignApps {
+ if tbr.appID == id {
+ index = uint64(a + 1)
+ found = true
+ break
+ }
+ }
+ // Check appIdx after the foreignApps check. If the user actually
+ // put the appIdx in foreignApps, and then used the appIdx here
+ // (rather than 0), then maybe they really want to use it in the
+ // transaction as the full number. Though it's hard to see why.
+ if !found && tbr.appID == appIdx {
+ index = 0
+ found = true
+ }
+ if !found {
+ reportErrorf("Box ref with appId (%d) not in foreign-apps", tbr.appID)
+ }
}
- value, err := abiType.UnmarshalFromJSON([]byte(typeAndValue[1]))
- if err != nil {
- parseErr = fmt.Errorf("Could not decode abi value string (%s):%v ", typeAndValue[1], err)
- return
+ output[i] = transactions.BoxRef{
+ Index: index,
+ Name: rawName,
}
- return abiType.Encode(value)
- default:
- parseErr = fmt.Errorf("Unknown encoding: %s", arg.Encoding)
}
- return
+ return output
}
-func parseAppInputs(inputs appCallInputs) (args [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) {
+func parseAppInputs(inputs appCallInputs) (args [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef) {
accounts = inputs.Accounts
foreignApps = inputs.ForeignApps
foreignAssets = inputs.ForeignAssets
+ boxes = translateBoxRefs(inputs.Boxes, foreignApps)
args = make([][]byte, len(inputs.Args))
for i, arg := range inputs.Args {
- rawValue, err := parseAppArg(arg)
+ rawValue, err := arg.Raw()
if err != nil {
reportErrorf("Could not decode input at index %d: %v", i, err)
}
@@ -303,7 +319,7 @@ func parseAppInputs(inputs appCallInputs) (args [][]byte, accounts []string, for
return
}
-func processAppInputFile() (args [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) {
+func processAppInputFile() (args [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef) {
var inputs appCallInputs
f, err := os.Open(appInputFilename)
if err != nil {
@@ -319,49 +335,31 @@ func processAppInputFile() (args [][]byte, accounts []string, foreignApps []uint
return parseAppInputs(inputs)
}
-// filterEmptyStrings filters out empty string parsed in by StringArrayVar
-// this function is added to support abi argument parsing
-// since parsing of `appArg` diverted from `StringSliceVar` to `StringArrayVar`
-func filterEmptyStrings(strSlice []string) []string {
- var newStrSlice []string
-
- for _, str := range strSlice {
- if len(str) > 0 {
- newStrSlice = append(newStrSlice, str)
- }
- }
- return newStrSlice
-}
-
-func getAppInputs() (args [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) {
- if (appArgs != nil || appStrAccounts != nil || foreignApps != nil) && appInputFilename != "" {
- reportErrorf("Cannot specify both command-line arguments/accounts and JSON input filename")
- }
+func getAppInputs() (args [][]byte, accounts []string, apps []uint64, assets []uint64, boxes []transactions.BoxRef) {
if appInputFilename != "" {
+ if appArgs != nil || appStrAccounts != nil || foreignApps != nil || foreignAssets != nil {
+ reportErrorf("Cannot specify both command-line arguments/resources and JSON input filename")
+ }
return processAppInputFile()
}
- var encodedArgs []appCallArg
-
- // we need to filter out empty strings from appArgs first, caused by change to `StringArrayVar`
- newAppArgs := filterEmptyStrings(appArgs)
+ // we need to ignore empty strings from appArgs because app-arg was
+ // previously a StringSliceVar, which also does that, and some test depend
+ // on it. appArgs became `StringArrayVar` in order to support abi arguments
+ // which contain commas.
- for _, arg := range newAppArgs {
- encodingValue := strings.SplitN(arg, ":", 2)
- if len(encodingValue) != 2 {
- reportErrorf("all arguments should be of the form 'encoding:value'")
- }
- encodedArg := appCallArg{
- Encoding: encodingValue[0],
- Value: encodingValue[1],
+ var encodedArgs []logic.AppCallBytes
+ for _, arg := range appArgs {
+ if len(arg) > 0 {
+ encodedArgs = append(encodedArgs, newAppCallBytes(arg))
}
- encodedArgs = append(encodedArgs, encodedArg)
}
inputs := appCallInputs{
Accounts: appStrAccounts,
- ForeignApps: getForeignApps(),
- ForeignAssets: getForeignAssets(),
+ ForeignApps: stringsToUint64(foreignApps),
+ ForeignAssets: stringsToUint64(foreignAssets),
+ Boxes: stringsToBoxRefs(appBoxes),
Args: encodedArgs,
}
@@ -450,14 +448,14 @@ var createAppCmd = &cobra.Command{
// Parse transaction parameters
approvalProg, clearProg := mustParseProgArgs()
onCompletionEnum := mustParseOnCompletion(onCompletion)
- appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ appArgs, appAccounts, foreignApps, foreignAssets, boxes := getAppInputs()
switch onCompletionEnum {
case transactions.CloseOutOC, transactions.ClearStateOC:
reportWarnf("'--on-completion %s' may be ill-formed for 'goal app create'", onCompletion)
}
- tx, err := client.MakeUnsignedAppCreateTx(onCompletionEnum, approvalProg, clearProg, globalSchema, localSchema, appArgs, appAccounts, foreignApps, foreignAssets, extraPages)
+ tx, err := client.MakeUnsignedAppCreateTx(onCompletionEnum, approvalProg, clearProg, globalSchema, localSchema, appArgs, appAccounts, foreignApps, foreignAssets, boxes, extraPages)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
@@ -502,8 +500,8 @@ var createAppCmd = &cobra.Command{
if err != nil {
reportErrorf(err.Error())
}
- if txn.TransactionResults != nil && txn.TransactionResults.CreatedAppIndex != 0 {
- reportInfof("Created app with app index %d", txn.TransactionResults.CreatedAppIndex)
+ if txn.ApplicationIndex != nil && *txn.ApplicationIndex != 0 {
+ reportInfof("Created app with app index %d", *txn.ApplicationIndex)
}
}
} else {
@@ -530,9 +528,9 @@ var updateAppCmd = &cobra.Command{
// Parse transaction parameters
approvalProg, clearProg := mustParseProgArgs()
- appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ appArgs, appAccounts, foreignApps, foreignAssets, boxes := getAppInputs()
- tx, err := client.MakeUnsignedAppUpdateTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, approvalProg, clearProg)
+ tx, err := client.MakeUnsignedAppUpdateTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, boxes, approvalProg, clearProg)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
@@ -600,9 +598,9 @@ var optInAppCmd = &cobra.Command{
dataDir, client := getDataDirAndClient()
// Parse transaction parameters
- appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ appArgs, appAccounts, foreignApps, foreignAssets, boxes := getAppInputs()
- tx, err := client.MakeUnsignedAppOptInTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets)
+ tx, err := client.MakeUnsignedAppOptInTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, boxes)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
@@ -670,9 +668,9 @@ var closeOutAppCmd = &cobra.Command{
dataDir, client := getDataDirAndClient()
// Parse transaction parameters
- appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ appArgs, appAccounts, foreignApps, foreignAssets, boxes := getAppInputs()
- tx, err := client.MakeUnsignedAppCloseOutTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets)
+ tx, err := client.MakeUnsignedAppCloseOutTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, boxes)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
@@ -740,9 +738,9 @@ var clearAppCmd = &cobra.Command{
dataDir, client := getDataDirAndClient()
// Parse transaction parameters
- appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ appArgs, appAccounts, foreignApps, foreignAssets, boxes := getAppInputs()
- tx, err := client.MakeUnsignedAppClearStateTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets)
+ tx, err := client.MakeUnsignedAppClearStateTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, boxes)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
@@ -810,9 +808,9 @@ var callAppCmd = &cobra.Command{
dataDir, client := getDataDirAndClient()
// Parse transaction parameters
- appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ appArgs, appAccounts, foreignApps, foreignAssets, boxes := getAppInputs()
- tx, err := client.MakeUnsignedAppNoOpTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets)
+ tx, err := client.MakeUnsignedAppNoOpTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, boxes)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
@@ -880,9 +878,9 @@ var deleteAppCmd = &cobra.Command{
dataDir, client := getDataDirAndClient()
// Parse transaction parameters
- appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ appArgs, appAccounts, foreignApps, foreignAssets, boxes := getAppInputs()
- tx, err := client.MakeUnsignedAppDeleteTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets)
+ tx, err := client.MakeUnsignedAppDeleteTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, boxes)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
@@ -1254,7 +1252,7 @@ var methodAppCmd = &cobra.Command{
dataDir, client := getDataDirAndClient()
// Parse transaction parameters
- appArgsParsed, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ appArgsParsed, appAccounts, foreignApps, foreignAssets, boxes := getAppInputs()
if len(appArgsParsed) > 0 {
reportErrorf("--arg and --app-arg are mutually exclusive, do not use --app-arg")
}
@@ -1371,7 +1369,7 @@ var methodAppCmd = &cobra.Command{
}
appCallTxn, err := client.MakeUnsignedApplicationCallTx(
- appIdx, applicationArgs, appAccounts, foreignApps, foreignAssets,
+ appIdx, applicationArgs, appAccounts, foreignApps, foreignAssets, boxes,
onCompletionEnum, approvalProg, clearProg, globalSchema, localSchema, extraPages)
if err != nil {
@@ -1481,7 +1479,7 @@ var methodAppCmd = &cobra.Command{
reportErrorf(err.Error())
}
- resp, err := client.PendingTransactionInformationV2(txid)
+ resp, err := client.PendingTransactionInformation(txid)
if err != nil {
reportErrorf(err.Error())
}
diff --git a/cmd/goal/asset.go b/cmd/goal/asset.go
index 5826fd083..fdf5a61a2 100644
--- a/cmd/goal/asset.go
+++ b/cmd/goal/asset.go
@@ -175,21 +175,29 @@ func lookupAssetID(cmd *cobra.Command, creator string, client libgoal.Client) {
"creator account is unknown.")
}
- response, err := client.AccountInformation(creator)
+ response, err := client.AccountInformation(creator, true)
if err != nil {
reportErrorf(errorRequestFail, err)
}
nmatch := 0
- for id, params := range response.AssetParams {
- if params.UnitName == assetUnitName {
- assetID = id
- nmatch++
+ if response.CreatedAssets != nil {
+ for _, asset := range *response.CreatedAssets {
+ params := asset.Params
+ if params.UnitName == nil && assetUnitName == "" {
+ // Since asset unit names can be left blank, try to match
+ // empty unit names in the user's account first.
+ assetID = asset.Index
+ nmatch++
+ } else if params.UnitName != nil && *params.UnitName == assetUnitName {
+ assetID = asset.Index
+ nmatch++
+ }
}
}
if nmatch == 0 {
- reportErrorf("No matches for asset unit name %s in creator %s", assetUnitName, creator)
+ reportErrorf("No matches for asset unit name %s in creator %s; assets %v", assetUnitName, creator, *response.CreatedAssets)
}
if nmatch > 1 {
@@ -316,8 +324,8 @@ var createAssetCmd = &cobra.Command{
if err != nil {
reportErrorf(err.Error())
}
- if txn.TransactionResults != nil && txn.TransactionResults.CreatedAssetIndex != 0 {
- reportInfof("Created asset with asset index %d", txn.TransactionResults.CreatedAssetIndex)
+ if txn.AssetIndex != nil && *txn.AssetIndex != 0 {
+ reportInfof("Created asset with asset index %d", *txn.AssetIndex)
}
}
} else {
@@ -654,7 +662,7 @@ var freezeAssetCmd = &cobra.Command{
},
}
-func assetDecimalsFmt(amount uint64, decimals uint32) string {
+func assetDecimalsFmt(amount uint64, decimals uint64) string {
// Just return the raw amount with no decimal if decimals is 0
if decimals == 0 {
return fmt.Sprintf("%d", amount)
@@ -662,7 +670,7 @@ func assetDecimalsFmt(amount uint64, decimals uint32) string {
// Otherwise, ensure there are decimals digits to the right of the decimal point
pow := uint64(1)
- for i := uint32(0); i < decimals; i++ {
+ for i := uint64(0); i < decimals; i++ {
pow *= 10
}
return fmt.Sprintf("%d.%0*d", amount/pow, decimals, amount%pow)
@@ -754,42 +762,56 @@ var infoAssetCmd = &cobra.Command{
accountList := makeAccountsList(dataDir)
creator := accountList.getAddressByName(assetCreator)
+ // Helper methods for dereferencing optional asset fields.
+ derefString := func(s *string) string {
+ if s == nil {
+ return ""
+ }
+ return *s
+ }
+ derefBool := func(b *bool) bool {
+ if b == nil {
+ return false
+ }
+ return *b
+ }
+
lookupAssetID(cmd, creator, client)
- params, err := client.AssetInformation(assetID)
+ asset, err := client.AssetInformation(assetID)
if err != nil {
reportErrorf(errorRequestFail, err)
}
reserveEmpty := false
- if params.ReserveAddr == "" {
+ if derefString(asset.Params.Reserve) == "" {
reserveEmpty = true
- params.ReserveAddr = params.Creator
+ asset.Params.Reserve = &asset.Params.Creator
}
- reserve, err := client.AccountInformation(params.ReserveAddr)
+ reserve, err := client.AccountAssetInformation(*asset.Params.Reserve, assetID)
if err != nil {
reportErrorf(errorRequestFail, err)
}
-
- res := reserve.Assets[assetID]
+ res := reserve.AssetHolding
fmt.Printf("Asset ID: %d\n", assetID)
- fmt.Printf("Creator: %s\n", params.Creator)
- reportInfof("Asset name: %s\n", params.AssetName)
- reportInfof("Unit name: %s\n", params.UnitName)
- fmt.Printf("Maximum issue: %s %s\n", assetDecimalsFmt(params.Total, params.Decimals), params.UnitName)
- fmt.Printf("Reserve amount: %s %s\n", assetDecimalsFmt(res.Amount, params.Decimals), params.UnitName)
- fmt.Printf("Issued: %s %s\n", assetDecimalsFmt(params.Total-res.Amount, params.Decimals), params.UnitName)
- fmt.Printf("Decimals: %d\n", params.Decimals)
- fmt.Printf("Default frozen: %v\n", params.DefaultFrozen)
- fmt.Printf("Manager address: %s\n", params.ManagerAddr)
+ fmt.Printf("Creator: %s\n", asset.Params.Creator)
+ reportInfof("Asset name: %s", derefString(asset.Params.Name))
+ reportInfof("Unit name: %s", derefString(asset.Params.UnitName))
+ reportInfof("URL: %s", derefString(asset.Params.Url))
+ fmt.Printf("Maximum issue: %s %s\n", assetDecimalsFmt(asset.Params.Total, asset.Params.Decimals), derefString(asset.Params.UnitName))
+ fmt.Printf("Reserve amount: %s %s\n", assetDecimalsFmt(res.Amount, asset.Params.Decimals), derefString(asset.Params.UnitName))
+ fmt.Printf("Issued: %s %s\n", assetDecimalsFmt(asset.Params.Total-res.Amount, asset.Params.Decimals), derefString(asset.Params.UnitName))
+ fmt.Printf("Decimals: %d\n", asset.Params.Decimals)
+ fmt.Printf("Default frozen: %v\n", derefBool(asset.Params.DefaultFrozen))
+ fmt.Printf("Manager address: %s\n", derefString(asset.Params.Manager))
if reserveEmpty {
- fmt.Printf("Reserve address: %s (Empty. Defaulting to creator)\n", params.ReserveAddr)
+ fmt.Printf("Reserve address: %s (Empty. Defaulting to creator)\n", derefString(asset.Params.Reserve))
} else {
- fmt.Printf("Reserve address: %s\n", params.ReserveAddr)
+ fmt.Printf("Reserve address: %s\n", derefString(asset.Params.Reserve))
}
- fmt.Printf("Freeze address: %s\n", params.FreezeAddr)
- fmt.Printf("Clawback address: %s\n", params.ClawbackAddr)
+ fmt.Printf("Freeze address: %s\n", derefString(asset.Params.Freeze))
+ fmt.Printf("Clawback address: %s\n", derefString(asset.Params.Clawback))
},
}
diff --git a/cmd/goal/box.go b/cmd/goal/box.go
new file mode 100644
index 000000000..f603b043d
--- /dev/null
+++ b/cmd/goal/box.go
@@ -0,0 +1,118 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "bytes"
+ "strings"
+
+ "github.com/spf13/cobra"
+)
+
+var boxName string
+var maxBoxes uint64
+
+func init() {
+ appCmd.AddCommand(appBoxCmd)
+
+ appBoxCmd.AddCommand(appBoxInfoCmd)
+ appBoxCmd.AddCommand(appBoxListCmd)
+ appBoxCmd.PersistentFlags().Uint64Var(&appIdx, "app-id", 0, "Application ID")
+ appBoxCmd.MarkFlagRequired("app-id")
+
+ appBoxInfoCmd.Flags().StringVarP(&boxName, "name", "n", "", "Application box name. Use the same form as app-arg to name the box.")
+ appBoxInfoCmd.MarkFlagRequired("name")
+
+ appBoxListCmd.Flags().Uint64VarP(&maxBoxes, "max", "m", 0, "Maximum number of boxes to list. 0 means no limit.")
+}
+
+var appBoxCmd = &cobra.Command{
+ Use: "box",
+ Short: "Read application box data",
+ Args: cobra.ArbitraryArgs,
+ Run: func(cmd *cobra.Command, args []string) {
+ // If no arguments passed, we should fallback to help
+ cmd.HelpFunc()(cmd, args)
+ },
+}
+
+var appBoxInfoCmd = &cobra.Command{
+ Use: "info",
+ Short: "Retrieve information about an application box.",
+ Args: validateNoPosArgsFn,
+ Run: func(cmd *cobra.Command, args []string) {
+ _, client := getDataDirAndClient()
+
+ // Ensure box name is specified
+ if boxName == "" {
+ reportErrorf(errorMissingBoxName)
+ }
+
+ // Get box info
+ box, err := client.GetApplicationBoxByName(appIdx, boxName)
+ if err != nil {
+ if strings.Contains(err.Error(), "box not found") {
+ reportErrorf("No box found for appid %d with name %s", appIdx, boxName)
+ }
+ reportErrorf(errorRequestFail, err)
+ }
+
+ // Print inputted box name, but check that it matches found box name first
+ // This reduces confusion of potentially receiving a different box name representation
+ boxNameBytes, err := newAppCallBytes(boxName).Raw()
+ if err != nil {
+ reportErrorf(errorInvalidBoxName, boxName, err)
+ }
+ if !bytes.Equal(box.Name, boxNameBytes) {
+ reportErrorf(errorBoxNameMismatch, box.Name, boxNameBytes)
+ }
+ reportInfof("Name: %s", boxName)
+
+ // Print box value
+ reportInfof("Value: %s", encodeBytesAsAppCallBytes(box.Value))
+ },
+}
+
+var appBoxListCmd = &cobra.Command{
+ Use: "list",
+ Short: "List all application boxes belonging to an application",
+ Long: "List all application boxes belonging to an application.\n" +
+ "For printable strings, the box name is formatted as 'str:hello'\n" +
+ "For everything else, the box name is formatted as 'b64:A=='. ",
+ Args: validateNoPosArgsFn,
+ Run: func(cmd *cobra.Command, args []string) {
+ _, client := getDataDirAndClient()
+
+ // Get app boxes
+ boxesRes, err := client.ApplicationBoxes(appIdx, maxBoxes)
+ if err != nil {
+ reportErrorf(errorRequestFail, err)
+ }
+ boxes := boxesRes.Boxes
+
+ // Error if no boxes found
+ if len(boxes) == 0 {
+ reportErrorf("No boxes found for appid %d", appIdx)
+ }
+
+ // Print app boxes
+ for _, descriptor := range boxes {
+ encodedName := encodeBytesAsAppCallBytes(descriptor.Name)
+ reportInfof("%s", encodedName)
+ }
+ },
+}
diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go
index 8ab329326..b91425e40 100644
--- a/cmd/goal/clerk.go
+++ b/cmd/goal/clerk.go
@@ -29,8 +29,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
- v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
@@ -156,44 +155,43 @@ var clerkCmd = &cobra.Command{
},
}
-func waitForCommit(client libgoal.Client, txid string, transactionLastValidRound uint64) (txn v1.Transaction, err error) {
+func waitForCommit(client libgoal.Client, txid string, transactionLastValidRound uint64) (txn model.PendingTransactionResponse, err error) {
// Get current round information
stat, err := client.Status()
if err != nil {
- return v1.Transaction{}, fmt.Errorf(errorRequestFail, err)
+ return model.PendingTransactionResponse{}, fmt.Errorf(errorRequestFail, err)
}
for {
// Check if we know about the transaction yet
txn, err = client.PendingTransactionInformation(txid)
if err != nil {
- return v1.Transaction{}, fmt.Errorf(errorRequestFail, err)
+ return model.PendingTransactionResponse{}, fmt.Errorf(errorRequestFail, err)
}
- if txn.ConfirmedRound > 0 {
- reportInfof(infoTxCommitted, txid, txn.ConfirmedRound)
+ if txn.ConfirmedRound != nil && *txn.ConfirmedRound > 0 {
+ reportInfof(infoTxCommitted, txid, *txn.ConfirmedRound)
break
}
if txn.PoolError != "" {
- return v1.Transaction{}, fmt.Errorf(txPoolError, txid, txn.PoolError)
+ return model.PendingTransactionResponse{}, fmt.Errorf(txPoolError, txid, txn.PoolError)
}
// check if we've already committed to the block number equals to the transaction's last valid round.
// if this is the case, the transaction would not be included in the blockchain, and we can exit right
// here.
if transactionLastValidRound > 0 && stat.LastRound >= transactionLastValidRound {
- return v1.Transaction{}, fmt.Errorf(errorTransactionExpired, txid)
+ return model.PendingTransactionResponse{}, fmt.Errorf(errorTransactionExpired, txid)
}
reportInfof(infoTxPending, txid, stat.LastRound)
// WaitForRound waits until round "stat.LastRound+1" is committed
stat, err = client.WaitForRound(stat.LastRound)
if err != nil {
- return v1.Transaction{}, fmt.Errorf(errorRequestFail, err)
+ return model.PendingTransactionResponse{}, fmt.Errorf(errorRequestFail, err)
}
}
-
return
}
@@ -610,7 +608,7 @@ var rawsendCmd = &cobra.Command{
continue
}
- if txn.ConfirmedRound > 0 {
+ if txn.ConfirmedRound != nil && *txn.ConfirmedRound > 0 {
reportInfof(infoTxCommitted, txidStr, txn.ConfirmedRound)
break
}
@@ -1205,7 +1203,7 @@ var dryrunRemoteCmd = &cobra.Command{
return
}
- stackToString := func(stack []generatedV2.TealValue) string {
+ stackToString := func(stack []model.TealValue) string {
result := make([]string, len(stack))
for i, sv := range stack {
if sv.Type == uint64(basics.TealBytesType) {
@@ -1219,7 +1217,7 @@ var dryrunRemoteCmd = &cobra.Command{
if len(resp.Txns) > 0 {
for i, txnResult := range resp.Txns {
var msgs []string
- var trace []generatedV2.DryrunState
+ var trace []model.DryrunState
if txnResult.AppCallMessages != nil && len(*txnResult.AppCallMessages) > 0 {
msgs = *txnResult.AppCallMessages
if txnResult.AppCallTrace != nil {
diff --git a/cmd/goal/commands.go b/cmd/goal/commands.go
index 8fb4550c0..5440c04ec 100644
--- a/cmd/goal/commands.go
+++ b/cmd/goal/commands.go
@@ -17,6 +17,7 @@
package main
import (
+ "flag"
"fmt"
"io"
"os"
@@ -552,13 +553,23 @@ func reportErrorln(args ...interface{}) {
}
fmt.Fprintln(os.Stderr, line)
}
- os.Exit(1)
+ exit(1)
}
func reportErrorf(format string, args ...interface{}) {
reportErrorln(fmt.Sprintf(format, args...))
}
+func exit(code int) {
+ if flag.Lookup("test.v") == nil {
+ // normal run
+ os.Exit(code)
+ } else {
+ // testing run. panic, so we can require.Panic
+ panic(code)
+ }
+}
+
// writeFile is a wrapper of os.WriteFile which considers the special
// case of stdout filename
func writeFile(filename string, data []byte, perm os.FileMode) error {
diff --git a/cmd/goal/examples/boxes.teal b/cmd/goal/examples/boxes.teal
new file mode 100644
index 000000000..45e284d6b
--- /dev/null
+++ b/cmd/goal/examples/boxes.teal
@@ -0,0 +1,60 @@
+// WARNING: THIS IS NOT A PRODUCTION QUALITY PROGRAM - FOR TEST PURPOSES ONLY
+
+#pragma version 8
+ txn ApplicationID
+ bz end
+ txn ApplicationArgs 0 // [arg[0]] // fails if no args && app already exists
+ byte "create" // [arg[0], "create"] // create box named arg[1]
+ == // [arg[0]=?="create"]
+ bz del // "create" ? continue : goto del
+ int 24 // [24]
+ txn NumAppArgs // [24, NumAppArgs]
+ int 2 // [24, NumAppArgs, 2]
+ == // [24, NumAppArgs=?=2]
+ bnz default // WARNING: Assumes that when "create" provided, NumAppArgs >= 3
+ pop // get rid of 24 // NumAppArgs != 2
+ txn ApplicationArgs 2 // [arg[2]] // ERROR when NumAppArgs == 1
+ btoi // [btoi(arg[2])]
+default: // [24] // NumAppArgs >= 3
+ txn ApplicationArgs 1 // [24, arg[1]]
+ swap // [arg[1], 24]
+ box_create // [] // boxes: arg[1] -> [24]byte
+ assert
+ b end
+del: // delete box arg[1]
+ txn ApplicationArgs 0 // [arg[0]]
+ byte "delete" // [arg[0], "delete"]
+ == // [arg[0]=?="delete"]
+ bz set // "delete" ? continue : goto set
+ txn ApplicationArgs 1 // [arg[1]]
+ box_del // del boxes[arg[1]]
+ assert
+ b end
+set: // put arg[1] at start of box arg[0] ... so actually a _partial_ "set"
+ txn ApplicationArgs 0 // [arg[0]]
+ byte "set" // [arg[0], "set"]
+ == // [arg[0]=?="set"]
+ bz test // "set" ? continue : goto test
+ txn ApplicationArgs 1 // [arg[1]]
+ int 0 // [arg[1], 0]
+ txn ApplicationArgs 2 // [arg[1], 0, arg[2]]
+ box_replace // [] // boxes: arg[1] -> replace(boxes[arg[1]], 0, arg[2])
+ b end
+test: // fail unless arg[2] is the prefix of box arg[1]
+ txn ApplicationArgs 0 // [arg[0]]
+ byte "check" // [arg[0], "check"]
+ == // [arg[0]=?="check"]
+ bz bad // "check" ? continue : goto bad
+ txn ApplicationArgs 1 // [arg[1]]
+ int 0 // [arg[1], 0]
+ txn ApplicationArgs 2 // [arg[1], 0, arg[2]]
+ len // [arg[1], 0, len(arg[2])]
+ box_extract // [ boxes[arg[1]][0:len(arg[2])] ]
+ txn ApplicationArgs 2 // [ boxes[arg[1]][0:len(arg[2])], arg[2] ]
+ == // [ boxes[arg[1]][0:len(arg[2])]=?=arg[2] ]
+ assert // boxes[arg[1]].startwith(arg[2]) ? pop : ERROR
+ b end
+bad: // arg[0] ∉ {"create", "delete", "set", "check"}
+ err
+end:
+ int 1
diff --git a/cmd/goal/examples/clear.teal b/cmd/goal/examples/clear.teal
new file mode 100644
index 000000000..2b67f1fa6
--- /dev/null
+++ b/cmd/goal/examples/clear.teal
@@ -0,0 +1,2 @@
+#pragma version 8
+int 1
diff --git a/cmd/goal/formatting.go b/cmd/goal/formatting.go
index 06bfcad85..ed4b3116e 100644
--- a/cmd/goal/formatting.go
+++ b/cmd/goal/formatting.go
@@ -17,6 +17,7 @@
package main
import (
+ "encoding/base64"
"unicode"
"unicode/utf8"
@@ -190,9 +191,19 @@ func heuristicFormatVal(val basics.TealValue) basics.TealValue {
}
func heuristicFormat(state map[string]basics.TealValue) map[string]basics.TealValue {
- result := make(map[string]basics.TealValue)
+ result := make(map[string]basics.TealValue, len(state))
for k, v := range state {
result[heuristicFormatKey(k)] = heuristicFormatVal(v)
}
return result
}
+
+// Encode bytes as an app call bytes string.
+// Will use `str:` if the string is printable, otherwise `b64:`.
+func encodeBytesAsAppCallBytes(value []byte) string {
+ if isPrintable, _ := unicodePrintable(string(value)); isPrintable {
+ return "str:" + string(value)
+ }
+
+ return "b64:" + base64.StdEncoding.EncodeToString(value)
+}
diff --git a/cmd/goal/formatting_test.go b/cmd/goal/formatting_test.go
index bc3bce670..9915597d7 100644
--- a/cmd/goal/formatting_test.go
+++ b/cmd/goal/formatting_test.go
@@ -42,3 +42,74 @@ func TestUnicodePrintable(t *testing.T) {
require.Equalf(t, testElement.printableString, printableString, "test string:%s", testElement.testString)
}
}
+
+func TestNewAppCallBytes(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ acb := newAppCallBytes("int:3")
+ require.Equal(t, "int", acb.Encoding)
+ require.Equal(t, "3", acb.Value)
+ _, err := acb.Raw()
+ require.NoError(t, err)
+
+ require.Panics(t, func() { newAppCallBytes("hello") })
+}
+
+func TestNewBoxRef(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ br := newBoxRef("str:hello")
+ require.EqualValues(t, 0, br.appID)
+ require.Equal(t, "str", br.name.Encoding)
+ require.Equal(t, "hello", br.name.Value)
+
+ require.Panics(t, func() { newBoxRef("1,hello") })
+ require.Panics(t, func() { newBoxRef("hello") })
+
+ br = newBoxRef("2,str:hello")
+ require.EqualValues(t, 2, br.appID)
+ require.Equal(t, "str", br.name.Encoding)
+ require.Equal(t, "hello", br.name.Value)
+}
+
+func TestStringsToBoxRefs(t *testing.T) {
+ brs := stringsToBoxRefs([]string{"77,str:hello", "55,int:6", "int:88"})
+ require.EqualValues(t, 77, brs[0].appID)
+ require.EqualValues(t, 55, brs[1].appID)
+ require.EqualValues(t, 0, brs[2].appID)
+
+ tbrs := translateBoxRefs(brs, []uint64{55, 77})
+ require.EqualValues(t, 2, tbrs[0].Index)
+ require.EqualValues(t, 1, tbrs[1].Index)
+ require.EqualValues(t, 0, tbrs[2].Index)
+
+ require.Panics(t, func() { translateBoxRefs(stringsToBoxRefs([]string{"addr:88"}), nil) })
+ translateBoxRefs(stringsToBoxRefs([]string{"addr:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ"}), nil)
+ // if we're here, that didn't panic/exit
+
+ tbrs = translateBoxRefs(brs, []uint64{77, 55})
+ require.EqualValues(t, 1, tbrs[0].Index)
+ require.EqualValues(t, 2, tbrs[1].Index)
+ require.EqualValues(t, 0, tbrs[2].Index)
+
+ require.Panics(t, func() { translateBoxRefs(brs, []uint64{55, 78}) })
+ require.Panics(t, func() { translateBoxRefs(brs, []uint64{51, 77}) })
+}
+
+func TestBytesToAppCallBytes(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ testCases := []struct {
+ input []byte
+ expected string
+ }{
+ {[]byte("unicode"), "str:unicode"},
+ {[]byte{1, 2, 3, 4}, "b64:AQIDBA=="},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.expected, func(t *testing.T) {
+ acb := encodeBytesAsAppCallBytes(tc.input)
+ require.Equal(t, tc.expected, acb)
+ })
+ }
+}
diff --git a/cmd/goal/interact.go b/cmd/goal/interact.go
index 45fb5bf8a..8777163e7 100644
--- a/cmd/goal/interact.go
+++ b/cmd/goal/interact.go
@@ -513,7 +513,7 @@ var appExecuteCmd = &cobra.Command{
var inputs appCallInputs
for _, arg := range proc.Args {
- var callArg appCallArg
+ var callArg logic.AppCallBytes
callArg.Encoding = arg.Kind
if !procFlags.Changed(arg.Name) && arg.Default != "" {
@@ -565,7 +565,7 @@ var appExecuteCmd = &cobra.Command{
appArgs := make([][]byte, len(inputs.Args))
for i, arg := range inputs.Args {
- rawValue, err := parseAppArg(arg)
+ rawValue, err := arg.Raw()
if err != nil {
reportErrorf("Could not parse argument corresponding to '%s': %v", proc.Args[i].Name, err)
}
@@ -586,7 +586,7 @@ var appExecuteCmd = &cobra.Command{
localSchema = header.Query.Local.ToStateSchema()
globalSchema = header.Query.Global.ToStateSchema()
}
- tx, err := client.MakeUnsignedApplicationCallTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, onCompletion, approvalProg, clearProg, globalSchema, localSchema, 0)
+ tx, err := client.MakeUnsignedApplicationCallTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, nil, onCompletion, approvalProg, clearProg, globalSchema, localSchema, 0)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
@@ -634,8 +634,8 @@ var appExecuteCmd = &cobra.Command{
if err != nil {
reportErrorf(err.Error())
}
- if txn.TransactionResults != nil && txn.TransactionResults.CreatedAppIndex != 0 {
- reportInfof("Created app with app index %d", txn.TransactionResults.CreatedAppIndex)
+ if txn.ApplicationIndex != nil && *txn.ApplicationIndex != 0 {
+ reportInfof("Created app with app index %d", *txn.ApplicationIndex)
}
}
} else {
diff --git a/cmd/goal/ledger.go b/cmd/goal/ledger.go
index fda139bd7..d06cf3237 100644
--- a/cmd/goal/ledger.go
+++ b/cmd/goal/ledger.go
@@ -65,7 +65,7 @@ var supplyCmd = &cobra.Command{
reportErrorf(errorRequestFail, err)
}
- fmt.Printf("Round: %v\nTotal Money: %v microAlgos\nOnline Money: %v microAlgos\n", response.Round, response.TotalMoney, response.OnlineMoney)
+ fmt.Printf("Round: %v\nTotal Money: %v microAlgos\nOnline Money: %v microAlgos\n", response.CurrentRound, response.TotalMoney, response.OnlineMoney)
},
}
diff --git a/cmd/goal/messages.go b/cmd/goal/messages.go
index 35ec43efa..3fd1ce36c 100644
--- a/cmd/goal/messages.go
+++ b/cmd/goal/messages.go
@@ -64,7 +64,7 @@ const (
infoNodeStatus = "Last committed block: %d\nTime since last block: %s\nSync Time: %s\nLast consensus protocol: %s\nNext consensus protocol: %s\nRound for next consensus protocol: %d\nNext consensus protocol supported: %v"
catchupStoppedOnUnsupported = "Last supported block (%d) is committed. The next block consensus protocol is not supported. Catchup service is stopped."
infoNodeCatchpointCatchupStatus = "Last committed block: %d\nSync Time: %s\nCatchpoint: %s"
- infoNodeCatchpointCatchupAccounts = "Catchpoint total accounts: %d\nCatchpoint accounts processed: %d\nCatchpoint accounts verified: %d"
+ infoNodeCatchpointCatchupAccounts = "Catchpoint total accounts: %d\nCatchpoint accounts processed: %d\nCatchpoint accounts verified: %d\nCatchpoint total KVs: %d\nCatchpoint KVs processed: %d\nCatchpoint KVs verified: %d"
infoNodeCatchpointCatchupBlocks = "Catchpoint total blocks: %d\nCatchpoint downloaded blocks: %d"
nodeLastCatchpoint = "Last Catchpoint: %s"
errorNodeCreationIPFailure = "Parsing passed IP %v failed: need a valid IPv4 or IPv6 address with a specified port number"
@@ -100,6 +100,9 @@ const (
errorMarshalingState = "failed to encode state: %s"
errorApprovProgArgsRequired = "Exactly one of --approval-prog or --approval-prog-raw is required"
errorClearProgArgsRequired = "Exactly one of --clear-prog or --clear-prog-raw is required"
+ errorMissingBoxName = "Box --name is required"
+ errorInvalidBoxName = "Failed to parse box name %s. It must have the same form as app-arg. Error: %s"
+ errorBoxNameMismatch = "Inputted box name %s does not match box name %s received from algod"
// Clerk
infoTxIssued = "Sent %d MicroAlgos from account %s to address %s, transaction ID: %s. Fee set to %d"
diff --git a/cmd/goal/node.go b/cmd/goal/node.go
index 68654055a..a17551c47 100644
--- a/cmd/goal/node.go
+++ b/cmd/goal/node.go
@@ -33,9 +33,8 @@ import (
"github.com/spf13/cobra"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
-
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/network"
@@ -437,7 +436,7 @@ func getStatus(dataDir string) {
}
}
-func makeStatusString(stat generatedV2.NodeStatusResponse) string {
+func makeStatusString(stat model.NodeStatusResponse) string {
lastRoundTime := fmt.Sprintf("%.1fs", time.Duration(stat.TimeSinceLastRound).Seconds())
catchupTime := fmt.Sprintf("%.1fs", time.Duration(stat.CatchupTime).Seconds())
var statusString string
@@ -468,7 +467,8 @@ func makeStatusString(stat generatedV2.NodeStatusResponse) string {
if stat.CatchpointTotalAccounts != nil && (*stat.CatchpointTotalAccounts > 0) && stat.CatchpointProcessedAccounts != nil {
statusString = statusString + "\n" + fmt.Sprintf(infoNodeCatchpointCatchupAccounts, *stat.CatchpointTotalAccounts,
- *stat.CatchpointProcessedAccounts, *stat.CatchpointVerifiedAccounts)
+ *stat.CatchpointProcessedAccounts, *stat.CatchpointVerifiedAccounts,
+ *stat.CatchpointTotalKvs, *stat.CatchpointProcessedKvs, *stat.CatchpointVerifiedKvs)
}
if stat.CatchpointAcquiredBlocks != nil && stat.CatchpointTotalBlocks != nil && (*stat.CatchpointAcquiredBlocks+*stat.CatchpointTotalBlocks > 0) {
statusString = statusString + "\n" + fmt.Sprintf(infoNodeCatchpointCatchupBlocks, *stat.CatchpointTotalBlocks,
@@ -525,23 +525,23 @@ var pendingTxnsCmd = &cobra.Command{
Run: func(cmd *cobra.Command, _ []string) {
onDataDirs(func(dataDir string) {
client := ensureAlgodClient(dataDir)
- statusTxnPool, err := client.GetPendingTransactions(maxPendingTransactions)
+ statusTxnPool, err := client.GetParsedPendingTransactions(maxPendingTransactions)
if err != nil {
reportErrorf(errorNodeStatus, err)
}
- pendingTxns := statusTxnPool.TruncatedTxns
+ pendingTxns := statusTxnPool.TopTransactions
// do this inline for now, break it out when we need to reuse a Txn->String function
- reportInfof(infoNodePendingTxnsDescription, maxPendingTransactions, statusTxnPool.TotalTxns)
- if pendingTxns.Transactions == nil || len(pendingTxns.Transactions) == 0 {
+ reportInfof(infoNodePendingTxnsDescription, maxPendingTransactions, statusTxnPool.TotalTransactions)
+ if len(statusTxnPool.TopTransactions) == 0 {
reportInfof(infoNodeNoPendingTxnsDescription)
} else {
- for _, pendingTxn := range pendingTxns.Transactions {
+ for _, pendingTxn := range pendingTxns {
pendingTxnStr, err := json.MarshalIndent(pendingTxn, "", " ")
if err != nil {
// json parsing of the txn failed, so let's just skip printing it
- fmt.Printf("Unparseable Transaction %s\n", pendingTxn.TxID)
+ fmt.Printf("Unparseable Transaction %s\n", pendingTxn.Txn.ID().String())
continue
}
fmt.Printf("%s\n", string(pendingTxnStr))
diff --git a/cmd/loadgenerator/main.go b/cmd/loadgenerator/main.go
index 37e113471..54f1f5248 100644
--- a/cmd/loadgenerator/main.go
+++ b/cmd/loadgenerator/main.go
@@ -31,7 +31,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/passphrase"
"github.com/algorand/go-algorand/daemon/algod/api/client"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/daemon/algod/api/spec/common"
algodAcct "github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
@@ -191,7 +191,7 @@ func spendLoop(cfg config, privateKey []*crypto.SignatureSecrets, publicKey []ba
return nil
}
-func waitForRound(restClient client.RestClient, cfg config, spendingRound bool) (nodeStatus generatedV2.NodeStatusResponse) {
+func waitForRound(restClient client.RestClient, cfg config, spendingRound bool) (nodeStatus model.NodeStatusResponse) {
var err error
for {
nodeStatus, err = restClient.Status()
@@ -225,7 +225,7 @@ func waitForRound(restClient client.RestClient, cfg config, spendingRound bool)
const transactionBlockSize = 800
-func generateTransactions(restClient client.RestClient, cfg config, privateKeys []*crypto.SignatureSecrets, publicKeys []basics.Address, nodeStatus generatedV2.NodeStatusResponse) (queueFull bool) {
+func generateTransactions(restClient client.RestClient, cfg config, privateKeys []*crypto.SignatureSecrets, publicKeys []basics.Address, nodeStatus model.NodeStatusResponse) (queueFull bool) {
start := time.Now()
var err error
var vers common.Version
diff --git a/cmd/netgoal/README.md b/cmd/netgoal/README.md
new file mode 100644
index 000000000..8b1ebc870
--- /dev/null
+++ b/cmd/netgoal/README.md
@@ -0,0 +1,52 @@
+# Netgoal
+
+## netgoal generate
+`--participation-host-machines (-N)` and `--npn-host-machines (-X)` are optional parameters and they default to `--participation-algod-nodes (-n)` and `--npn-algod-nodes (-x)` respectively, i.e., they default to one host machine per algod node.
+
+### Long-Form Flags Example
+- Wallets: The command below will generate 100 wallets for the 100 participation algod nodes. By default, each NPN gets one wallet. If there are more wallets than nodes, the extra wallets will be split across the participation algod nodes.
+- Relays: 8 Relays and 8 machines to host the relays will be generated
+- Participation Nodes: 100 participation algod nodes will be distributed across 20 host machines.
+- Non-Participation Nodes (NPNs): 10 non-participation algod nodes will be distributed across 5 host machines.
+
+```
+netgoal generate -t net -r /tmp/wat -o net.json --wallets 100 --relays 8 --participation-host-machines 20 --participation-algod-nodes 100 --npn-host-machines 5 --npn-algod-nodes 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+```
+
+### Short-Form Flags Example
+The following will result in the same outcome as the command above.
+```
+netgoal generate -t net -r /tmp/wat -o net.json -w 100 -R 8 -N 20 -n 100 -X 5 -x 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+```
+## Flags
+```
+netgoal generate -h
+
+Usage:
+ netgoal generate [flags]
+
+Flags:
+ --bal stringArray Application Count
+ -h, --help help for generate
+ --naccounts uint Account count (default 31)
+ --napps uint Application Count (default 7)
+ --nassets uint Asset count (default 5)
+ --node-template string json for one node
+ --non-participating-node-template string json for non participating node
+ -x, --npn-algod-nodes int Total non-participation algod nodes to generate
+ -X, --npn-host-machines int Host machines to generate for non-participation algod nodes, default=npn-algod-nodes
+ --ntxns uint Transaction count (default 17)
+ -o, --outputfile string Output filename
+ -n, --participation-algod-nodes int Total participation algod nodes to generate (default -1)
+ -N, --participation-host-machines int Host machines to generate for participation algod nodes, default=participation-algod-nodes (default -1)
+ --relay-template string json for a relay node
+ -R, --relays int Relays to generate (default -1)
+ --rounds uint Number of rounds (default 13)
+ -t, --template string Template to generate
+ --wallet-name string Source wallet name
+ -w, --wallets int Wallets to generate (default -1)
+
+Global Flags:
+ -m, --modifier string Override Genesis Version Modifier (eg 'v1')
+ -r, --rootdir string Root directory for the private network directories
+```
diff --git a/cmd/netgoal/generate.go b/cmd/netgoal/generate.go
index 725b5e5cf..0eed602bf 100644
--- a/cmd/netgoal/generate.go
+++ b/cmd/netgoal/generate.go
@@ -37,10 +37,10 @@ import (
var outputFilename string
var templateToGenerate string
var relaysToGenerate int
-var nodesToGenerate int
-var nodeHostsToGenerate int
-var nonPartnodesToGenerate int
-var nonPartnodesHostsToGenerate int
+var participationAlgodNodes int
+var participationHostMachines int
+var npnAlgodNodes int
+var npnHostMachines int
var walletsToGenerate int
var nodeTemplatePath string
var nonParticipatingNodeTemplatePath string
@@ -63,10 +63,10 @@ func init() {
generateCmd.Flags().IntVarP(&walletsToGenerate, "wallets", "w", -1, "Wallets to generate")
generateCmd.Flags().IntVarP(&relaysToGenerate, "relays", "R", -1, "Relays to generate")
- generateCmd.Flags().IntVarP(&nodeHostsToGenerate, "node-hosts", "N", -1, "Node-hosts to generate, default=nodes")
- generateCmd.Flags().IntVarP(&nodesToGenerate, "nodes", "n", -1, "Nodes to generate")
- generateCmd.Flags().IntVarP(&nonPartnodesToGenerate, "non-participating-nodes", "X", 0, "Non participating nodes to generate")
- generateCmd.Flags().IntVarP(&nonPartnodesHostsToGenerate, "non-participating-nodes-hosts", "H", 0, "Non participating nodes hosts to generate")
+ generateCmd.Flags().IntVarP(&participationAlgodNodes, "participation-algod-nodes", "n", -1, "Total participation algod nodes to generate")
+ generateCmd.Flags().IntVarP(&participationHostMachines, "participation-host-machines", "N", -1, "Host machines to generate for participation algod nodes, default=participation-algod-nodes")
+ generateCmd.Flags().IntVarP(&npnAlgodNodes, "npn-algod-nodes", "x", 0, "Total non-participation algod nodes to generate")
+ generateCmd.Flags().IntVarP(&npnHostMachines, "npn-host-machines", "X", 0, "Host machines to generate for non-participation algod nodes, default=npn-algod-nodes")
generateCmd.Flags().StringVarP(&nodeTemplatePath, "node-template", "", "", "json for one node")
generateCmd.Flags().StringVarP(&nonParticipatingNodeTemplatePath, "non-participating-node-template", "", "", "json for non participating node")
generateCmd.Flags().StringVarP(&relayTemplatePath, "relay-template", "", "", "json for a relay node")
@@ -149,24 +149,27 @@ template modes for -t:`,
if walletsToGenerate < 0 {
reportErrorf("must specify number of wallets with -w")
}
- err = generateWalletGenesis(outputFilename, walletsToGenerate, nonPartnodesToGenerate)
+ err = generateWalletGenesis(outputFilename, walletsToGenerate, npnAlgodNodes)
case "net", "network", "goalnet":
if walletsToGenerate < 0 {
reportErrorf("must specify number of wallets with -w")
}
- if nodesToGenerate < 0 {
+ if participationAlgodNodes < 0 {
reportErrorf("must specify number of nodes with -n")
}
- if nodeHostsToGenerate < 0 {
- nodeHostsToGenerate = nodesToGenerate
+ if participationHostMachines < 0 {
+ participationHostMachines = participationAlgodNodes
+ }
+ if (npnAlgodNodes >= 0) && (npnHostMachines == 0) {
+ npnHostMachines = npnAlgodNodes
}
if relaysToGenerate < 0 {
reportErrorf("must specify number of relays with -R")
}
if templateType == "goalnet" {
- err = generateNetworkGoalTemplate(outputFilename, walletsToGenerate, relaysToGenerate, nodesToGenerate, nonPartnodesHostsToGenerate)
+ err = generateNetworkGoalTemplate(outputFilename, walletsToGenerate, relaysToGenerate, participationAlgodNodes, npnAlgodNodes)
} else {
- err = generateNetworkTemplate(outputFilename, walletsToGenerate, relaysToGenerate, nodeHostsToGenerate, nodesToGenerate, nonPartnodesHostsToGenerate, nonPartnodesToGenerate, baseNode, baseNonParticipatingNode, baseRelay)
+ err = generateNetworkTemplate(outputFilename, walletsToGenerate, relaysToGenerate, participationHostMachines, participationAlgodNodes, npnHostMachines, npnAlgodNodes, baseNode, baseNonParticipatingNode, baseRelay)
}
case "otwt":
err = generateNetworkTemplate(outputFilename, 1000, 10, 20, 100, 0, 0, baseNode, baseNonParticipatingNode, baseRelay)
@@ -234,9 +237,9 @@ func pickNodeConfig(alt []remote.NodeConfig, name string) remote.NodeConfig {
return alt[0]
}
-func generateNetworkGoalTemplate(templateFilename string, wallets, relays, nodes, npnHosts int) error {
+func generateNetworkGoalTemplate(templateFilename string, wallets, relays, nodes, npnNodes int) error {
template := netdeploy.NetworkTemplate{}
- template.Nodes = make([]remote.NodeConfigGoal, 0, relays+nodes+npnHosts)
+ template.Nodes = make([]remote.NodeConfigGoal, 0, relays+nodes+npnNodes)
template.Genesis = generateWalletGenesisData(walletsToGenerate, 0)
for i := 0; i < relays; i++ {
name := "relay" + strconv.Itoa(i+1)
@@ -257,7 +260,7 @@ func generateNetworkGoalTemplate(templateFilename string, wallets, relays, nodes
template.Nodes = append(template.Nodes, newNode)
}
- for i := 0; i < npnHosts; i++ {
+ for i := 0; i < npnNodes; i++ {
name := "nonParticipatingNode" + strconv.Itoa(i+1)
newNode := remote.NodeConfigGoal{
Name: name,
@@ -286,8 +289,8 @@ func generateNetworkGoalTemplate(templateFilename string, wallets, relays, nodes
}
}
- if npnHosts > 0 {
- for walletIndex < npnHosts {
+ if npnNodes > 0 {
+ for walletIndex < npnNodes {
for nodei, node := range template.Nodes {
if node.Name[0:4] != "nonP" {
continue
@@ -298,11 +301,11 @@ func generateNetworkGoalTemplate(templateFilename string, wallets, relays, nodes
}
template.Nodes[nodei].Wallets = append(template.Nodes[nodei].Wallets, wallet)
walletIndex++
- if walletIndex >= npnHosts {
+ if walletIndex >= npnNodes {
break
}
}
- if walletIndex >= npnHosts {
+ if walletIndex >= npnNodes {
break
}
}
@@ -478,18 +481,18 @@ func saveGoalTemplateToDisk(template netdeploy.NetworkTemplate, filename string)
return err
}
-func generateWalletGenesisData(wallets, npnHosts int) gen.GenesisData {
+func generateWalletGenesisData(wallets, npnNodes int) gen.GenesisData {
ratZero := big.NewRat(int64(0), int64(1))
ratHundred := big.NewRat(int64(100), int64(1))
data := gen.DefaultGenesis
- totalWallets := wallets + npnHosts
+ totalWallets := wallets + npnNodes
data.Wallets = make([]gen.WalletData, totalWallets)
participatingNodeStake := big.NewRat(int64(100), int64(wallets))
nonParticipatingNodeStake := ratZero
- if npnHosts > 0 {
+ if npnNodes > 0 {
// split participating an non participating stake evenly
participatingNodeStake = big.NewRat(int64(50), int64(wallets))
- nonParticipatingNodeStake = big.NewRat(int64(50), int64(npnHosts))
+ nonParticipatingNodeStake = big.NewRat(int64(50), int64(npnNodes))
}
stake := ratZero
@@ -519,8 +522,8 @@ func generateWalletGenesisData(wallets, npnHosts int) gen.GenesisData {
return data
}
-func generateWalletGenesis(filename string, wallets, npnHosts int) error {
- data := generateWalletGenesisData(wallets, npnHosts)
+func generateWalletGenesis(filename string, wallets, npnNodes int) error {
+ data := generateWalletGenesisData(wallets, npnNodes)
return saveGenesisDataToDisk(data, filename)
}
diff --git a/cmd/netgoal/network.go b/cmd/netgoal/network.go
index e7111cc4e..4ebba13f1 100644
--- a/cmd/netgoal/network.go
+++ b/cmd/netgoal/network.go
@@ -20,6 +20,7 @@ import (
"fmt"
"os"
"path/filepath"
+ "runtime/pprof"
"strings"
"github.com/spf13/cobra"
@@ -35,6 +36,8 @@ var networkName string
var networkGenesisVersionModifier string
var miscStringStringTokens []string
+var cpuprofilePath string
+
var networkUseGenesisFiles bool
var networkIgnoreExistingDir bool
var bootstrapLoadingFile bool
@@ -54,6 +57,7 @@ func init() {
networkBuildCmd.Flags().BoolVarP(&bootstrapLoadingFile, "gen-db-files", "b", false, "Generate database files.")
networkBuildCmd.Flags().BoolVarP(&networkIgnoreExistingDir, "force", "f", false, "Force generation into existing directory.")
networkBuildCmd.Flags().StringSliceVarP(&miscStringStringTokens, "val", "v", nil, "name=value, may be reapeated")
+ networkBuildCmd.Flags().StringVar(&cpuprofilePath, "cpuprofile", "", "write cpu profile to path")
rootCmd.PersistentFlags().StringVarP(&networkGenesisVersionModifier, "modifier", "m", "", "Override Genesis Version Modifier (eg 'v1')")
}
@@ -75,6 +79,18 @@ var networkBuildCmd = &cobra.Command{
}
func runBuildNetwork() (err error) {
+ if cpuprofilePath != "" {
+ f, err := os.Create(cpuprofilePath)
+ if err != nil {
+ log.Fatalf("%s: could not create CPU profile, %v", cpuprofilePath, err)
+ }
+ defer f.Close() // error handling omitted for example
+ if err := pprof.StartCPUProfile(f); err != nil {
+ log.Fatalf("%s: could not start CPU profile, %v", cpuprofilePath, err)
+ }
+ defer pprof.StopCPUProfile()
+ }
+
networkRootDir, err := filepath.Abs(networkRootDir)
if err != nil {
return
diff --git a/cmd/opdoc/tmLanguage.go b/cmd/opdoc/tmLanguage.go
index 7c193d01e..6501babc0 100644
--- a/cmd/opdoc/tmLanguage.go
+++ b/cmd/opdoc/tmLanguage.go
@@ -169,6 +169,7 @@ func buildSyntaxHighlight() *tmLanguage {
},
},
}
+ var allAccess []string
var allArithmetics []string
var keys []string
@@ -192,11 +193,8 @@ func buildSyntaxHighlight() *tmLanguage {
Name: "keyword.other.teal",
Match: fmt.Sprintf("^(%s)\\b", strings.Join(loading, "|")),
})
- case "State Access":
- keywords.Patterns = append(keywords.Patterns, pattern{
- Name: "keyword.other.unit.teal",
- Match: fmt.Sprintf("^(%s)\\b", strings.Join(names, "|")),
- })
+ case "State Access", "Box Access":
+ allAccess = append(allAccess, names...)
// For these, accumulate into allArithmetics,
// and only add to keyword.Patterns later, when all
// have been collected.
@@ -231,6 +229,10 @@ func buildSyntaxHighlight() *tmLanguage {
}
}
keywords.Patterns = append(keywords.Patterns, pattern{
+ Name: "keyword.other.unit.teal",
+ Match: fmt.Sprintf("^(%s)\\b", strings.Join(allAccess, "|")),
+ })
+ keywords.Patterns = append(keywords.Patterns, pattern{
Name: "keyword.operator.teal",
Match: fmt.Sprintf("^(%s)\\b", strings.Join(allArithmetics, "|")),
})
diff --git a/cmd/pingpong/README.md b/cmd/pingpong/README.md
new file mode 100644
index 000000000..738e57d49
--- /dev/null
+++ b/cmd/pingpong/README.md
@@ -0,0 +1,8 @@
+# PingPong usage
+
+Example:
+`pingpong run -d {node data directory} --numapp 10 --numboxread 4 --tps 200 --refresh 1800 --numaccounts 500 --duration 120`
+
+Note: if you don't set the `--duration` parameter the test will continue running until it's stopped externally.
+
+`pingpong run -h` will describe each CLI parameter. \ No newline at end of file
diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go
index d49cea843..4e516423c 100644
--- a/cmd/pingpong/runCmd.go
+++ b/cmd/pingpong/runCmd.go
@@ -59,6 +59,20 @@ var teal string
var groupSize uint32
var numAsset uint32
var numApp uint32
+
+/*
+Note on box workloads:
+
+two different box workloads are supported in order to exercise different
+portions of the performance critical codepath while keeping the app programs
+relatively simple. The BoxUpdate workload updates the content of the boxes
+during every app call, to verify that box manipulation is performant. The BoxRead
+workload only reads the box contents, which requires every box read to work its
+way through the in memory state deltas, into the box cache, and potentially all the
+way to the database.
+*/
+var numBoxUpdate uint32
+var numBoxRead uint32
var numAppOptIn uint32
var appProgOps uint32
var appProgHashes uint32
@@ -105,6 +119,8 @@ func init() {
runCmd.Flags().Uint32Var(&groupSize, "groupsize", 1, "The number of transactions in each group")
runCmd.Flags().Uint32Var(&numAsset, "numasset", 0, "The number of assets each account holds")
runCmd.Flags().Uint32Var(&numApp, "numapp", 0, "The total number of apps to create")
+	runCmd.Flags().Uint32Var(&numBoxUpdate, "numboxupdate", 0, "The total number of boxes each app holds, where boxes are updated each app call. Only one of numboxupdate and numboxread can be set.")
+ runCmd.Flags().Uint32Var(&numBoxRead, "numboxread", 0, "The total number of boxes each app holds, where boxes are only read each app call. Only one of numboxupdate and numboxread can be set.")
runCmd.Flags().Uint32Var(&numAppOptIn, "numappoptin", 0, "The number of apps each account opts in to")
runCmd.Flags().Uint32Var(&appProgOps, "appprogops", 0, "The approximate number of TEAL operations to perform in each ApplicationCall transaction")
runCmd.Flags().Uint32Var(&appProgHashes, "appproghashes", 0, "The number of hashes to include in the Application")
@@ -360,6 +376,32 @@ var runCmd = &cobra.Command{
cfg.AppLocalKeys = appProgLocalKeys
}
+ // verify and set numBoxUpdate
+ if numBoxUpdate != 0 && numApp == 0 {
+		reportErrorf("If number of boxes is nonzero then number of apps must also be nonzero")
+ }
+
+ if numBoxUpdate <= 8 {
+ cfg.NumBoxUpdate = numBoxUpdate
+ } else {
+ reportErrorf("Invalid number of boxes: %d, (valid number: 0 - 8)\n", numBoxUpdate)
+ }
+
+ // verify and set numBoxRead
+ if numBoxRead != 0 && numApp == 0 {
+		reportErrorf("If number of boxes is nonzero then number of apps must also be nonzero")
+ }
+
+ if numBoxRead != 0 && numBoxUpdate != 0 {
+ reportErrorf("Only one of numboxread or numboxupdate can be nonzero")
+ }
+
+ if numBoxRead <= 8 {
+ cfg.NumBoxRead = numBoxRead
+ } else {
+ reportErrorf("Invalid number of boxes: %d, (valid number: 0 - 8)\n", numBoxRead)
+ }
+
if rekey {
cfg.Rekey = rekey
if !cfg.RandomLease && !cfg.RandomNote && !cfg.RandomizeFee && !cfg.RandomizeAmt {
diff --git a/cmd/tealdbg/dryrunRequest.go b/cmd/tealdbg/dryrunRequest.go
index b1ec50cda..b6973cec9 100644
--- a/cmd/tealdbg/dryrunRequest.go
+++ b/cmd/tealdbg/dryrunRequest.go
@@ -23,7 +23,7 @@ import (
"github.com/algorand/go-algorand/protocol"
v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
)
// ddrFromParams converts serialized DryrunRequest to v2.DryrunRequest
@@ -32,7 +32,7 @@ func ddrFromParams(dp *DebugParams) (ddr v2.DryrunRequest, err error) {
return
}
- var gdr generatedV2.DryrunRequest
+ var gdr model.DryrunRequest
err1 := protocol.DecodeJSON(dp.DdrBlob, &gdr)
if err1 == nil {
ddr, err = v2.DryrunRequestFromGenerated(&gdr)
@@ -47,7 +47,7 @@ func ddrFromParams(dp *DebugParams) (ddr v2.DryrunRequest, err error) {
return
}
-func convertAccounts(accounts []generatedV2.Account) (records []basics.BalanceRecord, err error) {
+func convertAccounts(accounts []model.Account) (records []basics.BalanceRecord, err error) {
for _, a := range accounts {
var addr basics.Address
addr, err = basics.UnmarshalChecksumAddress(a.Address)
diff --git a/cmd/tealdbg/localLedger.go b/cmd/tealdbg/localLedger.go
index c0a6cd723..b045466df 100644
--- a/cmd/tealdbg/localLedger.go
+++ b/cmd/tealdbg/localLedger.go
@@ -26,7 +26,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
@@ -42,7 +42,7 @@ type AccountIndexerResponse struct {
//
// Definition:
// data/basics/userBalance.go : AccountData
- Account generated.Account `json:"account"`
+ Account model.Account `json:"account"`
// Round at which the results were computed.
CurrentRound uint64 `json:"current-round"`
@@ -52,7 +52,7 @@ type AccountIndexerResponse struct {
type ApplicationIndexerResponse struct {
// Application index and its parameters
- Application generated.Application `json:"application,omitempty"`
+ Application model.Application `json:"application,omitempty"`
// Round at which the results were computed.
CurrentRound uint64 `json:"current-round"`
@@ -321,6 +321,10 @@ func (l *localLedger) LookupApplication(rnd basics.Round, addr basics.Address, a
return result, nil
}
+func (l *localLedger) LookupKv(rnd basics.Round, name string) ([]byte, error) {
+ return nil, fmt.Errorf("boxes not implemented in debugger")
+}
+
func (l *localLedger) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, error) {
ad := l.balances[addr]
// Clear RewardsBase since tealdbg has no idea about rewards level so the underlying calculation with reward will fail.
diff --git a/components/mocks/mockCatchpointCatchupAccessor.go b/components/mocks/mockCatchpointCatchupAccessor.go
index c92113d70..00758de72 100644
--- a/components/mocks/mockCatchpointCatchupAccessor.go
+++ b/components/mocks/mockCatchpointCatchupAccessor.go
@@ -52,13 +52,13 @@ func (m *MockCatchpointCatchupAccessor) ResetStagingBalances(ctx context.Context
return nil
}
-// ProgressStagingBalances deserialize the given bytes as a temporary staging balances
-func (m *MockCatchpointCatchupAccessor) ProgressStagingBalances(ctx context.Context, sectionName string, bytes []byte, progress *ledger.CatchpointCatchupAccessorProgress) (err error) {
+// ProcessStagingBalances deserialize the given bytes as a temporary staging balances
+func (m *MockCatchpointCatchupAccessor) ProcessStagingBalances(ctx context.Context, sectionName string, bytes []byte, progress *ledger.CatchpointCatchupAccessorProgress) (err error) {
return nil
}
// BuildMerkleTrie inserts the account hashes into the merkle trie
-func (m *MockCatchpointCatchupAccessor) BuildMerkleTrie(ctx context.Context, progressUpdates func(uint64)) (err error) {
+func (m *MockCatchpointCatchupAccessor) BuildMerkleTrie(ctx context.Context, progressUpdates func(uint64, uint64)) (err error) {
return nil
}
diff --git a/config/config_test.go b/config/config_test.go
index 1e1915faa..4434cc3ae 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -302,7 +302,7 @@ func TestConfigMigrateFromDisk(t *testing.T) {
a.NoError(err)
modified, err := migrate(c)
a.NoError(err)
- a.Equal(defaultLocal, modified)
+ a.Equal(defaultLocal, modified, "config-v%d.json", configVersion)
}
cNext := Local{Version: getLatestConfigVersion() + 1}
@@ -486,7 +486,7 @@ func TestLocalStructTags(t *testing.T) {
localType := reflect.TypeOf(Local{})
versionField, ok := localType.FieldByName("Version")
- require.True(t, true, ok)
+ require.True(t, ok)
ver := 0
versionTags := []string{}
for {
@@ -503,7 +503,7 @@ func TestLocalStructTags(t *testing.T) {
if field.Tag == "" {
require.Failf(t, "Field is missing versioning information", "Field Name: %s", field.Name)
}
- // the field named "Version" is tested separatly in TestLocalVersionField, so we'll be skipping
+ // the field named "Version" is tested separately in TestLocalVersionField, so we'll be skipping
// it on this test.
if field.Name == "Version" {
continue
diff --git a/config/consensus.go b/config/consensus.go
index 71b54daa7..bd67aa340 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -271,7 +271,7 @@ type ConsensusParams struct {
// be read in the transaction
MaxAppTxnForeignAssets int
- // maximum number of "foreign references" (accounts, asa, app)
+ // maximum number of "foreign references" (accounts, asa, app, boxes)
// that can be attached to a single app call.
MaxAppTotalTxnReferences int
@@ -331,6 +331,26 @@ type ConsensusParams struct {
// []byte values stored in LocalState or GlobalState key/value stores
SchemaBytesMinBalance uint64
+ // Maximum length of a box (Does not include name/key length. That is capped by MaxAppKeyLen)
+ MaxBoxSize uint64
+
+ // Minimum Balance Requirement (MBR) per box created (this accounts for a
+ // bit of overhead used to store the box bytes)
+ BoxFlatMinBalance uint64
+
+ // MBR per byte of box storage. MBR is incremented by BoxByteMinBalance * (len(name)+len(value))
+ BoxByteMinBalance uint64
+
+ // Number of box references allowed
+ MaxAppBoxReferences int
+
+ // Amount added to a txgroup's box I/O budget per box ref supplied.
+ // For reads: the sum of the sizes of all boxes in the group must be less than I/O budget
+ // For writes: the sum of the sizes of all boxes created or written must be less than I/O budget
+ // In both cases, what matters is the sizes of the boxes touched, not the
+ // number of times they are touched, or the size of the touches.
+ BytesPerBoxReference uint64
+
// maximum number of total key/value pairs allowed by a given
// LocalStateSchema (and therefore allowed in LocalState)
MaxLocalSchemaEntries uint64
@@ -1208,12 +1228,27 @@ func initConsensusProtocols() {
v33.ApprovedUpgrades[protocol.ConsensusV35] = 10000
v34.ApprovedUpgrades[protocol.ConsensusV35] = 10000
+ v36 := v35
+ v36.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
+ // Boxes (unlimited global storage)
+ v36.LogicSigVersion = 8
+ v36.MaxBoxSize = 32768
+ v36.BoxFlatMinBalance = 2500
+ v36.BoxByteMinBalance = 400
+ v36.MaxAppBoxReferences = 8
+ v36.BytesPerBoxReference = 1024
+
+ Consensus[protocol.ConsensusV36] = v36
+
+ v35.ApprovedUpgrades[protocol.ConsensusV36] = 140000
+
// ConsensusFuture is used to test features that are implemented
// but not yet released in a production protocol version.
- vFuture := v35
+ vFuture := v36
vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- vFuture.LogicSigVersion = 8 // When moving this to a release, put a new higher LogicSigVersion here
+ vFuture.LogicSigVersion = 9 // When moving this to a release, put a new higher LogicSigVersion here
Consensus[protocol.ConsensusFuture] = vFuture
@@ -1241,6 +1276,12 @@ func initConsensusProtocols() {
vAlpha4.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
Consensus[protocol.ConsensusVAlpha4] = vAlpha4
vAlpha3.ApprovedUpgrades[protocol.ConsensusVAlpha4] = 10000
+
+ // vAlpha5 uses the same parameters as v36
+ vAlpha5 := v36
+ vAlpha5.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusVAlpha5] = vAlpha5
+ vAlpha4.ApprovedUpgrades[protocol.ConsensusVAlpha5] = 10000
}
// Global defines global Algorand protocol parameters which should not be overridden.
diff --git a/config/localTemplate.go b/config/localTemplate.go
index ed6eb4493..ba9d97db7 100644
--- a/config/localTemplate.go
+++ b/config/localTemplate.go
@@ -41,7 +41,7 @@ type Local struct {
// Version tracks the current version of the defaults so we can migrate old -> new
// This is specifically important whenever we decide to change the default value
// for an existing parameter. This field tag must be updated any time we add a new version.
- Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23" version[24]:"24"`
+ Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23" version[24]:"24" version[25]:"25"`
// environmental (may be overridden)
// When enabled, stores blocks indefinitely, otherwise, only the most recent blocks
@@ -452,6 +452,14 @@ type Local struct {
// MaxAcctLookback sets the maximum lookback range for account states,
// i.e. the ledger can answer account states questions for the range Latest-MaxAcctLookback...Latest
MaxAcctLookback uint64 `version[23]:"4"`
+
+ // EnableUsageLog enables 10Hz log of CPU and RAM usage.
+ // Also adds 'algod_ram_usage` (number of bytes in use) to /metrics
+ EnableUsageLog bool `version[24]:"false"`
+
+ // MaxAPIBoxPerApplication defines the maximum total number of boxes per application that will be returned
+ // in GetApplicationBoxes REST API responses.
+ MaxAPIBoxPerApplication uint64 `version[25]:"100000"`
}
// DNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers
diff --git a/config/local_defaults.go b/config/local_defaults.go
index 2aa46eef1..487149da9 100644
--- a/config/local_defaults.go
+++ b/config/local_defaults.go
@@ -20,7 +20,7 @@
package config
var defaultLocal = Local{
- Version: 24,
+ Version: 25,
AccountUpdatesStatsInterval: 5000000000,
AccountsRebuildSynchronousMode: 1,
AgreementIncomingBundlesQueueLength: 7,
@@ -70,6 +70,7 @@ var defaultLocal = Local{
EnableRequestLogger: false,
EnableRuntimeMetrics: false,
EnableTopAccountsReporting: false,
+ EnableUsageLog: false,
EnableVerbosedTransactionSyncLogging: false,
EndpointAddress: "127.0.0.1:0",
FallbackDNSResolverAddress: "",
@@ -84,6 +85,7 @@ var defaultLocal = Local{
LogArchiveMaxAge: "",
LogArchiveName: "node.archive.log",
LogSizeLimit: 1073741824,
+ MaxAPIBoxPerApplication: 100000,
MaxAPIResourcesPerAccount: 100000,
MaxAcctLookback: 4,
MaxCatchpointDownloadDuration: 7200000000000,
diff --git a/config/version.go b/config/version.go
index 0927cc14b..d35a252ad 100644
--- a/config/version.go
+++ b/config/version.go
@@ -33,7 +33,7 @@ const VersionMajor = 3
// VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced.
// Not enforced until after initial public release (x > 0).
-const VersionMinor = 11
+const VersionMinor = 12
// Version is the type holding our full version information.
type Version struct {
diff --git a/daemon/algod/api/Makefile b/daemon/algod/api/Makefile
index cfebb428f..12bf34fe9 100644
--- a/daemon/algod/api/Makefile
+++ b/daemon/algod/api/Makefile
@@ -2,22 +2,25 @@ GOPATH := $(shell go env GOPATH)
GOPATH1 := $(firstword $(subst :, ,$(GOPATH)))
# `make all` or just `make` should be appropriate for dev work
-all: server/v2/generated/types.go server/v2/generated/routes.go server/v2/generated/private/types.go server/v2/generated/private/routes.go
+all: server/v2/generated/model/types.go server/v2/generated/nonparticipating/public/routes.go server/v2/generated/nonparticipating/private/routes.go server/v2/generated/participating/public/routes.go server/v2/generated/participating/private/routes.go
# `make generate` should be able to replace old `generate.sh` script and be appropriate for build system use
generate: oapi-codegen all
-server/v2/generated/types.go: algod.oas3.yml
- $(GOPATH1)/bin/oapi-codegen -package generated -type-mappings integer=uint64 -generate types -exclude-tags=private,common -o ./server/v2/generated/types.go algod.oas3.yml
+server/v2/generated/nonparticipating/public/routes.go: algod.oas3.yml
+ $(GOPATH1)/bin/oapi-codegen -config ./server/v2/generated/nonparticipating/public/public_routes.yml algod.oas3.yml
-server/v2/generated/routes.go: algod.oas3.yml
- $(GOPATH1)/bin/oapi-codegen -package generated -type-mappings integer=uint64 -generate server,spec -exclude-tags=private,common -o ./server/v2/generated/routes.go algod.oas3.yml
+server/v2/generated/nonparticipating/private/routes.go: algod.oas3.yml
+ $(GOPATH1)/bin/oapi-codegen -config ./server/v2/generated/nonparticipating/private/private_routes.yml algod.oas3.yml
-server/v2/generated/private/types.go: algod.oas3.yml
- $(GOPATH1)/bin/oapi-codegen -package private -type-mappings integer=uint64 -generate types -include-tags=private -o ./server/v2/generated/private/types.go algod.oas3.yml
+server/v2/generated/participating/public/routes.go: algod.oas3.yml
+ $(GOPATH1)/bin/oapi-codegen -config ./server/v2/generated/participating/public/public_routes.yml algod.oas3.yml
-server/v2/generated/private/routes.go: algod.oas3.yml
- $(GOPATH1)/bin/oapi-codegen -package private -type-mappings integer=uint64 -generate server,spec -include-tags=private -o ./server/v2/generated/private/routes.go algod.oas3.yml
+server/v2/generated/participating/private/routes.go: algod.oas3.yml
+ $(GOPATH1)/bin/oapi-codegen -config ./server/v2/generated/participating/private/private_routes.yml algod.oas3.yml
+
+server/v2/generated/model/types.go: algod.oas3.yml
+ $(GOPATH1)/bin/oapi-codegen -config ./server/v2/generated/model/model_types.yml algod.oas3.yml
algod.oas3.yml: algod.oas2.json
curl -s -X POST "https://converter.swagger.io/api/convert" -H "accept: application/json" -H "Content-Type: application/json" -d @./algod.oas2.json -o .3tmp.json
@@ -28,6 +31,6 @@ oapi-codegen: .PHONY
../../../scripts/buildtools/install_buildtools.sh -o github.com/algorand/oapi-codegen -c github.com/algorand/oapi-codegen/cmd/oapi-codegen
clean:
- rm -rf server/v2/generated/types.go server/v2/generated/routes.go server/v2/generated/private/types.go server/v2/generated/private/routes.go algod.oas3.yml
+ rm -rf server/v2/generated/model/types.go server/v2/generated/nonparticipating/public/routes.go server/v2/generated/nonparticipating/private/routes.go server/v2/generated/participating/public/routes.go server/v2/generated/participating/private/routes.go algod.oas3.yml
.PHONY:
diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json
index 6744d6149..3961ad0b8 100644
--- a/daemon/algod/api/algod.oas2.json
+++ b/daemon/algod/api/algod.oas2.json
@@ -26,6 +26,7 @@
"/health": {
"get": {
"tags": [
+ "public",
"common"
],
"produces": [
@@ -49,6 +50,7 @@
"/metrics": {
"get": {
"tags": [
+ "public",
"common"
],
"produces": [
@@ -73,6 +75,7 @@
"get": {
"description": "Returns the entire genesis file in json.",
"tags": [
+ "public",
"common"
],
"produces": [
@@ -100,6 +103,7 @@
"get": {
"description": "Returns the entire swagger spec in json.",
"tags": [
+ "public",
"common"
],
"produces": [
@@ -127,6 +131,7 @@
"get": {
"description": "Retrieves the supported API versions, binary build versions, and genesis information.",
"tags": [
+ "public",
"common"
],
"produces": [
@@ -146,6 +151,10 @@
"/v2/accounts/{address}": {
"get": {
"description": "Given a specific account public key, this call returns the accounts status, balance and spendable amounts",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"produces": [
"application/json",
"application/msgpack"
@@ -227,6 +236,10 @@
"/v2/accounts/{address}/assets/{asset-id}": {
"get": {
"description": "Given a specific account public key and asset ID, this call returns the account's asset holding and asset parameters (if either exist). Asset parameters will only be returned if the provided address is the asset's creator.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"produces": [
"application/json",
"application/msgpack"
@@ -304,6 +317,10 @@
"/v2/accounts/{address}/applications/{application-id}": {
"get": {
"description": "Given a specific account public key and application ID, this call returns the account's application local state and global state (AppLocalState and AppParams, if either exists). Global state will only be returned if the provided address is the application's creator.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"produces": [
"application/json",
"application/msgpack"
@@ -381,6 +398,10 @@
"/v2/accounts/{address}/transactions/pending": {
"get": {
"description": "Get the list of pending transactions by address, sorted by priority, in decreasing order, truncated at the end at MAX. If MAX = 0, returns all pending transactions.\n",
+ "tags": [
+ "public",
+ "participating"
+ ],
"produces": [
"application/json",
"application/msgpack"
@@ -450,6 +471,10 @@
},
"/v2/blocks/{round}": {
"get": {
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"produces": [
"application/json",
"application/msgpack"
@@ -525,6 +550,10 @@
},
"/v2/blocks/{round}/hash": {
"get": {
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"produces": [
"application/json"
],
@@ -579,6 +608,10 @@
},
"/v2/blocks/{round}/transactions/{txid}/proof": {
"get": {
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"produces": [
"application/json"
],
@@ -668,6 +701,10 @@
},
"/v2/ledger/supply": {
"get": {
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"produces": [
"application/json"
],
@@ -695,7 +732,8 @@
"/v2/participation": {
"get": {
"tags": [
- "private"
+ "private",
+ "participating"
],
"description": "Return a list of participation keys",
"produces": [
@@ -742,7 +780,8 @@
},
"post": {
"tags": [
- "private"
+ "private",
+ "participating"
],
"consumes": [
"application/msgpack"
@@ -810,7 +849,8 @@
"/v2/participation/{participation-id}": {
"delete": {
"tags": [
- "private"
+ "private",
+ "participating"
],
"description": "Delete a given participation key by ID",
"produces": [
@@ -856,7 +896,8 @@
},
"get": {
"tags": [
- "private"
+ "private",
+ "participating"
],
"description": "Given a participation ID, return information about that participation key",
"produces": [
@@ -903,7 +944,8 @@
},
"post": {
"tags": [
- "private"
+ "private",
+ "participating"
],
"description": "Given a participation ID, append state proof keys to a particular set of participation keys",
"consumes": [
@@ -976,7 +1018,8 @@
"post": {
"description": "Special management endpoint to shutdown the node. Optionally provide a timeout parameter to indicate that the node should begin shutting down after a number of seconds.",
"tags": [
- "private"
+ "private",
+ "nonparticipating"
],
"operationId": "ShutdownNode",
"parameters": [
@@ -998,6 +1041,10 @@
},
"/v2/status": {
"get": {
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"produces": [
"application/json"
],
@@ -1030,6 +1077,10 @@
},
"/v2/status/wait-for-block-after/{round}": {
"get": {
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"description": "Waits for a block to appear after round {round} and returns the node's status at the time.",
"produces": [
"application/json"
@@ -1093,6 +1144,10 @@
},
"/v2/transactions": {
"post": {
+ "tags": [
+ "public",
+ "participating"
+ ],
"consumes": [
"application/x-binary"
],
@@ -1152,6 +1207,10 @@
},
"/v2/transactions/params": {
"get": {
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"produces": [
"application/json"
],
@@ -1191,6 +1250,10 @@
"/v2/transactions/pending": {
"get": {
"description": "Get the list of pending transactions, sorted by priority, in decreasing order, truncated at the end at MAX. If MAX = 0, returns all pending transactions.\n",
+ "tags": [
+ "public",
+ "participating"
+ ],
"produces": [
"application/json",
"application/msgpack"
@@ -1239,6 +1302,10 @@
"/v2/transactions/pending/{txid}": {
"get": {
"description": "Given a transaction ID of a recently submitted transaction, it returns information about it. There are several cases when this might succeed:\n- transaction committed (committed round \u003e 0)\n- transaction still in the pool (committed round = 0, pool error = \"\")\n- transaction removed from pool due to error (committed round = 0, pool error != \"\")\nOr the transaction may have happened sufficiently long ago that the node no longer remembers it, and this will return an error.\n",
+ "tags": [
+ "public",
+ "participating"
+ ],
"produces": [
"application/json",
"application/msgpack"
@@ -1302,6 +1369,10 @@
},
"/v2/stateproofs/{round}": {
"get": {
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"produces": [
"application/json"
],
@@ -1370,6 +1441,10 @@
},
"/v2/blocks/{round}/lightheader/proof": {
"get": {
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"produces": [
"application/json"
],
@@ -1439,6 +1514,10 @@
"/v2/applications/{application-id}": {
"get": {
      "description": "Given an application ID, it returns application information including creator, approval and clear programs, global and local schemas, and global state.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"produces": [
"application/json"
],
@@ -1499,9 +1578,160 @@
}
]
},
+ "/v2/applications/{application-id}/boxes": {
+ "get": {
+ "description": "Given an application ID, return all Box names. No particular ordering is guaranteed. Request fails when client or server-side configured limits prevent returning all Box names.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Get all box names for a given application.",
+ "operationId": "GetApplicationBoxes",
+ "parameters": [
+ {
+ "type": "integer",
+ "description": "An application identifier",
+ "name": "application-id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "integer",
+ "description": "Max number of box names to return. If max is not set, or max == 0, returns all box-names.",
+ "name": "max",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "$ref": "#/responses/BoxesResponse"
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+ },
+ "parameters": [
+ {
+ "type": "integer",
+ "name": "application-id",
+ "in": "path",
+ "required": true
+ }
+ ]
+ },
+ "/v2/applications/{application-id}/box": {
+ "get": {
+ "description": "Given an application ID and box name, it returns the box name and value (each base64 encoded). Box names must be in the goal app call arg encoding form 'encoding:value'. For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Get box information for a given application.",
+ "operationId": "GetApplicationBoxByName",
+ "parameters": [
+ {
+ "type": "integer",
+ "description": "An application identifier",
+ "name": "application-id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "A box name, in the goal app call arg form 'encoding:value'. For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.",
+ "name": "name",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "$ref": "#/responses/BoxResponse"
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "404": {
+ "description": "Box Not Found",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+ },
+ "parameters": [
+ {
+ "type": "integer",
+ "name": "application-id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "name": "name",
+ "in": "query",
+ "required": true
+ }
+ ]
+ },
"/v2/assets/{asset-id}": {
"get": {
      "description": "Given an asset ID, it returns asset information including creator, name, total supply and special addresses.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"produces": [
"application/json"
],
@@ -1565,6 +1795,10 @@
"/v2/teal/compile": {
"post": {
"description": "Given TEAL source code in plain text, return base64 encoded program bytes and base32 SHA512_256 hash of program bytes (Address style). This endpoint is only enabled when a node's configuration file sets EnableDeveloperAPI to true.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"consumes": [
"text/plain"
],
@@ -1629,6 +1863,10 @@
"/v2/teal/disassemble": {
"post": {
"description": "Given the program bytes, return the TEAL source code in plain text. This endpoint is only enabled when a node's configuration file sets EnableDeveloperAPI to true.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"consumes": [
"application/x-binary"
],
@@ -1687,7 +1925,8 @@
"/v2/catchup/{catchpoint}": {
"post": {
"tags": [
- "private"
+ "private",
+ "nonparticipating"
],
"description": "Given a catchpoint, it starts catching up to this catchpoint",
"produces": [
@@ -1737,7 +1976,8 @@
},
"delete": {
"tags": [
- "private"
+ "private",
+ "nonparticipating"
],
"description": "Given a catchpoint, it aborts catching up to this catchpoint",
"produces": [
@@ -1792,6 +2032,10 @@
"/v2/teal/dryrun": {
"post": {
"description": "Executes TEAL program(s) in context and returns debugging information about the execution. This endpoint is only enabled when a node's configuration file sets EnableDeveloperAPI to true.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"consumes": [
"application/json",
"application/msgpack"
@@ -1934,6 +2178,14 @@
"description": "The count of all assets (AssetParams objects) created by this account.",
"type": "integer"
},
+ "total-boxes": {
+ "description": "\\[tbx\\] The number of existing boxes created by this account's app.",
+ "type": "integer"
+ },
+ "total-box-bytes": {
+ "description": "\\[tbxb\\] The total number of bytes used by this account's app's box keys and values.",
+ "type": "integer"
+ },
"participation": {
"$ref": "#/definitions/AccountParticipation"
},
@@ -2596,6 +2848,40 @@
}
}
},
+ "Box": {
+ "description": "Box name and its content.",
+ "type": "object",
+ "required": [
+ "name",
+ "value"
+ ],
+ "properties": {
+ "name": {
+ "description": "\\[name\\] box name, base64 encoded",
+ "type": "string",
+ "format": "byte"
+ },
+ "value": {
+ "description": "\\[value\\] box value, base64 encoded.",
+ "type": "string",
+ "format": "byte"
+ }
+ }
+ },
+ "BoxDescriptor": {
+ "description": "Box descriptor describes a Box.",
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "name": {
+ "description": "Base64 encoded box name",
+ "type": "string",
+ "format": "byte"
+ }
+ }
+ },
"Version": {
"description": "algod version information.",
"type": "object",
@@ -3237,6 +3523,18 @@
"description": "The number of accounts from the current catchpoint that have been verified so far as part of the catchup",
"type": "integer"
},
+ "catchpoint-total-kvs": {
+ "description": "The total number of key-values (KVs) included in the current catchpoint",
+ "type": "integer"
+ },
+ "catchpoint-processed-kvs": {
+ "description": "The number of key-values (KVs) from the current catchpoint that have been processed so far as part of the catchup",
+ "type": "integer"
+ },
+ "catchpoint-verified-kvs": {
+ "description": "The number of key-values (KVs) from the current catchpoint that have been verified so far as part of the catchup",
+ "type": "integer"
+ },
"catchpoint-total-blocks": {
"description": "The total number of blocks that are required to complete the current catchpoint catchup",
"type": "integer"
@@ -3393,6 +3691,29 @@
"$ref": "#/definitions/Application"
}
},
+ "BoxesResponse": {
+ "description": "Box names of an application",
+ "schema": {
+ "type": "object",
+ "required": [
+ "boxes"
+ ],
+ "properties": {
+ "boxes": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/BoxDescriptor"
+ }
+ }
+ }
+ }
+ },
+ "BoxResponse": {
+ "description": "Box information",
+ "schema": {
+ "$ref": "#/definitions/Box"
+ }
+ },
"AssetResponse": {
"description": "Asset information",
"schema": {
diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml
index 0a15f51bb..0dd7a6c2e 100644
--- a/daemon/algod/api/algod.oas3.yml
+++ b/daemon/algod/api/algod.oas3.yml
@@ -350,6 +350,37 @@
},
"description": "Encoded block object."
},
+ "BoxResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/Box"
+ }
+ }
+ },
+ "description": "Box information"
+ },
+ "BoxesResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "boxes": {
+ "items": {
+ "$ref": "#/components/schemas/BoxDescriptor"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "boxes"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Box names of an application"
+ },
"CatchpointAbortResponse": {
"content": {
"application/json": {
@@ -494,6 +525,10 @@
"description": "The number of accounts from the current catchpoint that have been processed so far as part of the catchup",
"type": "integer"
},
+ "catchpoint-processed-kvs": {
+ "description": "The number of key-values (KVs) from the current catchpoint that have been processed so far as part of the catchup",
+ "type": "integer"
+ },
"catchpoint-total-accounts": {
"description": "The total number of accounts included in the current catchpoint",
"type": "integer"
@@ -502,10 +537,18 @@
"description": "The total number of blocks that are required to complete the current catchpoint catchup",
"type": "integer"
},
+ "catchpoint-total-kvs": {
+ "description": "The total number of key-values (KVs) included in the current catchpoint",
+ "type": "integer"
+ },
"catchpoint-verified-accounts": {
"description": "The number of accounts from the current catchpoint that have been verified so far as part of the catchup",
"type": "integer"
},
+ "catchpoint-verified-kvs": {
+ "description": "The number of key-values (KVs) from the current catchpoint that have been verified so far as part of the catchup",
+ "type": "integer"
+ },
"catchup-time": {
"description": "CatchupTime in nanoseconds",
"type": "integer"
@@ -895,6 +938,14 @@
"description": "The count of all assets that have been opted in, equivalent to the count of AssetHolding objects held by this account.",
"type": "integer"
},
+ "total-box-bytes": {
+ "description": "\\[tbxb\\] The total number of bytes used by this account's app's box keys and values.",
+ "type": "integer"
+ },
+ "total-boxes": {
+ "description": "\\[tbx\\] The number of existing boxes created by this account's app.",
+ "type": "integer"
+ },
"total-created-apps": {
"description": "The count of all apps (AppParams objects) created by this account.",
"type": "integer"
@@ -1201,6 +1252,43 @@
],
"type": "object"
},
+ "Box": {
+ "description": "Box name and its content.",
+ "properties": {
+ "name": {
+ "description": "\\[name\\] box name, base64 encoded",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ },
+ "value": {
+ "description": "\\[value\\] box value, base64 encoded.",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ }
+ },
+ "required": [
+ "name",
+ "value"
+ ],
+ "type": "object"
+ },
+ "BoxDescriptor": {
+ "description": "Box descriptor describes a Box.",
+ "properties": {
+ "name": {
+ "description": "Base64 encoded box name",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ }
+ },
+ "required": [
+ "name"
+ ],
+ "type": "object"
+ },
"BuildVersion": {
"properties": {
"branch": {
@@ -1818,6 +1906,7 @@
},
"summary": "Gets the genesis information.",
"tags": [
+ "public",
"common"
]
}
@@ -1837,6 +1926,7 @@
},
"summary": "Returns OK if healthy.",
"tags": [
+ "public",
"common"
]
}
@@ -1856,6 +1946,7 @@
},
"summary": "Return metrics about algod functioning.",
"tags": [
+ "public",
"common"
]
}
@@ -1882,6 +1973,7 @@
},
"summary": "Gets the current swagger spec.",
"tags": [
+ "public",
"common"
]
}
@@ -1992,7 +2084,11 @@
"description": "Unknown Error"
}
},
- "summary": "Get account information."
+ "summary": "Get account information.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
}
},
"/v2/accounts/{address}/applications/{application-id}": {
@@ -2128,7 +2224,11 @@
"description": "Unknown Error"
}
},
- "summary": "Get account information about a given app."
+ "summary": "Get account information about a given app.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
}
},
"/v2/accounts/{address}/assets/{asset-id}": {
@@ -2264,7 +2364,11 @@
"description": "Unknown Error"
}
},
- "summary": "Get account information about a given asset."
+ "summary": "Get account information about a given asset.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
}
},
"/v2/accounts/{address}/transactions/pending": {
@@ -2424,7 +2528,11 @@
"description": "Unknown Error"
}
},
- "summary": "Get a list of unconfirmed transactions currently in the transaction pool by address."
+ "summary": "Get a list of unconfirmed transactions currently in the transaction pool by address.",
+ "tags": [
+ "public",
+ "participating"
+ ]
}
},
"/v2/applications/{application-id}": {
@@ -2498,7 +2606,185 @@
"description": "Unknown Error"
}
},
- "summary": "Get application information."
+ "summary": "Get application information.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
+ }
+ },
+ "/v2/applications/{application-id}/box": {
+ "get": {
+ "description": "Given an application ID and box name, it returns the box name and value (each base64 encoded). Box names must be in the goal app call arg encoding form 'encoding:value'. For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.",
+ "operationId": "GetApplicationBoxByName",
+ "parameters": [
+ {
+ "description": "An application identifier",
+ "in": "path",
+ "name": "application-id",
+ "required": true,
+ "schema": {
+ "type": "integer"
+ }
+ },
+ {
+ "description": "A box name, in the goal app call arg form 'encoding:value'. For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.",
+ "in": "query",
+ "name": "name",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/Box"
+ }
+ }
+ },
+ "description": "Box information"
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Bad Request"
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "404": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Box Not Found"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Get box information for a given application.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
+ }
+ },
+ "/v2/applications/{application-id}/boxes": {
+ "get": {
+ "description": "Given an application ID, return all Box names. No particular ordering is guaranteed. Request fails when client or server-side configured limits prevent returning all Box names.",
+ "operationId": "GetApplicationBoxes",
+ "parameters": [
+ {
+ "description": "An application identifier",
+ "in": "path",
+ "name": "application-id",
+ "required": true,
+ "schema": {
+ "type": "integer"
+ }
+ },
+ {
+ "description": "Max number of box names to return. If max is not set, or max == 0, returns all box-names.",
+ "in": "query",
+ "name": "max",
+ "schema": {
+ "type": "integer"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "boxes": {
+ "items": {
+ "$ref": "#/components/schemas/BoxDescriptor"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "boxes"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Box names of an application"
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Bad Request"
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Get all box names for a given application.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
}
},
"/v2/assets/{asset-id}": {
@@ -2572,7 +2858,11 @@
"description": "Unknown Error"
}
},
- "summary": "Get asset information."
+ "summary": "Get asset information.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
}
},
"/v2/blocks/{round}": {
@@ -2717,7 +3007,11 @@
"description": "Unknown Error"
}
},
- "summary": "Get the block for the given round."
+ "summary": "Get the block for the given round.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
}
},
"/v2/blocks/{round}/hash": {
@@ -2800,7 +3094,11 @@
"description": "Unknown Error"
}
},
- "summary": "Get the block hash for the block on the given round."
+ "summary": "Get the block hash for the block on the given round.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
}
},
"/v2/blocks/{round}/lightheader/proof": {
@@ -2884,7 +3182,11 @@
"description": "Unknown Error"
}
},
- "summary": "Gets a proof for a given light block header inside a state proof commitment"
+ "summary": "Gets a proof for a given light block header inside a state proof commitment",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
}
},
"/v2/blocks/{round}/transactions/{txid}/proof": {
@@ -3028,7 +3330,11 @@
"description": "Unknown error"
}
},
- "summary": "Get a proof for a transaction in a block."
+ "summary": "Get a proof for a transaction in a block.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
}
},
"/v2/catchup/{catchpoint}": {
@@ -3107,7 +3413,8 @@
},
"summary": "Aborts a catchpoint catchup.",
"tags": [
- "private"
+ "private",
+ "nonparticipating"
]
},
"post": {
@@ -3204,7 +3511,8 @@
},
"summary": "Starts a catchpoint catchup.",
"tags": [
- "private"
+ "private",
+ "nonparticipating"
]
}
},
@@ -3257,7 +3565,11 @@
"description": "Unknown Error"
}
},
- "summary": "Get the current supply reported by the ledger."
+ "summary": "Get the current supply reported by the ledger.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
}
},
"/v2/participation": {
@@ -3325,7 +3637,8 @@
},
"summary": "Return a list of participation keys",
"tags": [
- "private"
+ "private",
+ "participating"
]
},
"post": {
@@ -3419,7 +3732,8 @@
},
"summary": "Add a participation key to the node",
"tags": [
- "private"
+ "private",
+ "participating"
],
"x-codegen-request-body-name": "participationkey"
}
@@ -3490,7 +3804,8 @@
},
"summary": "Delete a given participation key by ID",
"tags": [
- "private"
+ "private",
+ "participating"
]
},
"get": {
@@ -3564,7 +3879,8 @@
},
"summary": "Get participation key info given a participation ID",
"tags": [
- "private"
+ "private",
+ "participating"
]
},
"post": {
@@ -3650,7 +3966,8 @@
},
"summary": "Append state proof keys to a participation key",
"tags": [
- "private"
+ "private",
+ "participating"
],
"x-codegen-request-body-name": "keymap"
}
@@ -3681,7 +3998,8 @@
}
},
"tags": [
- "private"
+ "private",
+ "nonparticipating"
]
}
},
@@ -3766,7 +4084,11 @@
"description": "Unknown Error"
}
},
- "summary": "Get a state proof that covers a given round"
+ "summary": "Get a state proof that covers a given round",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
}
},
"/v2/status": {
@@ -3791,6 +4113,10 @@
"description": "The number of accounts from the current catchpoint that have been processed so far as part of the catchup",
"type": "integer"
},
+ "catchpoint-processed-kvs": {
+ "description": "The number of key-values (KVs) from the current catchpoint that have been processed so far as part of the catchup",
+ "type": "integer"
+ },
"catchpoint-total-accounts": {
"description": "The total number of accounts included in the current catchpoint",
"type": "integer"
@@ -3799,10 +4125,18 @@
"description": "The total number of blocks that are required to complete the current catchpoint catchup",
"type": "integer"
},
+ "catchpoint-total-kvs": {
+ "description": "The total number of key-values (KVs) included in the current catchpoint",
+ "type": "integer"
+ },
"catchpoint-verified-accounts": {
"description": "The number of accounts from the current catchpoint that have been verified so far as part of the catchup",
"type": "integer"
},
+ "catchpoint-verified-kvs": {
+ "description": "The number of key-values (KVs) from the current catchpoint that have been verified so far as part of the catchup",
+ "type": "integer"
+ },
"catchup-time": {
"description": "CatchupTime in nanoseconds",
"type": "integer"
@@ -3880,7 +4214,11 @@
"description": "Unknown Error"
}
},
- "summary": "Gets the current node status."
+ "summary": "Gets the current node status.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
}
},
"/v2/status/wait-for-block-after/{round}": {
@@ -3918,6 +4256,10 @@
"description": "The number of accounts from the current catchpoint that have been processed so far as part of the catchup",
"type": "integer"
},
+ "catchpoint-processed-kvs": {
+ "description": "The number of key-values (KVs) from the current catchpoint that have been processed so far as part of the catchup",
+ "type": "integer"
+ },
"catchpoint-total-accounts": {
"description": "The total number of accounts included in the current catchpoint",
"type": "integer"
@@ -3926,10 +4268,18 @@
"description": "The total number of blocks that are required to complete the current catchpoint catchup",
"type": "integer"
},
+ "catchpoint-total-kvs": {
+ "description": "The total number of key-values (KVs) included in the current catchpoint",
+ "type": "integer"
+ },
"catchpoint-verified-accounts": {
"description": "The number of accounts from the current catchpoint that have been verified so far as part of the catchup",
"type": "integer"
},
+ "catchpoint-verified-kvs": {
+ "description": "The number of key-values (KVs) from the current catchpoint that have been verified so far as part of the catchup",
+ "type": "integer"
+ },
"catchup-time": {
"description": "CatchupTime in nanoseconds",
"type": "integer"
@@ -4027,7 +4377,11 @@
"description": "Unknown Error"
}
},
- "summary": "Gets the node status after waiting for the given round."
+ "summary": "Gets the node status after waiting for the given round.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
}
},
"/v2/teal/compile": {
@@ -4126,6 +4480,10 @@
}
},
"summary": "Compile TEAL source code to binary, produce its hash",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"x-codegen-request-body-name": "source"
}
},
@@ -4205,6 +4563,10 @@
}
},
"summary": "Disassemble program bytes into the TEAL source code.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"x-codegen-request-body-name": "source"
}
},
@@ -4299,6 +4661,10 @@
}
},
"summary": "Provide debugging information for a transaction (or group).",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
"x-codegen-request-body-name": "request"
}
},
@@ -4383,6 +4749,10 @@
}
},
"summary": "Broadcasts a raw transaction to the network.",
+ "tags": [
+ "public",
+ "participating"
+ ],
"x-codegen-request-body-name": "rawtxn"
}
},
@@ -4473,7 +4843,11 @@
"description": "Unknown Error"
}
},
- "summary": "Get parameters for constructing a new transaction"
+ "summary": "Get parameters for constructing a new transaction",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
}
},
"/v2/transactions/pending": {
@@ -4608,7 +4982,11 @@
"description": "Unknown Error"
}
},
- "summary": "Get a list of unconfirmed transactions currently in the transaction pool."
+ "summary": "Get a list of unconfirmed transactions currently in the transaction pool.",
+ "tags": [
+ "public",
+ "participating"
+ ]
}
},
"/v2/transactions/pending/{txid}": {
@@ -4705,7 +5083,11 @@
"description": "Unknown Error"
}
},
- "summary": "Get a specific pending transaction."
+ "summary": "Get a specific pending transaction.",
+ "tags": [
+ "public",
+ "participating"
+ ]
}
},
"/versions": {
@@ -4725,6 +5107,7 @@
}
},
"tags": [
+ "public",
"common"
]
}
diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go
index 2bece32a6..7d56a0ab7 100644
--- a/daemon/algod/api/client/restClient.go
+++ b/daemon/algod/api/client/restClient.go
@@ -29,12 +29,9 @@ import (
"github.com/google/go-querystring/query"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
- privateV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/private"
-
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/daemon/algod/api/spec/common"
- v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/protocol"
@@ -48,7 +45,7 @@ const (
// rawRequestPaths is a set of paths where the body should not be urlencoded
var rawRequestPaths = map[string]bool{
- "/v1/transactions": true,
+ "/v2/transactions": true,
"/v2/teal/dryrun": true,
"/v2/teal/compile": true,
"/v2/participation": true,
@@ -116,7 +113,33 @@ func extractError(resp *http.Response) error {
}
errorBuf, _ := io.ReadAll(resp.Body) // ignore returned error
- errorString := filterASCII(string(errorBuf))
+ var errorJSON model.ErrorResponse
+ decodeErr := json.Unmarshal(errorBuf, &errorJSON)
+
+ var errorString string
+ if decodeErr == nil {
+ if errorJSON.Data == nil {
+ // There's no additional data, so let's just use the message
+ errorString = errorJSON.Message
+ } else {
+ // There's additional data, so let's re-encode the JSON response to show everything.
+ // We do this because the original response is likely encoded with escapeHTML=true, but
+ // since this isn't a webpage that extra encoding is not preferred.
+ var buffer strings.Builder
+ enc := json.NewEncoder(&buffer)
+ enc.SetEscapeHTML(false)
+ encErr := enc.Encode(errorJSON)
+ if encErr != nil {
+ // This really shouldn't happen, but if it does let's default to errorBuf
+ errorString = string(errorBuf)
+ } else {
+ errorString = buffer.String()
+ }
+ }
+ } else {
+ errorString = string(errorBuf)
+ }
+ errorString = filterASCII(errorString)
if resp.StatusCode == http.StatusUnauthorized {
apiToken := resp.Request.Header.Get(authHeader)
@@ -134,6 +157,11 @@ func stripTransaction(tx string) string {
return tx
}
+// RawResponse is fulfilled by responses that should not be decoded as json
+type RawResponse interface {
+ SetBytes([]byte)
+}
+
// submitForm is a helper used for submitting (ex.) GETs and POSTs to the server
func (client RestClient) submitForm(response interface{}, path string, request interface{}, requestMethod string, encodeJSON bool, decodeJSON bool) error {
var err error
@@ -196,9 +224,9 @@ func (client RestClient) submitForm(response interface{}, path string, request i
}
// Response must implement RawResponse
- raw, ok := response.(v1.RawResponse)
+ raw, ok := response.(RawResponse)
if !ok {
- return fmt.Errorf("can only decode raw response into type implementing v1.RawResponse")
+ return fmt.Errorf("can only decode raw response into type implementing RawResponse")
}
bodyBytes, err := io.ReadAll(resp.Body)
@@ -216,8 +244,8 @@ func (client RestClient) get(response interface{}, path string, request interfac
}
// getRaw behaves identically to get but doesn't json decode the response, and
-// the response must implement the v1.RawResponse interface
-func (client RestClient) getRaw(response v1.RawResponse, path string, request interface{}) error {
+// the response must implement the RawResponse interface
+func (client RestClient) getRaw(response RawResponse, path string, request interface{}) error {
return client.submitForm(response, path, request, "GET", false /* encodeJSON */, false /* decodeJSON */)
}
@@ -231,13 +259,13 @@ func (client RestClient) post(response interface{}, path string, request interfa
// Status retrieves the StatusResponse from the running node
// the StatusResponse includes data like the consensus version and current round
// Not supported
-func (client RestClient) Status() (response generatedV2.NodeStatusResponse, err error) {
+func (client RestClient) Status() (response model.NodeStatusResponse, err error) {
err = client.get(&response, "/v2/status", nil)
return
}
// WaitForBlock returns the node status after waiting for the given round.
-func (client RestClient) WaitForBlock(round basics.Round) (response generatedV2.NodeStatusResponse, err error) {
+func (client RestClient) WaitForBlock(round basics.Round) (response model.NodeStatusResponse, err error) {
err = client.get(&response, fmt.Sprintf("/v2/status/wait-for-block-after/%d/", round), nil)
return
}
@@ -248,35 +276,32 @@ func (client RestClient) HealthCheck() error {
return client.get(nil, "/health", nil)
}
-func fillNodeStatusResponse(nodeStatus v1.NodeStatus) generatedV2.NodeStatusResponse {
- return generatedV2.NodeStatusResponse{
- LastRound: nodeStatus.LastRound,
- LastVersion: nodeStatus.LastVersion,
- NextVersion: nodeStatus.NextVersion,
- NextVersionRound: nodeStatus.NextVersionRound,
- NextVersionSupported: nodeStatus.NextVersionSupported,
- TimeSinceLastRound: uint64(nodeStatus.TimeSinceLastRound),
- CatchupTime: uint64(nodeStatus.CatchupTime),
- StoppedAtUnsupportedRound: nodeStatus.StoppedAtUnsupportedRound,
- }
-}
-
// StatusAfterBlock waits for a block to occur then returns the StatusResponse after that block
// blocks on the node end
// Not supported
-func (client RestClient) StatusAfterBlock(blockNum uint64) (response generatedV2.NodeStatusResponse, err error) {
+func (client RestClient) StatusAfterBlock(blockNum uint64) (response model.NodeStatusResponse, err error) {
err = client.get(&response, fmt.Sprintf("/v2/status/wait-for-block-after/%d", blockNum), nil)
return
}
type pendingTransactionsParams struct {
- Max uint64 `url:"max"`
+ Max uint64 `url:"max"`
+ Format string `url:"format"`
}
// GetPendingTransactions asks algod for a snapshot of current pending txns on the node, bounded by maxTxns.
// If maxTxns = 0, fetches as many transactions as possible.
-func (client RestClient) GetPendingTransactions(maxTxns uint64) (response v1.PendingTransactions, err error) {
- err = client.get(&response, fmt.Sprintf("/v1/transactions/pending"), pendingTransactionsParams{maxTxns})
+func (client RestClient) GetPendingTransactions(maxTxns uint64) (response model.PendingTransactionsResponse, err error) {
+ err = client.get(&response, "/v2/transactions/pending", pendingTransactionsParams{Max: maxTxns, Format: "json"})
+ return
+}
+
+// GetRawPendingTransactions gets the raw encoded msgpack transactions.
+// If maxTxns = 0, fetches as many transactions as possible.
+func (client RestClient) GetRawPendingTransactions(maxTxns uint64) (response []byte, err error) {
+ var blob Blob
+ err = client.getRaw(&blob, "/v2/transactions/pending", pendingTransactionsParams{maxTxns, "msgpack"})
+ response = blob
return
}
@@ -288,8 +313,8 @@ func (client RestClient) Versions() (response common.Version, err error) {
}
// LedgerSupply gets the supply details for the specified node's Ledger
-func (client RestClient) LedgerSupply() (response v1.Supply, err error) {
- err = client.get(&response, "/v1/ledger/supply", nil)
+func (client RestClient) LedgerSupply() (response model.SupplyResponse, err error) {
+ err = client.get(&response, "/v2/ledger/supply", nil)
return
}
@@ -330,52 +355,55 @@ type accountInformationParams struct {
Exclude string `url:"exclude"`
}
-// TransactionsByAddr returns all transactions for a PK [addr] in the [first,
-// last] rounds range.
-func (client RestClient) TransactionsByAddr(addr string, first, last, max uint64) (response v1.TransactionList, err error) {
- err = client.get(&response, fmt.Sprintf("/v1/account/%s/transactions", addr), transactionsByAddrParams{first, last, max})
+// PendingTransactionsByAddr returns all the pending transactions for an addr.
+func (client RestClient) PendingTransactionsByAddr(addr string, max uint64) (response model.PendingTransactionsResponse, err error) {
+ err = client.get(&response, fmt.Sprintf("/v2/accounts/%s/transactions/pending", addr), pendingTransactionsByAddrParams{max})
return
}
-// PendingTransactionsByAddr returns all the pending transactions for a PK [addr].
-func (client RestClient) PendingTransactionsByAddr(addr string, max uint64) (response v1.PendingTransactions, err error) {
- err = client.get(&response, fmt.Sprintf("/v1/account/%s/transactions/pending", addr), pendingTransactionsByAddrParams{max})
+// RawPendingTransactionsByAddr returns all the pending transactions for an addr in raw msgpack format.
+func (client RestClient) RawPendingTransactionsByAddr(addr string, max uint64) (response []byte, err error) {
+ var blob Blob
+ err = client.getRaw(&blob, fmt.Sprintf("/v2/accounts/%s/transactions/pending", addr), pendingTransactionsParams{max, "msgpack"})
+ response = blob
return
}
// AssetInformation gets the AssetInformationResponse associated with the passed asset index
-func (client RestClient) AssetInformation(index uint64) (response v1.AssetParams, err error) {
- err = client.get(&response, fmt.Sprintf("/v1/asset/%d", index), nil)
+func (client RestClient) AssetInformation(index uint64) (response model.Asset, err error) {
+ err = client.get(&response, fmt.Sprintf("/v2/assets/%d", index), nil)
return
}
-// Assets gets up to max assets with maximum asset index assetIdx
-func (client RestClient) Assets(assetIdx, max uint64) (response v1.AssetList, err error) {
- err = client.get(&response, "/v1/assets", assetsParams{assetIdx, max})
+// ApplicationInformation gets the ApplicationInformationResponse associated
+// with the passed application index
+func (client RestClient) ApplicationInformation(index uint64) (response model.Application, err error) {
+ err = client.get(&response, fmt.Sprintf("/v2/applications/%d", index), nil)
return
}
-// AssetInformationV2 gets the AssetInformationResponse associated with the passed asset index
-func (client RestClient) AssetInformationV2(index uint64) (response generatedV2.Asset, err error) {
- err = client.get(&response, fmt.Sprintf("/v2/assets/%d", index), nil)
- return
+type applicationBoxesParams struct {
+ Max uint64 `url:"max,omitempty"`
}
-// ApplicationInformation gets the ApplicationInformationResponse associated
-// with the passed application index
-func (client RestClient) ApplicationInformation(index uint64) (response generatedV2.Application, err error) {
- err = client.get(&response, fmt.Sprintf("/v2/applications/%d", index), nil)
+// ApplicationBoxes gets the BoxesResponse associated with the passed application ID
+func (client RestClient) ApplicationBoxes(appID uint64, maxBoxNum uint64) (response model.BoxesResponse, err error) {
+ err = client.get(&response, fmt.Sprintf("/v2/applications/%d/boxes", appID), applicationBoxesParams{maxBoxNum})
return
}
-// AccountInformation also gets the AccountInformationResponse associated with the passed address
-func (client RestClient) AccountInformation(address string) (response v1.Account, err error) {
- err = client.get(&response, fmt.Sprintf("/v1/account/%s", address), nil)
+type applicationBoxByNameParams struct {
+ Name string `url:"name"`
+}
+
+// GetApplicationBoxByName gets the BoxResponse associated with the passed application ID and box name
+func (client RestClient) GetApplicationBoxByName(appID uint64, name string) (response model.BoxResponse, err error) {
+ err = client.get(&response, fmt.Sprintf("/v2/applications/%d/box", appID), applicationBoxByNameParams{name})
return
}
-// AccountInformationV2 gets the AccountData associated with the passed address
-func (client RestClient) AccountInformationV2(address string, includeCreatables bool) (response generatedV2.Account, err error) {
+// AccountInformation gets the AccountData associated with the passed address
+func (client RestClient) AccountInformation(address string, includeCreatables bool) (response model.Account, err error) {
var infoParams accountInformationParams
if includeCreatables {
infoParams = accountInformationParams{Exclude: "none", Format: "json"}
@@ -386,7 +414,7 @@ func (client RestClient) AccountInformationV2(address string, includeCreatables
return
}
-// Blob represents arbitrary blob of data satisfying v1.RawResponse interface
+// Blob represents arbitrary blob of data satisfying RawResponse interface
type Blob []byte
// SetBytes fulfills the RawResponse interface on Blob
@@ -394,21 +422,14 @@ func (blob *Blob) SetBytes(b []byte) {
*blob = b
}
-// RawAccountInformationV2 gets the raw AccountData associated with the passed address
-func (client RestClient) RawAccountInformationV2(address string) (response []byte, err error) {
+// RawAccountInformation gets the raw AccountData associated with the passed address
+func (client RestClient) RawAccountInformation(address string) (response []byte, err error) {
var blob Blob
err = client.getRaw(&blob, fmt.Sprintf("/v2/accounts/%s", address), rawFormat{Format: "msgpack"})
response = blob
return
}
-// TransactionInformation gets information about a specific transaction involving a specific account
-func (client RestClient) TransactionInformation(accountAddress, transactionID string) (response v1.Transaction, err error) {
- transactionID = stripTransaction(transactionID)
- err = client.get(&response, fmt.Sprintf("/v1/account/%s/transaction/%s", accountAddress, transactionID), nil)
- return
-}
-
// PendingTransactionInformation gets information about a recently issued
// transaction. There are several cases when this might succeed:
//
@@ -418,22 +439,23 @@ func (client RestClient) TransactionInformation(accountAddress, transactionID st
//
// Or the transaction may have happened sufficiently long ago that the
// node no longer remembers it, and this will return an error.
-func (client RestClient) PendingTransactionInformation(transactionID string) (response v1.Transaction, err error) {
+func (client RestClient) PendingTransactionInformation(transactionID string) (response model.PendingTransactionResponse, err error) {
transactionID = stripTransaction(transactionID)
- err = client.get(&response, fmt.Sprintf("/v1/transactions/pending/%s", transactionID), nil)
+ err = client.get(&response, fmt.Sprintf("/v2/transactions/pending/%s", transactionID), nil)
return
}
-// PendingTransactionInformationV2 gets information about a recently issued transaction.
-// See PendingTransactionInformation for more details.
-func (client RestClient) PendingTransactionInformationV2(transactionID string) (response generatedV2.PendingTransactionResponse, err error) {
+// RawPendingTransactionInformation gets information about a recently issued transaction in msgpack encoded bytes.
+func (client RestClient) RawPendingTransactionInformation(transactionID string) (response []byte, err error) {
transactionID = stripTransaction(transactionID)
- err = client.get(&response, fmt.Sprintf("/v2/transactions/pending/%s", transactionID), nil)
+ var blob Blob
+ err = client.getRaw(&blob, fmt.Sprintf("/v2/transactions/pending/%s", transactionID), rawFormat{Format: "msgpack"})
+ response = blob
return
}
// AccountApplicationInformation gets account information about a given app.
-func (client RestClient) AccountApplicationInformation(accountAddress string, applicationID uint64) (response generatedV2.AccountApplicationResponse, err error) {
+func (client RestClient) AccountApplicationInformation(accountAddress string, applicationID uint64) (response model.AccountApplicationResponse, err error) {
err = client.get(&response, fmt.Sprintf("/v2/accounts/%s/applications/%d", accountAddress, applicationID), nil)
return
}
@@ -447,7 +469,7 @@ func (client RestClient) RawAccountApplicationInformation(accountAddress string,
}
// AccountAssetInformation gets account information about a given app.
-func (client RestClient) AccountAssetInformation(accountAddress string, assetID uint64) (response generatedV2.AccountAssetResponse, err error) {
+func (client RestClient) AccountAssetInformation(accountAddress string, assetID uint64) (response model.AccountAssetResponse, err error) {
err = client.get(&response, fmt.Sprintf("/v2/accounts/%s/assets/%d", accountAddress, assetID), nil)
return
}
@@ -460,21 +482,15 @@ func (client RestClient) RawAccountAssetInformation(accountAddress string, asset
return
}
-// SuggestedFee gets the recommended transaction fee from the node
-func (client RestClient) SuggestedFee() (response v1.TransactionFee, err error) {
- err = client.get(&response, "/v1/transactions/fee", nil)
- return
-}
-
// SuggestedParams gets the suggested transaction parameters
-func (client RestClient) SuggestedParams() (response v1.TransactionParams, err error) {
- err = client.get(&response, "/v1/transactions/params", nil)
+func (client RestClient) SuggestedParams() (response model.TransactionParametersResponse, err error) {
+ err = client.get(&response, "/v2/transactions/params", nil)
return
}
// SendRawTransaction gets a SignedTxn and broadcasts it to the network
-func (client RestClient) SendRawTransaction(txn transactions.SignedTxn) (response v1.TransactionID, err error) {
- err = client.post(&response, "/v1/transactions", protocol.Encode(&txn))
+func (client RestClient) SendRawTransaction(txn transactions.SignedTxn) (response model.PostTransactionsResponse, err error) {
+ err = client.post(&response, "/v2/transactions", protocol.Encode(&txn))
return
}
@@ -487,13 +503,13 @@ func (client RestClient) SendRawTransactionGroup(txgroup []transactions.SignedTx
enc = append(enc, protocol.Encode(&tx)...)
}
- var response v1.TransactionID
- return client.post(&response, "/v1/transactions", enc)
+ var response model.PostTransactionsResponse
+ return client.post(&response, "/v2/transactions", enc)
}
// Block gets the block info for the given round
-func (client RestClient) Block(round uint64) (response v1.Block, err error) {
- err = client.get(&response, fmt.Sprintf("/v1/block/%d", round), nil)
+func (client RestClient) Block(round uint64) (response model.BlockResponse, err error) {
+ err = client.get(&response, fmt.Sprintf("/v2/blocks/%d", round), nil)
return
}
@@ -513,13 +529,13 @@ func (client RestClient) Shutdown() (err error) {
}
// AbortCatchup aborts the currently running catchup
-func (client RestClient) AbortCatchup(catchpointLabel string) (response privateV2.CatchpointAbortResponse, err error) {
+func (client RestClient) AbortCatchup(catchpointLabel string) (response model.CatchpointAbortResponse, err error) {
err = client.submitForm(&response, fmt.Sprintf("/v2/catchup/%s", catchpointLabel), nil, "DELETE", false, true)
return
}
// Catchup start catching up to the give catchpoint label
-func (client RestClient) Catchup(catchpointLabel string) (response privateV2.CatchpointStartResponse, err error) {
+func (client RestClient) Catchup(catchpointLabel string) (response model.CatchpointStartResponse, err error) {
err = client.submitForm(&response, fmt.Sprintf("/v2/catchup/%s", catchpointLabel), nil, "POST", false, true)
return
}
@@ -537,7 +553,7 @@ func (client RestClient) GetGoRoutines(ctx context.Context) (goRoutines string,
// Compile compiles the given program and returned the compiled program
func (client RestClient) Compile(program []byte) (compiledProgram []byte, programHash crypto.Digest, err error) {
- var compileResponse generatedV2.CompileResponse
+ var compileResponse model.CompileResponse
err = client.submitForm(&compileResponse, "/v2/teal/compile", program, "POST", false, true)
if err != nil {
return nil, crypto.Digest{}, err
@@ -599,33 +615,39 @@ func (client RestClient) RawDryrun(data []byte) (response []byte, err error) {
return
}
+// StateProofs gets a state proof that covers a given round
+func (client RestClient) StateProofs(round uint64) (response model.StateProofResponse, err error) {
+ err = client.get(&response, fmt.Sprintf("/v2/stateproofs/%d", round), nil)
+ return
+}
+
// LightBlockHeaderProof gets a Merkle proof for the light block header of a given round.
-func (client RestClient) LightBlockHeaderProof(round uint64) (response generatedV2.LightBlockHeaderProofResponse, err error) {
+func (client RestClient) LightBlockHeaderProof(round uint64) (response model.LightBlockHeaderProofResponse, err error) {
err = client.get(&response, fmt.Sprintf("/v2/blocks/%d/lightheader/proof", round), nil)
return
}
// TransactionProof gets a Merkle proof for a transaction in a block.
-func (client RestClient) TransactionProof(txid string, round uint64, hashType crypto.HashType) (response generatedV2.TransactionProofResponse, err error) {
+func (client RestClient) TransactionProof(txid string, round uint64, hashType crypto.HashType) (response model.TransactionProofResponse, err error) {
txid = stripTransaction(txid)
err = client.get(&response, fmt.Sprintf("/v2/blocks/%d/transactions/%s/proof", round, txid), proofParams{HashType: hashType.String()})
return
}
// PostParticipationKey sends a key file to the node.
-func (client RestClient) PostParticipationKey(file []byte) (response generatedV2.PostParticipationResponse, err error) {
+func (client RestClient) PostParticipationKey(file []byte) (response model.PostParticipationResponse, err error) {
err = client.post(&response, "/v2/participation", file)
return
}
// GetParticipationKeys gets all of the participation keys
-func (client RestClient) GetParticipationKeys() (response generatedV2.ParticipationKeysResponse, err error) {
+func (client RestClient) GetParticipationKeys() (response model.ParticipationKeysResponse, err error) {
err = client.get(&response, "/v2/participation", nil)
return
}
// GetParticipationKeyByID gets a single participation key
-func (client RestClient) GetParticipationKeyByID(participationID string) (response generatedV2.ParticipationKeyResponse, err error) {
+func (client RestClient) GetParticipationKeyByID(participationID string) (response model.ParticipationKeyResponse, err error) {
err = client.get(&response, fmt.Sprintf("/v2/participation/%s", participationID), nil)
return
}
diff --git a/daemon/algod/api/generated_server.yml b/daemon/algod/api/generated_server.yml
new file mode 100644
index 000000000..404cd35cc
--- /dev/null
+++ b/daemon/algod/api/generated_server.yml
@@ -0,0 +1,12 @@
+package: generated
+generate:
+ echo-server: true
+ embedded-spec: true
+output-options:
+ exclude-tags:
+ - private
+ - common
+ type-mappings:
+ integer: uint64
+ skip-prune: true
+output: ./server/v2/generated/routes.go
diff --git a/daemon/algod/api/generated_types.yml b/daemon/algod/api/generated_types.yml
new file mode 100644
index 000000000..5a612cb00
--- /dev/null
+++ b/daemon/algod/api/generated_types.yml
@@ -0,0 +1,11 @@
+package: generated
+generate:
+ models: true
+output-options:
+ exclude-tags:
+ - private
+ - common
+ type-mappings:
+ integer: uint64
+ skip-prune: true
+output: ./server/v2/generated/types.go
diff --git a/daemon/algod/api/private_server.yml b/daemon/algod/api/private_server.yml
new file mode 100644
index 000000000..9a3755acd
--- /dev/null
+++ b/daemon/algod/api/private_server.yml
@@ -0,0 +1,11 @@
+package: private
+generate:
+ echo-server: true
+ embedded-spec: true
+output-options:
+ include-tags:
+ - private
+ type-mappings:
+ integer: uint64
+ skip-prune: true
+output: ./server/v2/generated/private/routes.go
diff --git a/daemon/algod/api/private_types.yml b/daemon/algod/api/private_types.yml
new file mode 100644
index 000000000..74ea99716
--- /dev/null
+++ b/daemon/algod/api/private_types.yml
@@ -0,0 +1,10 @@
+package: private
+generate:
+ models: true
+output-options:
+ include-tags:
+ - private
+ type-mappings:
+ integer: uint64
+ skip-prune: true
+output: ./server/v2/generated/private/types.go
diff --git a/daemon/algod/api/server/common/handlers.go b/daemon/algod/api/server/common/handlers.go
index c0d2f43a9..e32cb2645 100644
--- a/daemon/algod/api/server/common/handlers.go
+++ b/daemon/algod/api/server/common/handlers.go
@@ -23,6 +23,7 @@ import (
"github.com/labstack/echo/v4"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/daemon/algod/api"
"github.com/algorand/go-algorand/daemon/algod/api/server/lib"
"github.com/algorand/go-algorand/daemon/algod/api/spec/common"
)
@@ -66,7 +67,7 @@ func SwaggerJSON(ctx lib.ReqContext, context echo.Context) {
w := context.Response().Writer
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
- w.Write([]byte(lib.SwaggerSpecJSON))
+ _, _ = w.Write([]byte(api.SwaggerSpecJSONEmbed))
}
// HealthCheck is an httpHandler for route GET /health
diff --git a/daemon/algod/api/server/router.go b/daemon/algod/api/server/router.go
index 317214b6f..de76f6fb0 100644
--- a/daemon/algod/api/server/router.go
+++ b/daemon/algod/api/server/router.go
@@ -72,8 +72,10 @@ import (
"github.com/algorand/go-algorand/daemon/algod/api/server/lib/middlewares"
"github.com/algorand/go-algorand/daemon/algod/api/server/v1/routes"
v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/private"
+ npprivate "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/nonparticipating/private"
+ nppublic "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/nonparticipating/public"
+ pprivate "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/participating/private"
+ ppublic "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/participating/public"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/node"
"github.com/algorand/go-algorand/util/tokens"
@@ -147,8 +149,10 @@ func NewRouter(logger logging.Logger, node *node.AlgorandFullNode, shutdown <-ch
Log: logger,
Shutdown: shutdown,
}
- generated.RegisterHandlers(e, &v2Handler, apiAuthenticator)
- private.RegisterHandlers(e, &v2Handler, adminAuthenticator)
+ nppublic.RegisterHandlers(e, &v2Handler, apiAuthenticator)
+ npprivate.RegisterHandlers(e, &v2Handler, adminAuthenticator)
+ ppublic.RegisterHandlers(e, &v2Handler, apiAuthenticator)
+ pprivate.RegisterHandlers(e, &v2Handler, adminAuthenticator)
return e
}
diff --git a/daemon/algod/api/server/v2/account.go b/daemon/algod/api/server/v2/account.go
index 31725f13a..10971a622 100644
--- a/daemon/algod/api/server/v2/account.go
+++ b/daemon/algod/api/server/v2/account.go
@@ -24,34 +24,34 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
)
-// AccountDataToAccount converts basics.AccountData to v2.generated.Account
+// AccountDataToAccount converts basics.AccountData to v2.model.Account
func AccountDataToAccount(
address string, record *basics.AccountData,
lastRound basics.Round, consensus *config.ConsensusParams,
amountWithoutPendingRewards basics.MicroAlgos,
-) (generated.Account, error) {
+) (model.Account, error) {
- assets := make([]generated.AssetHolding, 0, len(record.Assets))
+ assets := make([]model.AssetHolding, 0, len(record.Assets))
for curid, holding := range record.Assets {
// Empty is ok, asset may have been deleted, so we can no
// longer fetch the creator
- holding := generated.AssetHolding{
+ holding := model.AssetHolding{
Amount: holding.Amount,
- AssetId: uint64(curid),
+ AssetID: uint64(curid),
IsFrozen: holding.Frozen,
}
assets = append(assets, holding)
}
sort.Slice(assets, func(i, j int) bool {
- return assets[i].AssetId < assets[j].AssetId
+ return assets[i].AssetID < assets[j].AssetID
})
- createdAssets := make([]generated.Asset, 0, len(record.AssetParams))
+ createdAssets := make([]model.Asset, 0, len(record.AssetParams))
for idx, params := range record.AssetParams {
asset := AssetParamsToAsset(address, idx, &params)
createdAssets = append(createdAssets, asset)
@@ -60,9 +60,9 @@ func AccountDataToAccount(
return createdAssets[i].Index < createdAssets[j].Index
})
- var apiParticipation *generated.AccountParticipation
+ var apiParticipation *model.AccountParticipation
if record.VoteID != (crypto.OneTimeSignatureVerifier{}) {
- apiParticipation = &generated.AccountParticipation{
+ apiParticipation = &model.AccountParticipation{
VoteParticipationKey: record.VoteID[:],
SelectionParticipationKey: record.SelectionID[:],
VoteFirstValid: uint64(record.VoteFirstValid),
@@ -75,7 +75,7 @@ func AccountDataToAccount(
}
}
- createdApps := make([]generated.Application, 0, len(record.AppParams))
+ createdApps := make([]model.Application, 0, len(record.AppParams))
for appIdx, appParams := range record.AppParams {
app := AppParamsToApplication(address, appIdx, &appParams)
createdApps = append(createdApps, app)
@@ -84,13 +84,13 @@ func AccountDataToAccount(
return createdApps[i].Id < createdApps[j].Id
})
- appsLocalState := make([]generated.ApplicationLocalState, 0, len(record.AppLocalStates))
+ appsLocalState := make([]model.ApplicationLocalState, 0, len(record.AppLocalStates))
for appIdx, state := range record.AppLocalStates {
localState := convertTKVToGenerated(&state.KeyValue)
- appsLocalState = append(appsLocalState, generated.ApplicationLocalState{
+ appsLocalState = append(appsLocalState, model.ApplicationLocalState{
Id: uint64(appIdx),
KeyValue: localState,
- Schema: generated.ApplicationStateSchema{
+ Schema: model.ApplicationStateSchema{
NumByteSlice: state.Schema.NumByteSlice,
NumUint: state.Schema.NumUint,
},
@@ -100,7 +100,7 @@ func AccountDataToAccount(
return appsLocalState[i].Id < appsLocalState[j].Id
})
- totalAppSchema := generated.ApplicationStateSchema{
+ totalAppSchema := model.ApplicationStateSchema{
NumByteSlice: record.TotalAppSchema.NumByteSlice,
NumUint: record.TotalAppSchema.NumUint,
}
@@ -109,12 +109,12 @@ func AccountDataToAccount(
amount := record.MicroAlgos
pendingRewards, overflowed := basics.OSubA(amount, amountWithoutPendingRewards)
if overflowed {
- return generated.Account{}, errors.New("overflow on pending reward calculation")
+ return model.Account{}, errors.New("overflow on pending reward calculation")
}
minBalance := record.MinBalance(consensus)
- return generated.Account{
+ return model.Account{
SigType: nil,
Round: uint64(lastRound),
Address: address,
@@ -136,21 +136,23 @@ func AccountDataToAccount(
TotalAppsOptedIn: uint64(len(appsLocalState)),
AppsTotalSchema: &totalAppSchema,
AppsTotalExtraPages: numOrNil(totalExtraPages),
+ TotalBoxes: numOrNil(record.TotalBoxes),
+ TotalBoxBytes: numOrNil(record.TotalBoxBytes),
MinBalance: minBalance.Raw,
}, nil
}
-func convertTKVToGenerated(tkv *basics.TealKeyValue) *generated.TealKeyValueStore {
+func convertTKVToGenerated(tkv *basics.TealKeyValue) *model.TealKeyValueStore {
if tkv == nil || len(*tkv) == 0 {
return nil
}
- converted := make(generated.TealKeyValueStore, 0, len(*tkv))
+ converted := make(model.TealKeyValueStore, 0, len(*tkv))
rawKeyBytes := make([]string, 0, len(*tkv))
for k, v := range *tkv {
- converted = append(converted, generated.TealKeyValue{
+ converted = append(converted, model.TealKeyValue{
Key: base64.StdEncoding.EncodeToString([]byte(k)),
- Value: generated.TealValue{
+ Value: model.TealValue{
Type: uint64(v.Type),
Bytes: base64.StdEncoding.EncodeToString([]byte(v.Bytes)),
Uint: v.Uint,
@@ -164,7 +166,7 @@ func convertTKVToGenerated(tkv *basics.TealKeyValue) *generated.TealKeyValueStor
return &converted
}
-func convertGeneratedTKV(akvs *generated.TealKeyValueStore) (basics.TealKeyValue, error) {
+func convertGeneratedTKV(akvs *model.TealKeyValueStore) (basics.TealKeyValue, error) {
if akvs == nil || len(*akvs) == 0 {
return nil, nil
}
@@ -192,8 +194,8 @@ func convertGeneratedTKV(akvs *generated.TealKeyValueStore) (basics.TealKeyValue
return tkv, nil
}
-// AccountToAccountData converts v2.generated.Account to basics.AccountData
-func AccountToAccountData(a *generated.Account) (basics.AccountData, error) {
+// AccountToAccountData converts v2.model.Account to basics.AccountData
+func AccountToAccountData(a *model.Account) (basics.AccountData, error) {
var voteID crypto.OneTimeSignatureVerifier
var selID crypto.VRFVerifier
var voteFirstValid basics.Round
@@ -279,7 +281,7 @@ func AccountToAccountData(a *generated.Account) (basics.AccountData, error) {
if a.Assets != nil && len(*a.Assets) > 0 {
assets = make(map[basics.AssetIndex]basics.AssetHolding, len(*a.Assets))
for _, h := range *a.Assets {
- assets[basics.AssetIndex(h.AssetId)] = basics.AssetHolding{
+ assets[basics.AssetIndex(h.AssetID)] = basics.AssetHolding{
Amount: h.Amount,
Frozen: h.IsFrozen,
}
@@ -330,6 +332,16 @@ func AccountToAccountData(a *generated.Account) (basics.AccountData, error) {
totalExtraPages = uint32(*a.AppsTotalExtraPages)
}
+ var totalBoxes uint64
+ if a.TotalBoxes != nil {
+ totalBoxes = *a.TotalBoxes
+ }
+
+ var totalBoxBytes uint64
+ if a.TotalBoxBytes != nil {
+ totalBoxBytes = *a.TotalBoxBytes
+ }
+
status, err := basics.UnmarshalStatus(a.Status)
if err != nil {
return basics.AccountData{}, err
@@ -350,6 +362,8 @@ func AccountToAccountData(a *generated.Account) (basics.AccountData, error) {
AppParams: appParams,
TotalAppSchema: totalSchema,
TotalExtraAppPages: totalExtraPages,
+ TotalBoxes: totalBoxes,
+ TotalBoxBytes: totalBoxBytes,
}
if a.AuthAddr != nil {
@@ -375,8 +389,8 @@ func AccountToAccountData(a *generated.Account) (basics.AccountData, error) {
return ad, nil
}
-// ApplicationParamsToAppParams converts generated.ApplicationParams to basics.AppParams
-func ApplicationParamsToAppParams(gap *generated.ApplicationParams) (basics.AppParams, error) {
+// ApplicationParamsToAppParams converts model.ApplicationParams to basics.AppParams
+func ApplicationParamsToAppParams(gap *model.ApplicationParams) (basics.AppParams, error) {
ap := basics.AppParams{
ApprovalProgram: gap.ApprovalProgram,
ClearStateProgram: gap.ClearStateProgram,
@@ -408,23 +422,23 @@ func ApplicationParamsToAppParams(gap *generated.ApplicationParams) (basics.AppP
return ap, nil
}
-// AppParamsToApplication converts basics.AppParams to generated.Application
-func AppParamsToApplication(creator string, appIdx basics.AppIndex, appParams *basics.AppParams) generated.Application {
+// AppParamsToApplication converts basics.AppParams to model.Application
+func AppParamsToApplication(creator string, appIdx basics.AppIndex, appParams *basics.AppParams) model.Application {
globalState := convertTKVToGenerated(&appParams.GlobalState)
extraProgramPages := uint64(appParams.ExtraProgramPages)
- app := generated.Application{
+ app := model.Application{
Id: uint64(appIdx),
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
Creator: creator,
ApprovalProgram: appParams.ApprovalProgram,
ClearStateProgram: appParams.ClearStateProgram,
ExtraProgramPages: numOrNil(extraProgramPages),
GlobalState: globalState,
- LocalStateSchema: &generated.ApplicationStateSchema{
+ LocalStateSchema: &model.ApplicationStateSchema{
NumByteSlice: appParams.LocalStateSchema.NumByteSlice,
NumUint: appParams.LocalStateSchema.NumUint,
},
- GlobalStateSchema: &generated.ApplicationStateSchema{
+ GlobalStateSchema: &model.ApplicationStateSchema{
NumByteSlice: appParams.GlobalStateSchema.NumByteSlice,
NumUint: appParams.GlobalStateSchema.NumUint,
},
@@ -433,10 +447,10 @@ func AppParamsToApplication(creator string, appIdx basics.AppIndex, appParams *b
return app
}
-// AssetParamsToAsset converts basics.AssetParams to generated.Asset
-func AssetParamsToAsset(creator string, idx basics.AssetIndex, params *basics.AssetParams) generated.Asset {
+// AssetParamsToAsset converts basics.AssetParams to model.Asset
+func AssetParamsToAsset(creator string, idx basics.AssetIndex, params *basics.AssetParams) model.Asset {
frozen := params.DefaultFrozen
- assetParams := generated.AssetParams{
+ assetParams := model.AssetParams{
Creator: creator,
Total: params.Total,
Decimals: uint64(params.Decimals),
@@ -458,7 +472,7 @@ func AssetParamsToAsset(creator string, idx basics.AssetIndex, params *basics.As
assetParams.MetadataHash = &metadataHash
}
- return generated.Asset{
+ return model.Asset{
Index: uint64(idx),
Params: assetParams,
}
diff --git a/daemon/algod/api/server/v2/account_test.go b/daemon/algod/api/server/v2/account_test.go
index 1b8bad015..c6562abee 100644
--- a/daemon/algod/api/server/v2/account_test.go
+++ b/daemon/algod/api/server/v2/account_test.go
@@ -23,7 +23,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -135,8 +135,8 @@ func TestAccount(t *testing.T) {
verifyCreatedApp(0, appIdx1, appParams1)
verifyCreatedApp(1, appIdx2, appParams2)
- makeTKV := func(k string, v interface{}) generated.TealKeyValue {
- value := generated.TealValue{}
+ makeTKV := func(k string, v interface{}) model.TealKeyValue {
+ value := model.TealValue{}
switch v.(type) {
case int:
value.Uint = uint64(v.(int))
@@ -147,13 +147,13 @@ func TestAccount(t *testing.T) {
default:
panic(fmt.Sprintf("Unknown teal type %v", t))
}
- return generated.TealKeyValue{
+ return model.TealKeyValue{
Key: b64(k),
Value: value,
}
}
- verifyAppLocalState := func(index int, appIdx basics.AppIndex, numUints, numByteSlices uint64, keyValues generated.TealKeyValueStore) {
+ verifyAppLocalState := func(index int, appIdx basics.AppIndex, numUints, numByteSlices uint64, keyValues model.TealKeyValueStore) {
require.Equal(t, uint64(appIdx), (*conv.AppsLocalState)[index].Id)
require.Equal(t, numUints, (*conv.AppsLocalState)[index].Schema.NumUint)
require.Equal(t, numByteSlices, (*conv.AppsLocalState)[index].Schema.NumByteSlice)
@@ -165,8 +165,8 @@ func TestAccount(t *testing.T) {
require.NotNil(t, conv.AppsLocalState)
require.Equal(t, 2, len(*conv.AppsLocalState))
- verifyAppLocalState(0, appIdx1, 10, 0, generated.TealKeyValueStore{makeTKV("bytes", "value1"), makeTKV("uint", 1)})
- verifyAppLocalState(1, appIdx2, 10, 0, generated.TealKeyValueStore{makeTKV("bytes", "value2"), makeTKV("uint", 2)})
+ verifyAppLocalState(0, appIdx1, 10, 0, model.TealKeyValueStore{makeTKV("bytes", "value1"), makeTKV("uint", 1)})
+ verifyAppLocalState(1, appIdx2, 10, 0, model.TealKeyValueStore{makeTKV("bytes", "value2"), makeTKV("uint", 2)})
verifyCreatedAsset := func(index int, assetIdx basics.AssetIndex, params basics.AssetParams) {
require.Equal(t, uint64(assetIdx), (*conv.CreatedAssets)[index].Index)
@@ -194,7 +194,7 @@ func TestAccount(t *testing.T) {
t.Run("IsDeterministic", func(t *testing.T) {
// convert the same account a few more times to make sure we always
- // produce the same generated.Account
+ // produce the same model.Account
for i := 0; i < 10; i++ {
anotherConv, err := AccountDataToAccount(addr, &b, round, &proto, a.MicroAlgos)
require.NoError(t, err)
diff --git a/daemon/algod/api/server/v2/dryrun.go b/daemon/algod/api/server/v2/dryrun.go
index 3079a71aa..6a3cc67c0 100644
--- a/daemon/algod/api/server/v2/dryrun.go
+++ b/daemon/algod/api/server/v2/dryrun.go
@@ -22,7 +22,7 @@ import (
"strings"
"github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
@@ -35,7 +35,7 @@ import (
)
// DryrunRequest object uploaded to /v2/teal/dryrun
-// It is the same as generated.DryrunRequest but Txns deserialized properly.
+// It is the same as model.DryrunRequest but Txns deserialized properly.
// Given the Transactions and simulated ledger state upload, run TEAL scripts and return debugging information.
// This is also used for msgp-decoding
type DryrunRequest struct {
@@ -43,9 +43,9 @@ type DryrunRequest struct {
Txns []transactions.SignedTxn `codec:"txns"` // not supposed to be serialized
// Optional, useful for testing Application Call txns.
- Accounts []generated.Account `codec:"accounts"`
+ Accounts []model.Account `codec:"accounts"`
- Apps []generated.Application `codec:"apps"`
+ Apps []model.Application `codec:"apps"`
// ProtocolVersion specifies a specific version string to operate under, otherwise whatever the current protocol of the network this algod is running in.
ProtocolVersion string `codec:"protocol-version"`
@@ -56,12 +56,12 @@ type DryrunRequest struct {
// LatestTimestamp is available to some TEAL scripts. Defaults to the latest confirmed timestamp this algod is attached to.
LatestTimestamp int64 `codec:"latest-timestamp"`
- Sources []generated.DryrunSource `codec:"sources"`
+ Sources []model.DryrunSource `codec:"sources"`
}
-// DryrunRequestFromGenerated converts generated.DryrunRequest to DryrunRequest field by fields
+// DryrunRequestFromGenerated converts model.DryrunRequest to DryrunRequest field by fields
// and re-types Txns []transactions.SignedTxn
-func DryrunRequestFromGenerated(gdr *generated.DryrunRequest) (dr DryrunRequest, err error) {
+func DryrunRequestFromGenerated(gdr *model.DryrunRequest) (dr DryrunRequest, err error) {
dr.Txns = make([]transactions.SignedTxn, 0, len(gdr.Txns))
for _, raw := range gdr.Txns {
// no transactions.SignedTxn in OAS, map[string]interface{} is not good enough
@@ -114,7 +114,7 @@ func (dr *DryrunRequest) ExpandSources() error {
type dryrunDebugReceiver struct {
disassembly string
lines []string
- history []generated.DryrunState
+ history []model.DryrunState
scratchActive []bool
}
@@ -151,14 +151,14 @@ func (ddr *dryrunDebugReceiver) updateScratch() {
}
}
-func (ddr *dryrunDebugReceiver) stateToState(state *logic.DebugState) generated.DryrunState {
- st := generated.DryrunState{
+func (ddr *dryrunDebugReceiver) stateToState(state *logic.DebugState) model.DryrunState {
+ st := model.DryrunState{
Line: uint64(state.Line),
Pc: uint64(state.PC),
}
- st.Stack = make([]generated.TealValue, len(state.Stack))
+ st.Stack = make([]model.TealValue, len(state.Stack))
for i, v := range state.Stack {
- st.Stack[i] = generated.TealValue{
+ st.Stack[i] = model.TealValue{
Uint: v.Uint,
Bytes: v.Bytes,
Type: uint64(v.Type),
@@ -169,9 +169,9 @@ func (ddr *dryrunDebugReceiver) stateToState(state *logic.DebugState) generated.
*st.Error = state.Error
}
- scratch := make([]generated.TealValue, len(state.Scratch))
+ scratch := make([]model.TealValue, len(state.Scratch))
for i, v := range state.Scratch {
- scratch[i] = generated.TealValue{
+ scratch[i] = model.TealValue{
Uint: v.Uint,
Bytes: v.Bytes,
Type: uint64(v.Type),
@@ -330,6 +330,10 @@ func (dl *dryrunLedger) LookupAsset(rnd basics.Round, addr basics.Address, aidx
return result, nil
}
+func (dl *dryrunLedger) LookupKv(rnd basics.Round, key string) ([]byte, error) {
+ return nil, fmt.Errorf("boxes not implemented in dry run")
+}
+
func (dl *dryrunLedger) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
switch ctype {
case basics.AssetCreatable:
@@ -377,7 +381,7 @@ func makeBalancesAdapter(dl *dryrunLedger, txn *transactions.Transaction, appIdx
// if dr.Sources is set it overrides appropriate entires in stxn.Lsig.Logic or Apps[i]
// important: dr.Accounts are not used for program lookup for application execution
// important: dr.ProtocolVersion is used by underlying ledger implementation so that it must exist in config.Consensus
-func doDryrunRequest(dr *DryrunRequest, response *generated.DryrunResponse) {
+func doDryrunRequest(dr *DryrunRequest, response *model.DryrunResponse) {
err := dr.ExpandSources()
if err != nil {
response.Error = err.Error()
@@ -412,9 +416,9 @@ func doDryrunRequest(dr *DryrunRequest, response *generated.DryrunResponse) {
}
ep.PooledApplicationBudget = &pooledAppBudget
- response.Txns = make([]generated.DryrunTxnResult, len(dr.Txns))
+ response.Txns = make([]model.DryrunTxnResult, len(dr.Txns))
for ti, stxn := range dr.Txns {
- var result generated.DryrunTxnResult
+ var result model.DryrunTxnResult
if len(stxn.Lsig.Logic) > 0 {
var debug dryrunDebugReceiver
ep.Debugger = &debug
@@ -446,9 +450,9 @@ func doDryrunRequest(dr *DryrunRequest, response *generated.DryrunResponse) {
if stxn.Txn.OnCompletion == transactions.OptInOC {
if idx, ok := dl.accountsIn[stxn.Txn.Sender]; ok {
acct := dl.dr.Accounts[idx]
- ls := generated.ApplicationLocalState{
+ ls := model.ApplicationLocalState{
Id: uint64(appIdx),
- KeyValue: new(generated.TealKeyValueStore),
+ KeyValue: new(model.TealKeyValueStore),
}
for _, app := range dr.Apps {
if basics.AppIndex(app.Id) == appIdx {
@@ -459,7 +463,7 @@ func doDryrunRequest(dr *DryrunRequest, response *generated.DryrunResponse) {
}
}
if acct.AppsLocalState == nil {
- lss := []generated.ApplicationLocalState{ls}
+ lss := []model.ApplicationLocalState{ls}
acct.AppsLocalState = &lss
} else {
found := false
@@ -519,13 +523,13 @@ func doDryrunRequest(dr *DryrunRequest, response *generated.DryrunResponse) {
result.AppCallTrace = &debug.history
result.GlobalDelta = StateDeltaToStateDelta(delta.GlobalDelta)
if len(delta.LocalDeltas) > 0 {
- localDeltas := make([]generated.AccountStateDelta, 0, len(delta.LocalDeltas))
+ localDeltas := make([]model.AccountStateDelta, 0, len(delta.LocalDeltas))
for k, v := range delta.LocalDeltas {
ldaddr, err2 := stxn.Txn.AddressByIndex(k, stxn.Txn.Sender)
if err2 != nil {
messages = append(messages, err2.Error())
}
- localDeltas = append(localDeltas, generated.AccountStateDelta{
+ localDeltas = append(localDeltas, model.AccountStateDelta{
Address: ldaddr.String(),
Delta: *StateDeltaToStateDelta(v),
})
@@ -581,15 +585,15 @@ func doDryrunRequest(dr *DryrunRequest, response *generated.DryrunResponse) {
}
}
-// StateDeltaToStateDelta converts basics.StateDelta to generated.StateDelta
-func StateDeltaToStateDelta(sd basics.StateDelta) *generated.StateDelta {
+// StateDeltaToStateDelta converts basics.StateDelta to model.StateDelta
+func StateDeltaToStateDelta(sd basics.StateDelta) *model.StateDelta {
if len(sd) == 0 {
return nil
}
- gsd := make(generated.StateDelta, 0, len(sd))
+ gsd := make(model.StateDelta, 0, len(sd))
for k, v := range sd {
- value := generated.EvalDelta{Action: uint64(v.Action)}
+ value := model.EvalDelta{Action: uint64(v.Action)}
if v.Action == basics.SetBytesAction {
bytesVal := base64.StdEncoding.EncodeToString([]byte(v.Bytes))
value.Bytes = &bytesVal
@@ -598,7 +602,7 @@ func StateDeltaToStateDelta(sd basics.StateDelta) *generated.StateDelta {
value.Uint = &uintVal
}
// basics.DeleteAction does not require Uint/Bytes
- kv := generated.EvalDeltaKeyValue{
+ kv := model.EvalDeltaKeyValue{
Key: base64.StdEncoding.EncodeToString([]byte(k)),
Value: value,
}
diff --git a/daemon/algod/api/server/v2/dryrun_test.go b/daemon/algod/api/server/v2/dryrun_test.go
index cfa87cb98..9ec7444e5 100644
--- a/daemon/algod/api/server/v2/dryrun_test.go
+++ b/daemon/algod/api/server/v2/dryrun_test.go
@@ -30,7 +30,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
@@ -47,7 +47,7 @@ func unB64(x string) []byte {
return out
}
-func tvStr(tv generated.TealValue) string {
+func tvStr(tv model.TealValue) string {
if tv.Type == uint64(basics.TealBytesType) {
return tv.Bytes
} else if tv.Type == uint64(basics.TealUintType) {
@@ -56,7 +56,7 @@ func tvStr(tv generated.TealValue) string {
return "UNKNOWN TEAL VALUE"
}
-func dbStack(stack []generated.TealValue) string {
+func dbStack(stack []model.TealValue) string {
parts := make([]string, len(stack))
for i, sv := range stack {
parts[i] = tvStr(sv)
@@ -68,7 +68,7 @@ func b64(s string) string {
return base64.StdEncoding.EncodeToString([]byte(s))
}
-func logTrace(t *testing.T, lines []string, trace []generated.DryrunState) {
+func logTrace(t *testing.T, lines []string, trace []model.DryrunState) {
var disasm string
for _, ds := range trace {
var line string
@@ -84,13 +84,13 @@ func logTrace(t *testing.T, lines []string, trace []generated.DryrunState) {
}
}
-func logStateDelta(t *testing.T, sd generated.StateDelta) {
+func logStateDelta(t *testing.T, sd model.StateDelta) {
for _, vd := range sd {
t.Logf("\t%s: %#v", vd.Key, vd)
}
}
-func logResponse(t *testing.T, response *generated.DryrunResponse) {
+func logResponse(t *testing.T, response *model.DryrunResponse) {
t.Log(response.Error)
for i, rt := range response.Txns {
t.Logf("txn[%d]", i)
@@ -138,7 +138,7 @@ func TestDryrunLogicSig(t *testing.T) {
t.Parallel()
var dr DryrunRequest
- var response generated.DryrunResponse
+ var response model.DryrunResponse
dr.ProtocolVersion = string(dryrunProtoVersion)
@@ -163,12 +163,12 @@ func TestDryrunLogicSigSource(t *testing.T) {
t.Parallel()
var dr DryrunRequest
- var response generated.DryrunResponse
+ var response model.DryrunResponse
dr.ProtocolVersion = string(dryrunProtoVersion)
dr.Txns = []transactions.SignedTxn{{}}
- dr.Sources = []generated.DryrunSource{
+ dr.Sources = []model.DryrunSource{
{
Source: "int 1",
FieldName: "lsig",
@@ -362,7 +362,7 @@ func init() {
config.Consensus[dryrunMakeLedgerProto] = proto
}
-func checkLogicSigPass(t *testing.T, response *generated.DryrunResponse) {
+func checkLogicSigPass(t *testing.T, response *model.DryrunResponse) {
if len(response.Txns) < 1 {
t.Error("no response txns")
} else if len(response.Txns) == 0 {
@@ -375,7 +375,7 @@ func checkLogicSigPass(t *testing.T, response *generated.DryrunResponse) {
}
}
-func checkAppCallResponse(t *testing.T, response *generated.DryrunResponse, msg string) {
+func checkAppCallResponse(t *testing.T, response *model.DryrunResponse, msg string) {
if len(response.Txns) < 1 {
t.Error("no response txns")
} else if len(response.Txns) == 0 {
@@ -388,17 +388,17 @@ func checkAppCallResponse(t *testing.T, response *generated.DryrunResponse, msg
if response.Txns[idx].AppCallMessages != nil {
messages := *response.Txns[idx].AppCallMessages
assert.GreaterOrEqual(t, len(messages), 1)
- assert.Equal(t, msg, messages[len(messages)-1])
+ assert.Contains(t, messages[len(messages)-1], msg)
}
}
}
}
-func checkAppCallPass(t *testing.T, response *generated.DryrunResponse) {
+func checkAppCallPass(t *testing.T, response *model.DryrunResponse) {
checkAppCallResponse(t, response, "PASS")
}
-func checkAppCallReject(t *testing.T, response *generated.DryrunResponse) {
+func checkAppCallReject(t *testing.T, response *model.DryrunResponse) {
checkAppCallResponse(t, response, "REJECT")
}
@@ -407,7 +407,7 @@ type expectedSlotType struct {
tt basics.TealType
}
-func checkAppCallScratchType(t *testing.T, response *generated.DryrunResponse, txnIdx int, expected []expectedSlotType) {
+func checkAppCallScratchType(t *testing.T, response *model.DryrunResponse, txnIdx int, expected []expectedSlotType) {
txn := response.Txns[txnIdx]
// We should have a trace
assert.NotNil(t, txn.AppCallTrace)
@@ -428,7 +428,7 @@ func TestDryrunGlobal1(t *testing.T) {
t.Parallel()
var dr DryrunRequest
- var response generated.DryrunResponse
+ var response model.DryrunResponse
dr.ProtocolVersion = string(dryrunProtoVersion)
@@ -445,19 +445,19 @@ func TestDryrunGlobal1(t *testing.T) {
},
},
}
- gkv := generated.TealKeyValueStore{
- generated.TealKeyValue{
+ gkv := model.TealKeyValueStore{
+ model.TealKeyValue{
Key: b64("foo"),
- Value: generated.TealValue{Type: uint64(basics.TealBytesType), Bytes: b64("bar")},
+ Value: model.TealValue{Type: uint64(basics.TealBytesType), Bytes: b64("bar")},
},
}
- dr.Apps = []generated.Application{
+ dr.Apps = []model.Application{
{
Id: 1,
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
ApprovalProgram: globalTestProgram,
GlobalState: &gkv,
- GlobalStateSchema: &generated.ApplicationStateSchema{
+ GlobalStateSchema: &model.ApplicationStateSchema{
NumByteSlice: 10,
NumUint: 10,
},
@@ -477,7 +477,7 @@ func TestDryrunGlobal2(t *testing.T) {
t.Parallel()
var dr DryrunRequest
- var response generated.DryrunResponse
+ var response model.DryrunResponse
dr.ProtocolVersion = string(dryrunProtoVersion)
@@ -495,16 +495,16 @@ func TestDryrunGlobal2(t *testing.T) {
},
},
}
- gkv := generated.TealKeyValueStore{
- generated.TealKeyValue{
+ gkv := model.TealKeyValueStore{
+ model.TealKeyValue{
Key: b64("foo"),
- Value: generated.TealValue{Type: uint64(basics.TealBytesType), Bytes: b64("bar")},
+ Value: model.TealValue{Type: uint64(basics.TealBytesType), Bytes: b64("bar")},
},
}
- dr.Apps = []generated.Application{
+ dr.Apps = []model.Application{
{
Id: 1,
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
ApprovalProgram: globalTestProgram,
GlobalState: &gkv,
},
@@ -530,7 +530,7 @@ func TestDryrunLocal1(t *testing.T) {
t.Parallel()
var dr DryrunRequest
- var response generated.DryrunResponse
+ var response model.DryrunResponse
dr.ProtocolVersion = string(dryrunProtoVersion)
@@ -548,23 +548,23 @@ func TestDryrunLocal1(t *testing.T) {
},
},
}
- dr.Apps = []generated.Application{
+ dr.Apps = []model.Application{
{
Id: 1,
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
ApprovalProgram: localStateCheckProg,
- LocalStateSchema: &generated.ApplicationStateSchema{
+ LocalStateSchema: &model.ApplicationStateSchema{
NumByteSlice: 10,
NumUint: 10,
},
},
},
}
- dr.Accounts = []generated.Account{
+ dr.Accounts = []model.Account{
{
Status: "Online",
Address: basics.Address{}.String(),
- AppsLocalState: &[]generated.ApplicationLocalState{{Id: 1}},
+ AppsLocalState: &[]model.ApplicationLocalState{{Id: 1}},
},
}
doDryrunRequest(&dr, &response)
@@ -604,7 +604,7 @@ func TestDryrunLocal1A(t *testing.T) {
t.Parallel()
var dr DryrunRequest
- var response generated.DryrunResponse
+ var response model.DryrunResponse
dr.ProtocolVersion = string(dryrunProtoVersion)
@@ -622,26 +622,26 @@ func TestDryrunLocal1A(t *testing.T) {
},
},
}
- dr.Apps = []generated.Application{
+ dr.Apps = []model.Application{
{
Id: 1,
- Params: generated.ApplicationParams{
- LocalStateSchema: &generated.ApplicationStateSchema{
+ Params: model.ApplicationParams{
+ LocalStateSchema: &model.ApplicationStateSchema{
NumByteSlice: 10,
NumUint: 10,
},
},
},
}
- dr.Accounts = []generated.Account{
+ dr.Accounts = []model.Account{
{
Status: "Online",
Address: basics.Address{}.String(),
- AppsLocalState: &[]generated.ApplicationLocalState{{Id: 1}},
+ AppsLocalState: &[]model.ApplicationLocalState{{Id: 1}},
},
}
- dr.Sources = []generated.DryrunSource{
+ dr.Sources = []model.DryrunSource{
{
Source: localStateCheckSource,
FieldName: "approv",
@@ -682,7 +682,7 @@ func TestDryrunLocalCheck(t *testing.T) {
// {"txns":[{"lsig":{"l":"AiABASI="},"txn":{}}]}
t.Parallel()
var dr DryrunRequest
- var response generated.DryrunResponse
+ var response model.DryrunResponse
dr.ProtocolVersion = string(dryrunProtoVersion)
@@ -700,28 +700,28 @@ func TestDryrunLocalCheck(t *testing.T) {
},
},
}
- dr.Apps = []generated.Application{
+ dr.Apps = []model.Application{
{
Id: 1,
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
ApprovalProgram: localStateCheckProg,
},
},
}
- localv := make(generated.TealKeyValueStore, 1)
- localv[0] = generated.TealKeyValue{
+ localv := make(model.TealKeyValueStore, 1)
+ localv[0] = model.TealKeyValue{
Key: b64("foo"),
- Value: generated.TealValue{
+ Value: model.TealValue{
Type: uint64(basics.TealBytesType),
Bytes: b64("bar"),
},
}
- dr.Accounts = []generated.Account{
+ dr.Accounts = []model.Account{
{
Status: "Online",
Address: basics.Address{}.String(),
- AppsLocalState: &[]generated.ApplicationLocalState{{
+ AppsLocalState: &[]model.ApplicationLocalState{{
Id: 1,
KeyValue: &localv,
}},
@@ -737,7 +737,7 @@ func TestDryrunMultipleTxns(t *testing.T) {
t.Parallel()
var dr DryrunRequest
- var response generated.DryrunResponse
+ var response model.DryrunResponse
dr.ProtocolVersion = string(dryrunProtoVersion)
@@ -754,19 +754,19 @@ func TestDryrunMultipleTxns(t *testing.T) {
}
dr.Txns = []transactions.SignedTxn{txn, txn}
- gkv := generated.TealKeyValueStore{
- generated.TealKeyValue{
+ gkv := model.TealKeyValueStore{
+ model.TealKeyValue{
Key: b64("foo"),
- Value: generated.TealValue{Type: uint64(basics.TealBytesType), Bytes: b64("bar")},
+ Value: model.TealValue{Type: uint64(basics.TealBytesType), Bytes: b64("bar")},
},
}
- dr.Apps = []generated.Application{
+ dr.Apps = []model.Application{
{
Id: 1,
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
ApprovalProgram: globalTestProgram,
GlobalState: &gkv,
- GlobalStateSchema: &generated.ApplicationStateSchema{
+ GlobalStateSchema: &model.ApplicationStateSchema{
NumByteSlice: 10,
NumUint: 10,
},
@@ -784,7 +784,7 @@ func TestDryrunEncodeDecode(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- var gdr generated.DryrunRequest
+ var gdr model.DryrunRequest
txns := []transactions.SignedTxn{
{
Txn: transactions.Transaction{
@@ -805,25 +805,25 @@ func TestDryrunEncodeDecode(t *testing.T) {
gdr.Txns = append(gdr.Txns, enc)
}
- gdr.Apps = []generated.Application{
+ gdr.Apps = []model.Application{
{
Id: 1,
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
ApprovalProgram: localStateCheckProg,
},
},
}
- localv := make(generated.TealKeyValueStore, 1)
- localv[0] = generated.TealKeyValue{
+ localv := make(model.TealKeyValueStore, 1)
+ localv[0] = model.TealKeyValue{
Key: b64("foo"),
- Value: generated.TealValue{Type: uint64(basics.TealBytesType), Bytes: b64("bar")},
+ Value: model.TealValue{Type: uint64(basics.TealBytesType), Bytes: b64("bar")},
}
- gdr.Accounts = []generated.Account{
+ gdr.Accounts = []model.Account{
{
Status: "Online",
Address: basics.Address{}.String(),
- AppsLocalState: &[]generated.ApplicationLocalState{{
+ AppsLocalState: &[]model.ApplicationLocalState{{
Id: 1,
KeyValue: &localv,
}},
@@ -832,14 +832,14 @@ func TestDryrunEncodeDecode(t *testing.T) {
// use protocol
encoded := protocol.EncodeJSON(&gdr)
- var decoded generated.DryrunRequest
+ var decoded model.DryrunRequest
err := protocol.DecodeJSON(encoded, &decoded)
require.NoError(t, err)
require.Equal(t, gdr, decoded)
buf := bytes.NewBuffer(encoded)
dec := protocol.NewJSONDecoder(buf)
- decoded = generated.DryrunRequest{}
+ decoded = model.DryrunRequest{}
err = dec.Decode(&decoded)
require.NoError(t, err)
require.Equal(t, gdr, decoded)
@@ -847,7 +847,7 @@ func TestDryrunEncodeDecode(t *testing.T) {
// use json
data, err := json.Marshal(&gdr)
require.NoError(t, err)
- gdr = generated.DryrunRequest{}
+ gdr = model.DryrunRequest{}
err = json.Unmarshal(data, &gdr)
require.NoError(t, err)
@@ -915,10 +915,10 @@ func TestDryrunMakeLedger(t *testing.T) {
},
},
}
- dr.Apps = []generated.Application{
+ dr.Apps = []model.Application{
{
Id: 1,
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
Creator: sender.String(),
ApprovalProgram: localStateCheckProg,
},
@@ -1002,7 +1002,7 @@ func TestDryrunRequestJSON(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- var gdr generated.DryrunRequest
+ var gdr model.DryrunRequest
buf := bytes.NewBuffer(dataJSON)
dec := protocol.NewJSONDecoder(buf)
err := dec.Decode(&gdr)
@@ -1014,7 +1014,7 @@ func TestDryrunRequestJSON(t *testing.T) {
require.Equal(t, 1, len(dr.Accounts))
require.Equal(t, 1, len(dr.Apps))
- var response generated.DryrunResponse
+ var response model.DryrunResponse
dr.ProtocolVersion = string(dryrunProtoVersion)
@@ -1092,8 +1092,8 @@ int 1`)
require.NoError(t, err)
approval := ops.Program
ops, err = logic.AssembleString("int 1")
- clst := ops.Program
require.NoError(t, err)
+ clst := ops.Program
var appIdx basics.AppIndex = 1
creator := randomAddress()
sender := randomAddress()
@@ -1110,18 +1110,18 @@ int 1`)
},
},
},
- Apps: []generated.Application{
+ Apps: []model.Application{
{
Id: uint64(appIdx),
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
Creator: creator.String(),
ApprovalProgram: approval,
ClearStateProgram: clst,
- LocalStateSchema: &generated.ApplicationStateSchema{NumByteSlice: 1},
+ LocalStateSchema: &model.ApplicationStateSchema{NumByteSlice: 1},
},
},
},
- Accounts: []generated.Account{
+ Accounts: []model.Account{
{
Address: sender.String(),
Status: "Online",
@@ -1131,7 +1131,7 @@ int 1`)
}
dr.ProtocolVersion = string(dryrunProtoVersion)
- var response generated.DryrunResponse
+ var response model.DryrunResponse
doDryrunRequest(&dr, &response)
require.NoError(t, err)
checkAppCallPass(t, &response)
@@ -1201,27 +1201,27 @@ return
},
},
},
- Apps: []generated.Application{
+ Apps: []model.Application{
{
Id: uint64(appIdx),
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
Creator: creator.String(),
ApprovalProgram: approval,
ClearStateProgram: clst,
- LocalStateSchema: &generated.ApplicationStateSchema{NumByteSlice: 1},
+ LocalStateSchema: &model.ApplicationStateSchema{NumByteSlice: 1},
},
},
{
Id: uint64(appIdx + 1),
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
Creator: creator.String(),
ApprovalProgram: approv,
ClearStateProgram: clst,
- LocalStateSchema: &generated.ApplicationStateSchema{NumByteSlice: 1},
+ LocalStateSchema: &model.ApplicationStateSchema{NumByteSlice: 1},
},
},
},
- Accounts: []generated.Account{
+ Accounts: []model.Account{
{
Address: sender.String(),
Status: "Online",
@@ -1231,7 +1231,7 @@ return
}
dr.ProtocolVersion = string(dryrunProtoVersion)
- var response generated.DryrunResponse
+ var response model.DryrunResponse
doDryrunRequest(&dr, &response)
require.NoError(t, err)
checkAppCallPass(t, &response)
@@ -1338,70 +1338,60 @@ int 1`)
},
},
},
- Apps: []generated.Application{
+ Apps: []model.Application{
{
Id: uint64(appIdx),
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
Creator: creator.String(),
ApprovalProgram: app1,
ClearStateProgram: clst,
- LocalStateSchema: &generated.ApplicationStateSchema{NumByteSlice: 1},
+ LocalStateSchema: &model.ApplicationStateSchema{NumByteSlice: 1},
},
},
{
Id: uint64(appIdx + 1),
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
Creator: creator.String(),
ApprovalProgram: app2,
ClearStateProgram: clst,
- LocalStateSchema: &generated.ApplicationStateSchema{NumByteSlice: 1},
+ LocalStateSchema: &model.ApplicationStateSchema{NumByteSlice: 1},
},
},
{
Id: uint64(appIdx + 2),
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
Creator: creator.String(),
ApprovalProgram: app3,
ClearStateProgram: clst,
- LocalStateSchema: &generated.ApplicationStateSchema{NumByteSlice: 1},
+ LocalStateSchema: &model.ApplicationStateSchema{NumByteSlice: 1},
},
},
},
- Accounts: []generated.Account{
+ Accounts: []model.Account{
{
- Address: sender.String(),
- Status: "Online",
- Amount: 10000000,
+ Address: (appIdx + 2).Address().String(),
+ Status: "Online",
+ AmountWithoutPendingRewards: 105_000,
},
},
}
dr.ProtocolVersion = string(dryrunProtoVersion)
- var response generated.DryrunResponse
+ var response model.DryrunResponse
doDryrunRequest(&dr, &response)
require.Empty(t, response.Error)
- require.Equal(t, 3, len(response.Txns))
+ require.Len(t, response.Txns, 3)
for i, txn := range response.Txns {
messages := *txn.AppCallMessages
- require.GreaterOrEqual(t, len(messages), 1)
- cost := int64(*txn.BudgetConsumed) - int64(*txn.BudgetAdded)
- require.NotNil(t, cost)
- require.Equal(t, expectedCosts[i], cost)
- require.Equal(t, expectedBudgetAdded[i], *txn.BudgetAdded)
- statusMatches := false
- costExceedFound := false
- for _, msg := range messages {
- if strings.Contains(msg, "cost budget exceeded") {
- costExceedFound = true
- }
- if msg == test.msg {
- statusMatches = true
- }
- }
+ require.Contains(t, messages, test.msg, "Wrong result") // PASS or REJECT
+
if test.msg == "REJECT" {
- require.True(t, costExceedFound, "budget error not found in messages")
+ require.Contains(t, messages[2], "cost budget exceeded", "Failed for a surprise reason")
}
- require.True(t, statusMatches, "expected status not found in messages")
+
+ cost := int64(*txn.BudgetConsumed) - int64(*txn.BudgetAdded)
+ require.Equal(t, expectedCosts[i], cost, "txn %d cost", i)
+ require.Equal(t, expectedBudgetAdded[i], *txn.BudgetAdded, "txn %d added", i)
}
})
}
@@ -1434,8 +1424,8 @@ int 1`
approval := ops.Program
ops, err = logic.AssembleString("int 1")
- clst := ops.Program
require.NoError(t, err)
+ clst := ops.Program
sender, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU")
a.NoError(err)
@@ -1449,15 +1439,15 @@ int 1`
Sender: sender,
ApplicationID: appIdx,
}.SignedTxn()},
- Apps: []generated.Application{{
+ Apps: []model.Application{{
Id: uint64(appIdx),
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
Creator: sender.String(),
ApprovalProgram: approval,
ClearStateProgram: clst,
},
}},
- Accounts: []generated.Account{
+ Accounts: []model.Account{
{
Address: sender.String(),
Status: "Online",
@@ -1473,7 +1463,7 @@ int 1`
},
}
- var response generated.DryrunResponse
+ var response model.DryrunResponse
doDryrunRequest(&dr, &response)
checkAppCallPass(t, &response)
if t.Failed() {
@@ -1493,8 +1483,8 @@ int 0
require.NoError(t, err)
approval := ops.Program
ops, err = logic.AssembleString("int 1")
- clst := ops.Program
require.NoError(t, err)
+ clst := ops.Program
var appIdx basics.AppIndex = 1
creator := randomAddress()
rewardBase := uint64(10000000)
@@ -1510,18 +1500,18 @@ int 0
},
},
},
- Apps: []generated.Application{
+ Apps: []model.Application{
{
Id: uint64(appIdx),
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
Creator: creator.String(),
ApprovalProgram: approval,
ClearStateProgram: clst,
- LocalStateSchema: &generated.ApplicationStateSchema{NumByteSlice: 1},
+ LocalStateSchema: &model.ApplicationStateSchema{NumByteSlice: 1},
},
},
},
- Accounts: []generated.Account{
+ Accounts: []model.Account{
{
Address: creator.String(),
Status: "Online",
@@ -1533,7 +1523,7 @@ int 0
}
dr.ProtocolVersion = string(dryrunProtoVersion)
- var response generated.DryrunResponse
+ var response model.DryrunResponse
doDryrunRequest(&dr, &response)
require.NoError(t, err)
checkAppCallPass(t, &response)
@@ -1561,8 +1551,8 @@ int 1
require.NoError(t, err)
ops, err := logic.AssembleString("int 1")
- clst := ops.Program
require.NoError(t, err)
+ clst := ops.Program
sender, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU")
a.NoError(err)
@@ -1575,20 +1565,20 @@ int 1
Sender: sender,
ApplicationID: appIdx,
}.SignedTxn()},
- Apps: []generated.Application{{
+ Apps: []model.Application{{
Id: uint64(appIdx),
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
ApprovalProgram: paySender.Program,
ClearStateProgram: clst,
},
}},
// Sender must exist (though no fee is ever taken)
// AppAccount must exist and be able to pay the inner fee and the pay amount (but min balance not checked)
- Accounts: []generated.Account{
+ Accounts: []model.Account{
{Address: sender.String(), Status: "Offline"}, // sender
{Address: appIdx.Address().String(), Status: "Offline", AmountWithoutPendingRewards: 1_010}}, // app account
}
- var response generated.DryrunResponse
+ var response model.DryrunResponse
doDryrunRequest(&dr, &response)
checkAppCallPass(t, &response)
if t.Failed() {
@@ -1638,22 +1628,22 @@ int 1`)
require.NoError(t, err)
ops, err := logic.AssembleString("int 1")
- clst := ops.Program
require.NoError(t, err)
+ clst := ops.Program
sender, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU")
a.NoError(err)
txns := make([]transactions.SignedTxn, 0, 4)
- apps := make([]generated.Application, 0, 4)
+ apps := make([]model.Application, 0, 4)
for appIdx := basics.AppIndex(1); appIdx <= basics.AppIndex(4); appIdx++ {
txns = append(txns, txntest.Txn{
Type: protocol.ApplicationCallTx,
Sender: sender,
ApplicationID: appIdx}.SignedTxn())
- apps = append(apps, generated.Application{
+ apps = append(apps, model.Application{
Id: uint64(appIdx),
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
ApprovalProgram: approvalOps.Program,
ClearStateProgram: clst,
},
@@ -1663,11 +1653,11 @@ int 1`)
ProtocolVersion: string(dryrunProtoVersion),
Txns: txns,
Apps: apps,
- Accounts: []generated.Account{
+ Accounts: []model.Account{
{Address: sender.String(), Status: "Offline", Amount: 100_000_000}, // sender
},
}
- var response generated.DryrunResponse
+ var response model.DryrunResponse
doDryrunRequest(&dr, &response)
checkAppCallScratchType(t, &response, 1, []expectedSlotType{
@@ -1686,9 +1676,9 @@ int 1`)
}
func checkEvalDelta(t *testing.T,
- response generated.DryrunResponse,
- expectedGlobalDelta generated.StateDelta,
- expectedLocalDelta generated.AccountStateDelta,
+ response model.DryrunResponse,
+ expectedGlobalDelta model.StateDelta,
+ expectedLocalDelta model.AccountStateDelta,
) {
for _, rt := range response.Txns {
if rt.GlobalDelta != nil && len(*rt.GlobalDelta) > 0 {
@@ -1713,26 +1703,26 @@ func TestDryrunCheckEvalDeltasReturned(t *testing.T) {
t.Parallel()
var dr DryrunRequest
- var response generated.DryrunResponse
+ var response model.DryrunResponse
// Expected responses.
expectedByte := b64("val")
expectedUint := uint64(1)
- expectedGlobalDelta := generated.StateDelta{
+ expectedGlobalDelta := model.StateDelta{
{
Key: b64("key"),
- Value: generated.EvalDelta{
+ Value: model.EvalDelta{
Action: uint64(basics.SetBytesAction),
Bytes: &expectedByte,
},
},
}
- expectedLocalDelta := generated.AccountStateDelta{
+ expectedLocalDelta := model.AccountStateDelta{
Address: basics.Address{}.String(),
- Delta: generated.StateDelta{
+ Delta: model.StateDelta{
{
Key: b64("key"),
- Value: generated.EvalDelta{
+ Value: model.EvalDelta{
Action: uint64(basics.SetUintAction),
Uint: &expectedUint,
},
@@ -1742,7 +1732,7 @@ func TestDryrunCheckEvalDeltasReturned(t *testing.T) {
// Test that a PASS and REJECT dryrun both return the dryrun evaldelta.
for i := range []int{0, 1} {
- ops, _ := logic.AssembleString(fmt.Sprintf(`
+ ops, err := logic.AssembleString(fmt.Sprintf(`
#pragma version 6
txna ApplicationArgs 0
txna ApplicationArgs 1
@@ -1752,6 +1742,7 @@ txna ApplicationArgs 0
int %d
app_local_put
int %d`, expectedUint, i))
+ require.NoError(t, err)
dr.ProtocolVersion = string(dryrunProtoVersion)
dr.Txns = []transactions.SignedTxn{
@@ -1768,27 +1759,27 @@ int %d`, expectedUint, i))
},
},
}
- dr.Apps = []generated.Application{
+ dr.Apps = []model.Application{
{
Id: 1,
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
ApprovalProgram: ops.Program,
- GlobalStateSchema: &generated.ApplicationStateSchema{
+ GlobalStateSchema: &model.ApplicationStateSchema{
NumByteSlice: 1,
NumUint: 1,
},
- LocalStateSchema: &generated.ApplicationStateSchema{
+ LocalStateSchema: &model.ApplicationStateSchema{
NumByteSlice: 1,
NumUint: 1,
},
},
},
}
- dr.Accounts = []generated.Account{
+ dr.Accounts = []model.Account{
{
Status: "Online",
Address: basics.Address{}.String(),
- AppsLocalState: &[]generated.ApplicationLocalState{{Id: 1}},
+ AppsLocalState: &[]model.ApplicationLocalState{{Id: 1}},
},
}
@@ -1803,5 +1794,41 @@ int %d`, expectedUint, i))
logResponse(t, &response)
}
}
+}
+// TestDryrunEarlyExit is a regression test. Ensures that we no longer exit so
+// early in eval() that problems are caused by the debugState being nil.
+func TestDryrunEarlyExit(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ var dr DryrunRequest
+ var response model.DryrunResponse
+
+ ops, err := logic.AssembleString("#pragma version 5 \n int 1")
+ require.NoError(t, err)
+ dr.ProtocolVersion = string(dryrunProtoVersion)
+
+ dr.Txns = []transactions.SignedTxn{
+ txntest.Txn{
+ ApplicationID: 1,
+ Type: protocol.ApplicationCallTx,
+ }.SignedTxn(),
+ }
+ dr.Apps = []model.Application{{
+ Id: 1,
+ Params: model.ApplicationParams{
+ ApprovalProgram: ops.Program,
+ },
+ }}
+ dr.Accounts = []model.Account{{
+ Status: "Online",
+ Address: basics.Address{}.String(),
+ }}
+ doDryrunRequest(&dr, &response)
+ checkAppCallPass(t, &response)
+
+ ops.Program[0] = 100 // version too high
+ doDryrunRequest(&dr, &response)
+ checkAppCallResponse(t, &response, "program version 100 greater than max")
}
diff --git a/daemon/algod/api/server/v2/errors.go b/daemon/algod/api/server/v2/errors.go
index f20df6f2f..aa53f101b 100644
--- a/daemon/algod/api/server/v2/errors.go
+++ b/daemon/algod/api/server/v2/errors.go
@@ -21,6 +21,7 @@ var (
errAssetDoesNotExist = "asset does not exist"
errAccountAppDoesNotExist = "account application info not found"
errAccountAssetDoesNotExist = "account asset info not found"
+ errBoxDoesNotExist = "box not found"
errFailedLookingUpLedger = "failed to retrieve information from the ledger"
errFailedLookingUpTransactionPool = "failed to retrieve information from the transaction pool"
errFailedRetrievingNodeStatus = "failed retrieving node status"
diff --git a/daemon/algod/api/server/v2/generated/model/model_types.yml b/daemon/algod/api/server/v2/generated/model/model_types.yml
new file mode 100644
index 000000000..ecb10ba22
--- /dev/null
+++ b/daemon/algod/api/server/v2/generated/model/model_types.yml
@@ -0,0 +1,8 @@
+package: model
+generate:
+ models: true
+output-options:
+ type-mappings:
+ integer: uint64
+ skip-prune: true
+output: ./server/v2/generated/model/types.go
diff --git a/daemon/algod/api/server/v2/generated/model/types.go b/daemon/algod/api/server/v2/generated/model/types.go
new file mode 100644
index 000000000..215d06e90
--- /dev/null
+++ b/daemon/algod/api/server/v2/generated/model/types.go
@@ -0,0 +1,1102 @@
+// Package model provides primitives to interact with the openapi HTTP API.
+//
+// Code generated by github.com/algorand/oapi-codegen DO NOT EDIT.
+package model
+
+import (
+ "encoding/json"
+ "time"
+
+ openapi_types "github.com/algorand/oapi-codegen/pkg/types"
+)
+
+const (
+ Api_keyScopes = "api_key.Scopes"
+)
+
+// Defines values for AccountSigType.
+const (
+ AccountSigTypeLsig AccountSigType = "lsig"
+ AccountSigTypeMsig AccountSigType = "msig"
+ AccountSigTypeSig AccountSigType = "sig"
+)
+
+// Defines values for AddressRole.
+const (
+ FreezeTarget AddressRole = "freeze-target"
+ Receiver AddressRole = "receiver"
+ Sender AddressRole = "sender"
+)
+
+// Defines values for Format.
+const (
+ FormatJson Format = "json"
+ FormatMsgpack Format = "msgpack"
+)
+
+// Defines values for SigType.
+const (
+ SigTypeLsig SigType = "lsig"
+ SigTypeMsig SigType = "msig"
+ SigTypeSig SigType = "sig"
+)
+
+// Defines values for TxType.
+const (
+ Acfg TxType = "acfg"
+ Afrz TxType = "afrz"
+ Appl TxType = "appl"
+ Axfer TxType = "axfer"
+ Keyreg TxType = "keyreg"
+ Pay TxType = "pay"
+ Stpf TxType = "stpf"
+)
+
+// Defines values for TransactionProofResponseHashtype.
+const (
+ TransactionProofResponseHashtypeSha256 TransactionProofResponseHashtype = "sha256"
+ TransactionProofResponseHashtypeSha512256 TransactionProofResponseHashtype = "sha512_256"
+)
+
+// Defines values for AccountInformationParamsFormat.
+const (
+ AccountInformationParamsFormatJson AccountInformationParamsFormat = "json"
+ AccountInformationParamsFormatMsgpack AccountInformationParamsFormat = "msgpack"
+)
+
+// Defines values for AccountInformationParamsExclude.
+const (
+ All AccountInformationParamsExclude = "all"
+ None AccountInformationParamsExclude = "none"
+)
+
+// Defines values for AccountApplicationInformationParamsFormat.
+const (
+ AccountApplicationInformationParamsFormatJson AccountApplicationInformationParamsFormat = "json"
+ AccountApplicationInformationParamsFormatMsgpack AccountApplicationInformationParamsFormat = "msgpack"
+)
+
+// Defines values for AccountAssetInformationParamsFormat.
+const (
+ AccountAssetInformationParamsFormatJson AccountAssetInformationParamsFormat = "json"
+ AccountAssetInformationParamsFormatMsgpack AccountAssetInformationParamsFormat = "msgpack"
+)
+
+// Defines values for GetPendingTransactionsByAddressParamsFormat.
+const (
+ GetPendingTransactionsByAddressParamsFormatJson GetPendingTransactionsByAddressParamsFormat = "json"
+ GetPendingTransactionsByAddressParamsFormatMsgpack GetPendingTransactionsByAddressParamsFormat = "msgpack"
+)
+
+// Defines values for GetBlockParamsFormat.
+const (
+ GetBlockParamsFormatJson GetBlockParamsFormat = "json"
+ GetBlockParamsFormatMsgpack GetBlockParamsFormat = "msgpack"
+)
+
+// Defines values for GetTransactionProofParamsHashtype.
+const (
+ GetTransactionProofParamsHashtypeSha256 GetTransactionProofParamsHashtype = "sha256"
+ GetTransactionProofParamsHashtypeSha512256 GetTransactionProofParamsHashtype = "sha512_256"
+)
+
+// Defines values for GetTransactionProofParamsFormat.
+const (
+ GetTransactionProofParamsFormatJson GetTransactionProofParamsFormat = "json"
+ GetTransactionProofParamsFormatMsgpack GetTransactionProofParamsFormat = "msgpack"
+)
+
+// Defines values for GetPendingTransactionsParamsFormat.
+const (
+ GetPendingTransactionsParamsFormatJson GetPendingTransactionsParamsFormat = "json"
+ GetPendingTransactionsParamsFormatMsgpack GetPendingTransactionsParamsFormat = "msgpack"
+)
+
+// Defines values for PendingTransactionInformationParamsFormat.
+const (
+ Json PendingTransactionInformationParamsFormat = "json"
+ Msgpack PendingTransactionInformationParamsFormat = "msgpack"
+)
+
+// Account Account information at a given round.
+//
+// Definition:
+// data/basics/userBalance.go : AccountData
+type Account struct {
+ // Address the account public key
+ Address string `json:"address"`
+
+ // Amount \[algo\] total number of MicroAlgos in the account
+ Amount uint64 `json:"amount"`
+
+ // AmountWithoutPendingRewards specifies the amount of MicroAlgos in the account, without the pending rewards.
+ AmountWithoutPendingRewards uint64 `json:"amount-without-pending-rewards"`
+
+ // AppsLocalState \[appl\] applications local data stored in this account.
+ //
+ // Note the raw object uses `map[int] -> AppLocalState` for this type.
+ AppsLocalState *[]ApplicationLocalState `json:"apps-local-state,omitempty"`
+
+ // AppsTotalExtraPages \[teap\] the sum of all extra application program pages for this account.
+ AppsTotalExtraPages *uint64 `json:"apps-total-extra-pages,omitempty"`
+
+ // AppsTotalSchema Specifies maximums on the number of each type that may be stored.
+ AppsTotalSchema *ApplicationStateSchema `json:"apps-total-schema,omitempty"`
+
+ // Assets \[asset\] assets held by this account.
+ //
+ // Note the raw object uses `map[int] -> AssetHolding` for this type.
+ Assets *[]AssetHolding `json:"assets,omitempty"`
+
+ // AuthAddr \[spend\] the address against which signing should be checked. If empty, the address of the current account is used. This field can be updated in any transaction by setting the RekeyTo field.
+ AuthAddr *string `json:"auth-addr,omitempty"`
+
+ // CreatedApps \[appp\] parameters of applications created by this account including app global data.
+ //
+ // Note: the raw account uses `map[int] -> AppParams` for this type.
+ CreatedApps *[]Application `json:"created-apps,omitempty"`
+
+ // CreatedAssets \[apar\] parameters of assets created by this account.
+ //
+ // Note: the raw account uses `map[int] -> Asset` for this type.
+ CreatedAssets *[]Asset `json:"created-assets,omitempty"`
+
+ // MinBalance MicroAlgo balance required by the account.
+ //
+ // The requirement grows based on asset and application usage.
+ MinBalance uint64 `json:"min-balance"`
+
+ // Participation AccountParticipation describes the parameters used by this account in consensus protocol.
+ Participation *AccountParticipation `json:"participation,omitempty"`
+
+ // PendingRewards amount of MicroAlgos of pending rewards in this account.
+ PendingRewards uint64 `json:"pending-rewards"`
+
+ // RewardBase \[ebase\] used as part of the rewards computation. Only applicable to accounts which are participating.
+ RewardBase *uint64 `json:"reward-base,omitempty"`
+
+ // Rewards \[ern\] total rewards of MicroAlgos the account has received, including pending rewards.
+ Rewards uint64 `json:"rewards"`
+
+ // Round The round for which this information is relevant.
+ Round uint64 `json:"round"`
+
+ // SigType Indicates what type of signature is used by this account, must be one of:
+ // * sig
+ // * msig
+ // * lsig
+ SigType *AccountSigType `json:"sig-type,omitempty"`
+
+ // Status \[onl\] delegation status of the account's MicroAlgos
+ // * Offline - indicates that the associated account is delegated.
+ // * Online - indicates that the associated account used as part of the delegation pool.
+ // * NotParticipating - indicates that the associated account is neither a delegator nor a delegate.
+ Status string `json:"status"`
+
+ // TotalAppsOptedIn The count of all applications that have been opted in, equivalent to the count of application local data (AppLocalState objects) stored in this account.
+ TotalAppsOptedIn uint64 `json:"total-apps-opted-in"`
+
+ // TotalAssetsOptedIn The count of all assets that have been opted in, equivalent to the count of AssetHolding objects held by this account.
+ TotalAssetsOptedIn uint64 `json:"total-assets-opted-in"`
+
+ // TotalBoxBytes \[tbxb\] The total number of bytes used by this account's app's box keys and values.
+ TotalBoxBytes *uint64 `json:"total-box-bytes,omitempty"`
+
+ // TotalBoxes \[tbx\] The number of existing boxes created by this account's app.
+ TotalBoxes *uint64 `json:"total-boxes,omitempty"`
+
+ // TotalCreatedApps The count of all apps (AppParams objects) created by this account.
+ TotalCreatedApps uint64 `json:"total-created-apps"`
+
+ // TotalCreatedAssets The count of all assets (AssetParams objects) created by this account.
+ TotalCreatedAssets uint64 `json:"total-created-assets"`
+}
+
+// AccountSigType Indicates what type of signature is used by this account, must be one of:
+// * sig
+// * msig
+// * lsig
+type AccountSigType string
+
+// AccountParticipation AccountParticipation describes the parameters used by this account in consensus protocol.
+type AccountParticipation struct {
+ // SelectionParticipationKey \[sel\] Selection public key (if any) currently registered for this round.
+ SelectionParticipationKey []byte `json:"selection-participation-key"`
+
+ // StateProofKey \[stprf\] Root of the state proof key (if any)
+ StateProofKey *[]byte `json:"state-proof-key,omitempty"`
+
+ // VoteFirstValid \[voteFst\] First round for which this participation is valid.
+ VoteFirstValid uint64 `json:"vote-first-valid"`
+
+ // VoteKeyDilution \[voteKD\] Number of subkeys in each batch of participation keys.
+ VoteKeyDilution uint64 `json:"vote-key-dilution"`
+
+ // VoteLastValid \[voteLst\] Last round for which this participation is valid.
+ VoteLastValid uint64 `json:"vote-last-valid"`
+
+ // VoteParticipationKey \[vote\] root participation public key (if any) currently registered for this round.
+ VoteParticipationKey []byte `json:"vote-participation-key"`
+}
+
+// AccountStateDelta Application state delta.
+type AccountStateDelta struct {
+ Address string `json:"address"`
+
+ // Delta Application state delta.
+ Delta StateDelta `json:"delta"`
+}
+
+// Application Application index and its parameters
+type Application struct {
+ // Id \[appidx\] application index.
+ Id uint64 `json:"id"`
+
+ // Params Stores the global information associated with an application.
+ Params ApplicationParams `json:"params"`
+}
+
+// ApplicationLocalState Stores local state associated with an application.
+type ApplicationLocalState struct {
+ // Id The application which this local state is for.
+ Id uint64 `json:"id"`
+
+ // KeyValue Represents a key-value store for use in an application.
+ KeyValue *TealKeyValueStore `json:"key-value,omitempty"`
+
+ // Schema Specifies maximums on the number of each type that may be stored.
+ Schema ApplicationStateSchema `json:"schema"`
+}
+
+// ApplicationParams Stores the global information associated with an application.
+type ApplicationParams struct {
+ // ApprovalProgram \[approv\] approval program.
+ ApprovalProgram []byte `json:"approval-program"`
+
+ // ClearStateProgram \[clearp\] approval program.
+ ClearStateProgram []byte `json:"clear-state-program"`
+
+ // Creator The address that created this application. This is the address where the parameters and global state for this application can be found.
+ Creator string `json:"creator"`
+
+ // ExtraProgramPages \[epp\] the amount of extra program pages available to this app.
+ ExtraProgramPages *uint64 `json:"extra-program-pages,omitempty"`
+
+ // GlobalState Represents a key-value store for use in an application.
+ GlobalState *TealKeyValueStore `json:"global-state,omitempty"`
+
+ // GlobalStateSchema Specifies maximums on the number of each type that may be stored.
+ GlobalStateSchema *ApplicationStateSchema `json:"global-state-schema,omitempty"`
+
+ // LocalStateSchema Specifies maximums on the number of each type that may be stored.
+ LocalStateSchema *ApplicationStateSchema `json:"local-state-schema,omitempty"`
+}
+
+// ApplicationStateSchema Specifies maximums on the number of each type that may be stored.
+type ApplicationStateSchema struct {
+ // NumByteSlice \[nbs\] num of byte slices.
+ NumByteSlice uint64 `json:"num-byte-slice"`
+
+ // NumUint \[nui\] num of uints.
+ NumUint uint64 `json:"num-uint"`
+}
+
+// Asset Specifies both the unique identifier and the parameters for an asset
+type Asset struct {
+ // Index unique asset identifier
+ Index uint64 `json:"index"`
+
+ // Params AssetParams specifies the parameters for an asset.
+ //
+ // \[apar\] when part of an AssetConfig transaction.
+ //
+ // Definition:
+ // data/transactions/asset.go : AssetParams
+ Params AssetParams `json:"params"`
+}
+
+// AssetHolding Describes an asset held by an account.
+//
+// Definition:
+// data/basics/userBalance.go : AssetHolding
+type AssetHolding struct {
+ // Amount \[a\] number of units held.
+ Amount uint64 `json:"amount"`
+
+ // AssetId Asset ID of the holding.
+ AssetID uint64 `json:"asset-id"`
+
+ // IsFrozen \[f\] whether or not the holding is frozen.
+ IsFrozen bool `json:"is-frozen"`
+}
+
+// AssetParams AssetParams specifies the parameters for an asset.
+//
+// \[apar\] when part of an AssetConfig transaction.
+//
+// Definition:
+// data/transactions/asset.go : AssetParams
+type AssetParams struct {
+ // Clawback \[c\] Address of account used to clawback holdings of this asset. If empty, clawback is not permitted.
+ Clawback *string `json:"clawback,omitempty"`
+
+ // Creator The address that created this asset. This is the address where the parameters for this asset can be found, and also the address where unwanted asset units can be sent in the worst case.
+ Creator string `json:"creator"`
+
+ // Decimals \[dc\] The number of digits to use after the decimal point when displaying this asset. If 0, the asset is not divisible. If 1, the base unit of the asset is in tenths. If 2, the base unit of the asset is in hundredths, and so on. This value must be between 0 and 19 (inclusive).
+ Decimals uint64 `json:"decimals"`
+
+ // DefaultFrozen \[df\] Whether holdings of this asset are frozen by default.
+ DefaultFrozen *bool `json:"default-frozen,omitempty"`
+
+ // Freeze \[f\] Address of account used to freeze holdings of this asset. If empty, freezing is not permitted.
+ Freeze *string `json:"freeze,omitempty"`
+
+ // Manager \[m\] Address of account used to manage the keys of this asset and to destroy it.
+ Manager *string `json:"manager,omitempty"`
+
+ // MetadataHash \[am\] A commitment to some unspecified asset metadata. The format of this metadata is up to the application.
+ MetadataHash *[]byte `json:"metadata-hash,omitempty"`
+
+ // Name \[an\] Name of this asset, as supplied by the creator. Included only when the asset name is composed of printable utf-8 characters.
+ Name *string `json:"name,omitempty"`
+
+ // NameB64 Base64 encoded name of this asset, as supplied by the creator.
+ NameB64 *[]byte `json:"name-b64,omitempty"`
+
+ // Reserve \[r\] Address of account holding reserve (non-minted) units of this asset.
+ Reserve *string `json:"reserve,omitempty"`
+
+ // Total \[t\] The total number of units of this asset.
+ Total uint64 `json:"total"`
+
+ // UnitName \[un\] Name of a unit of this asset, as supplied by the creator. Included only when the name of a unit of this asset is composed of printable utf-8 characters.
+ UnitName *string `json:"unit-name,omitempty"`
+
+ // UnitNameB64 Base64 encoded name of a unit of this asset, as supplied by the creator.
+ UnitNameB64 *[]byte `json:"unit-name-b64,omitempty"`
+
+ // Url \[au\] URL where more information about the asset can be retrieved. Included only when the URL is composed of printable utf-8 characters.
+ Url *string `json:"url,omitempty"`
+
+ // UrlB64 Base64 encoded URL where more information about the asset can be retrieved.
+ UrlB64 *[]byte `json:"url-b64,omitempty"`
+}
+
+// Box Box name and its content.
+type Box struct {
+ // Name \[name\] box name, base64 encoded
+ Name []byte `json:"name"`
+
+ // Value \[value\] box value, base64 encoded.
+ Value []byte `json:"value"`
+}
+
+// BoxDescriptor Box descriptor describes a Box.
+type BoxDescriptor struct {
+ // Name Base64 encoded box name
+ Name []byte `json:"name"`
+}
+
+// BuildVersion defines model for BuildVersion.
+type BuildVersion struct {
+ Branch string `json:"branch"`
+ BuildNumber uint64 `json:"build_number"`
+ Channel string `json:"channel"`
+ CommitHash string `json:"commit_hash"`
+ Major uint64 `json:"major"`
+ Minor uint64 `json:"minor"`
+}
+
+// DryrunRequest Request data type for dryrun endpoint. Given the Transactions and simulated ledger state upload, run TEAL scripts and return debugging information.
+type DryrunRequest struct {
+ Accounts []Account `json:"accounts"`
+ Apps []Application `json:"apps"`
+
+ // LatestTimestamp LatestTimestamp is available to some TEAL scripts. Defaults to the latest confirmed timestamp this algod is attached to.
+ LatestTimestamp uint64 `json:"latest-timestamp"`
+
+ // ProtocolVersion ProtocolVersion specifies a specific version string to operate under, otherwise whatever the current protocol of the network this algod is running in.
+ ProtocolVersion string `json:"protocol-version"`
+
+ // Round Round is available to some TEAL scripts. Defaults to the current round on the network this algod is attached to.
+ Round uint64 `json:"round"`
+ Sources []DryrunSource `json:"sources"`
+ Txns []json.RawMessage `json:"txns"`
+}
+
+// DryrunSource DryrunSource is TEAL source text that gets uploaded, compiled, and inserted into transactions or application state.
+type DryrunSource struct {
+ AppIndex uint64 `json:"app-index"`
+
+ // FieldName FieldName is what kind of sources this is. If lsig then it goes into the transactions[this.TxnIndex].LogicSig. If approv or clearp it goes into the Approval Program or Clear State Program of application[this.AppIndex].
+ FieldName string `json:"field-name"`
+ Source string `json:"source"`
+ TxnIndex uint64 `json:"txn-index"`
+}
+
+// DryrunState Stores the TEAL eval step data
+type DryrunState struct {
+ // Error Evaluation error if any
+ Error *string `json:"error,omitempty"`
+
+ // Line Line number
+ Line uint64 `json:"line"`
+
+ // Pc Program counter
+ Pc uint64 `json:"pc"`
+ Scratch *[]TealValue `json:"scratch,omitempty"`
+ Stack []TealValue `json:"stack"`
+}
+
+// DryrunTxnResult DryrunTxnResult contains any LogicSig or ApplicationCall program debug information and state updates from a dryrun.
+type DryrunTxnResult struct {
+ AppCallMessages *[]string `json:"app-call-messages,omitempty"`
+ AppCallTrace *[]DryrunState `json:"app-call-trace,omitempty"`
+
+ // BudgetAdded Budget added during execution of app call transaction.
+ BudgetAdded *uint64 `json:"budget-added,omitempty"`
+
+ // BudgetConsumed Budget consumed during execution of app call transaction.
+ BudgetConsumed *uint64 `json:"budget-consumed,omitempty"`
+
+ // Cost Net cost of app execution. Field is DEPRECATED and is subject for removal. Instead, use `budget-added` and `budget-consumed.
+ Cost *uint64 `json:"cost,omitempty"`
+
+ // Disassembly Disassembled program line by line.
+ Disassembly []string `json:"disassembly"`
+
+ // GlobalDelta Application state delta.
+ GlobalDelta *StateDelta `json:"global-delta,omitempty"`
+ LocalDeltas *[]AccountStateDelta `json:"local-deltas,omitempty"`
+
+ // LogicSigDisassembly Disassembled lsig program line by line.
+ LogicSigDisassembly *[]string `json:"logic-sig-disassembly,omitempty"`
+ LogicSigMessages *[]string `json:"logic-sig-messages,omitempty"`
+ LogicSigTrace *[]DryrunState `json:"logic-sig-trace,omitempty"`
+ Logs *[][]byte `json:"logs,omitempty"`
+}
+
+// ErrorResponse An error response with optional data field.
+type ErrorResponse struct {
+ Data *map[string]interface{} `json:"data,omitempty"`
+ Message string `json:"message"`
+}
+
+// EvalDelta Represents a TEAL value delta.
+type EvalDelta struct {
+ // Action \[at\] delta action.
+ Action uint64 `json:"action"`
+
+ // Bytes \[bs\] bytes value.
+ Bytes *string `json:"bytes,omitempty"`
+
+ // Uint \[ui\] uint value.
+ Uint *uint64 `json:"uint,omitempty"`
+}
+
+// EvalDeltaKeyValue Key-value pairs for StateDelta.
+type EvalDeltaKeyValue struct {
+ Key string `json:"key"`
+
+ // Value Represents a TEAL value delta.
+ Value EvalDelta `json:"value"`
+}
+
+// LightBlockHeaderProof Proof of membership and position of a light block header.
+type LightBlockHeaderProof struct {
+ // Index The index of the light block header in the vector commitment tree
+ Index uint64 `json:"index"`
+
+ // Proof The encoded proof.
+ Proof []byte `json:"proof"`
+
+ // Treedepth Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.
+ Treedepth uint64 `json:"treedepth"`
+}
+
+// ParticipationKey Represents a participation key used by the node.
+type ParticipationKey struct {
+ // Address Address the key was generated for.
+ Address string `json:"address"`
+
+ // EffectiveFirstValid When registered, this is the first round it may be used.
+ EffectiveFirstValid *uint64 `json:"effective-first-valid,omitempty"`
+
+ // EffectiveLastValid When registered, this is the last round it may be used.
+ EffectiveLastValid *uint64 `json:"effective-last-valid,omitempty"`
+
+ // Id The key's ParticipationID.
+ Id string `json:"id"`
+
+ // Key AccountParticipation describes the parameters used by this account in consensus protocol.
+ Key AccountParticipation `json:"key"`
+
+ // LastBlockProposal Round when this key was last used to propose a block.
+ LastBlockProposal *uint64 `json:"last-block-proposal,omitempty"`
+
+ // LastStateProof Round when this key was last used to generate a state proof.
+ LastStateProof *uint64 `json:"last-state-proof,omitempty"`
+
+ // LastVote Round when this key was last used to vote.
+ LastVote *uint64 `json:"last-vote,omitempty"`
+}
+
+// PendingTransactionResponse Details about a pending transaction. If the transaction was recently confirmed, includes confirmation details like the round and reward details.
+type PendingTransactionResponse struct {
+ // ApplicationIndex The application index if the transaction was found and it created an application.
+ ApplicationIndex *uint64 `json:"application-index,omitempty"`
+
+ // AssetClosingAmount The number of the asset's unit that were transferred to the close-to address.
+ AssetClosingAmount *uint64 `json:"asset-closing-amount,omitempty"`
+
+ // AssetIndex The asset index if the transaction was found and it created an asset.
+ AssetIndex *uint64 `json:"asset-index,omitempty"`
+
+ // CloseRewards Rewards in microalgos applied to the close remainder to account.
+ CloseRewards *uint64 `json:"close-rewards,omitempty"`
+
+ // ClosingAmount Closing amount for the transaction.
+ ClosingAmount *uint64 `json:"closing-amount,omitempty"`
+
+ // ConfirmedRound The round where this transaction was confirmed, if present.
+ ConfirmedRound *uint64 `json:"confirmed-round,omitempty"`
+
+ // GlobalStateDelta Application state delta.
+ GlobalStateDelta *StateDelta `json:"global-state-delta,omitempty"`
+
+ // InnerTxns Inner transactions produced by application execution.
+ InnerTxns *[]PendingTransactionResponse `json:"inner-txns,omitempty"`
+
+ // LocalStateDelta \[ld\] Local state key/value changes for the application being executed by this transaction.
+ LocalStateDelta *[]AccountStateDelta `json:"local-state-delta,omitempty"`
+
+ // Logs \[lg\] Logs for the application being executed by this transaction.
+ Logs *[][]byte `json:"logs,omitempty"`
+
+ // PoolError Indicates that the transaction was kicked out of this node's transaction pool (and specifies why that happened). An empty string indicates the transaction wasn't kicked out of this node's txpool due to an error.
+ PoolError string `json:"pool-error"`
+
+ // ReceiverRewards Rewards in microalgos applied to the receiver account.
+ ReceiverRewards *uint64 `json:"receiver-rewards,omitempty"`
+
+ // SenderRewards Rewards in microalgos applied to the sender account.
+ SenderRewards *uint64 `json:"sender-rewards,omitempty"`
+
+ // Txn The raw signed transaction.
+ Txn map[string]interface{} `json:"txn"`
+}
+
+// StateDelta Application state delta.
+type StateDelta = []EvalDeltaKeyValue
+
+// StateProof Represents a state proof and its corresponding message
+type StateProof struct {
+ // Message Represents the message that the state proofs are attesting to.
+ Message StateProofMessage `json:"Message"`
+
+ // StateProof The encoded StateProof for the message.
+ StateProof []byte `json:"StateProof"`
+}
+
+// StateProofMessage Represents the message that the state proofs are attesting to.
+type StateProofMessage struct {
+ // BlockHeadersCommitment The vector commitment root on all light block headers within a state proof interval.
+ BlockHeadersCommitment []byte `json:"BlockHeadersCommitment"`
+
+ // FirstAttestedRound The first round the message attests to.
+ FirstAttestedRound uint64 `json:"FirstAttestedRound"`
+
+ // LastAttestedRound The last round the message attests to.
+ LastAttestedRound uint64 `json:"LastAttestedRound"`
+
+ // LnProvenWeight An integer value representing the natural log of the proven weight with 16 bits of precision. This value would be used to verify the next state proof.
+ LnProvenWeight uint64 `json:"LnProvenWeight"`
+
+ // VotersCommitment The vector commitment root of the top N accounts to sign the next StateProof.
+ VotersCommitment []byte `json:"VotersCommitment"`
+}
+
+// TealKeyValue Represents a key-value pair in an application store.
+type TealKeyValue struct {
+ Key string `json:"key"`
+
+ // Value Represents a TEAL value.
+ Value TealValue `json:"value"`
+}
+
+// TealKeyValueStore Represents a key-value store for use in an application.
+type TealKeyValueStore = []TealKeyValue
+
+// TealValue Represents a TEAL value.
+type TealValue struct {
+ // Bytes \[tb\] bytes value.
+ Bytes string `json:"bytes"`
+
+ // Type \[tt\] value type. Value `1` refers to **bytes**, value `2` refers to **uint**
+ Type uint64 `json:"type"`
+
+ // Uint \[ui\] uint value.
+ Uint uint64 `json:"uint"`
+}
+
+// Version algod version information.
+type Version struct {
+ Build BuildVersion `json:"build"`
+ GenesisHashB64 []byte `json:"genesis_hash_b64"`
+ GenesisId string `json:"genesis_id"`
+ Versions []string `json:"versions"`
+}
+
+// AccountID defines model for account-id. Alias of string.
+type AccountID = string
+
+// Address defines model for address. Alias of string.
+type Address = string
+
+// AddressRole defines model for address-role.
+type AddressRole string
+
+// AfterTime defines model for after-time.
+type AfterTime = time.Time
+
+// AssetID defines model for asset-id.
+type AssetID uint64
+
+// BeforeTime defines model for before-time.
+type BeforeTime = time.Time
+
+// Catchpoint defines model for catchpoint.
+type Catchpoint = string
+
+// CurrencyGreaterThan defines model for currency-greater-than.
+type CurrencyGreaterThan uint64
+
+// CurrencyLessThan defines model for currency-less-than.
+type CurrencyLessThan uint64
+
+// ExcludeCloseTo defines model for exclude-close-to.
+type ExcludeCloseTo = bool
+
+// Format defines model for format.
+type Format string
+
+// Limit defines model for limit.
+type Limit uint64
+
+// Max defines model for max.
+type Max uint64
+
+// MaxRound defines model for max-round.
+type MaxRound uint64
+
+// MinRound defines model for min-round.
+type MinRound uint64
+
+// Next defines model for next.
+type Next = string
+
+// NotePrefix defines model for note-prefix.
+type NotePrefix = string
+
+// Round defines model for round.
+type Round uint64
+
+// RoundNumber defines model for round-number.
+type RoundNumber uint64
+
+// SigType defines model for sig-type.
+type SigType string
+
+// TxID defines model for tx-id.
+type TxID = string
+
+// TxType defines model for tx-type.
+type TxType string
+
+// AccountApplicationResponse holds an account's local state and created-application parameters for a single application, at a given round.
+type AccountApplicationResponse struct {
+ // AppLocalState Stores local state associated with an application.
+ AppLocalState *ApplicationLocalState `json:"app-local-state,omitempty"`
+
+ // CreatedApp Stores the global information associated with an application.
+ CreatedApp *ApplicationParams `json:"created-app,omitempty"`
+
+ // Round The round for which this information is relevant.
+ Round uint64 `json:"round"`
+}
+
+// AccountAssetResponse holds an account's asset holding and created-asset parameters for a single asset, at a given round.
+type AccountAssetResponse struct {
+ // AssetHolding Describes an asset held by an account.
+ //
+ // Definition:
+ // data/basics/userBalance.go : AssetHolding
+ AssetHolding *AssetHolding `json:"asset-holding,omitempty"`
+
+ // CreatedAsset AssetParams specifies the parameters for an asset.
+ //
+ // \[apar\] when part of an AssetConfig transaction.
+ //
+ // Definition:
+ // data/transactions/asset.go : AssetParams
+ CreatedAsset *AssetParams `json:"created-asset,omitempty"`
+
+ // Round The round for which this information is relevant.
+ Round uint64 `json:"round"`
+}
+
+// AccountResponse Account information at a given round.
+//
+// Definition:
+// data/basics/userBalance.go : AccountData
+type AccountResponse = Account
+
+// ApplicationResponse Application index and its parameters
+type ApplicationResponse = Application
+
+// AssetResponse Specifies both the unique identifier and the parameters for an asset
+type AssetResponse = Asset
+
+// BlockHashResponse defines model for BlockHashResponse.
+type BlockHashResponse struct {
+ // BlockHash Block header hash.
+ BlockHash string `json:"blockHash"`
+}
+
+// BlockResponse contains a block header and an optional certificate.
+type BlockResponse struct {
+ // Block Block header data.
+ Block map[string]interface{} `json:"block"`
+
+ // Cert Optional certificate object. This is only included when the format is set to message pack.
+ Cert *map[string]interface{} `json:"cert,omitempty"`
+}
+
+// BoxResponse Box name and its content.
+type BoxResponse = Box
+
+// BoxesResponse defines model for BoxesResponse.
+type BoxesResponse struct {
+ Boxes []BoxDescriptor `json:"boxes"`
+}
+
+// CatchpointAbortResponse A catchpoint abort response.
+type CatchpointAbortResponse struct {
+ // CatchupMessage Catchup abort response string
+ CatchupMessage string `json:"catchup-message"`
+}
+
+// CatchpointStartResponse A catchpoint start response.
+type CatchpointStartResponse struct {
+ // CatchupMessage Catchup start response string
+ CatchupMessage string `json:"catchup-message"`
+}
+
+// CompileResponse defines model for CompileResponse.
+type CompileResponse struct {
+ // Hash base32 SHA512_256 of program bytes (Address style)
+ Hash string `json:"hash"`
+
+ // Result base64 encoded program bytes
+ Result string `json:"result"`
+
+ // Sourcemap JSON of the source map
+ Sourcemap *map[string]interface{} `json:"sourcemap,omitempty"`
+}
+
+// DisassembleResponse defines model for DisassembleResponse.
+type DisassembleResponse struct {
+ // Result disassembled Teal code
+ Result string `json:"result"`
+}
+
+// DryrunResponse defines model for DryrunResponse.
+type DryrunResponse struct {
+ Error string `json:"error"`
+
+ // ProtocolVersion Protocol version is the protocol version Dryrun was operated under.
+ ProtocolVersion string `json:"protocol-version"`
+ Txns []DryrunTxnResult `json:"txns"`
+}
+
+// LightBlockHeaderProofResponse Proof of membership and position of a light block header.
+type LightBlockHeaderProofResponse = LightBlockHeaderProof
+
+// NodeStatusResponse contains information about the status of a node.
+type NodeStatusResponse struct {
+ // Catchpoint The current catchpoint that is being caught up to
+ Catchpoint *string `json:"catchpoint,omitempty"`
+
+ // CatchpointAcquiredBlocks The number of blocks that have already been obtained by the node as part of the catchup
+ CatchpointAcquiredBlocks *uint64 `json:"catchpoint-acquired-blocks,omitempty"`
+
+ // CatchpointProcessedAccounts The number of accounts from the current catchpoint that have been processed so far as part of the catchup
+ CatchpointProcessedAccounts *uint64 `json:"catchpoint-processed-accounts,omitempty"`
+
+ // CatchpointProcessedKvs The number of key-values (KVs) from the current catchpoint that have been processed so far as part of the catchup
+ CatchpointProcessedKvs *uint64 `json:"catchpoint-processed-kvs,omitempty"`
+
+ // CatchpointTotalAccounts The total number of accounts included in the current catchpoint
+ CatchpointTotalAccounts *uint64 `json:"catchpoint-total-accounts,omitempty"`
+
+ // CatchpointTotalBlocks The total number of blocks that are required to complete the current catchpoint catchup
+ CatchpointTotalBlocks *uint64 `json:"catchpoint-total-blocks,omitempty"`
+
+ // CatchpointTotalKvs The total number of key-values (KVs) included in the current catchpoint
+ CatchpointTotalKvs *uint64 `json:"catchpoint-total-kvs,omitempty"`
+
+ // CatchpointVerifiedAccounts The number of accounts from the current catchpoint that have been verified so far as part of the catchup
+ CatchpointVerifiedAccounts *uint64 `json:"catchpoint-verified-accounts,omitempty"`
+
+ // CatchpointVerifiedKvs The number of key-values (KVs) from the current catchpoint that have been verified so far as part of the catchup
+ CatchpointVerifiedKvs *uint64 `json:"catchpoint-verified-kvs,omitempty"`
+
+ // CatchupTime CatchupTime in nanoseconds
+ CatchupTime uint64 `json:"catchup-time"`
+
+ // LastCatchpoint The last catchpoint seen by the node
+ LastCatchpoint *string `json:"last-catchpoint,omitempty"`
+
+ // LastRound LastRound indicates the last round seen
+ LastRound uint64 `json:"last-round"`
+
+ // LastVersion LastVersion indicates the last consensus version supported
+ LastVersion string `json:"last-version"`
+
+ // NextVersion NextVersion of consensus protocol to use
+ NextVersion string `json:"next-version"`
+
+ // NextVersionRound NextVersionRound is the round at which the next consensus version will apply
+ NextVersionRound uint64 `json:"next-version-round"`
+
+ // NextVersionSupported NextVersionSupported indicates whether the next consensus version is supported by this node
+ NextVersionSupported bool `json:"next-version-supported"`
+
+ // StoppedAtUnsupportedRound StoppedAtUnsupportedRound indicates that the node does not support the new rounds and has stopped making progress
+ StoppedAtUnsupportedRound bool `json:"stopped-at-unsupported-round"`
+
+ // TimeSinceLastRound TimeSinceLastRound in nanoseconds
+ TimeSinceLastRound uint64 `json:"time-since-last-round"`
+}
+
+// ParticipationKeyResponse Represents a participation key used by the node.
+type ParticipationKeyResponse = ParticipationKey
+
+// ParticipationKeysResponse defines model for ParticipationKeysResponse.
+type ParticipationKeysResponse = []ParticipationKey
+
+// PendingTransactionsResponse PendingTransactions is an array of signed transactions exactly as they were submitted.
+type PendingTransactionsResponse struct {
+ // TopTransactions An array of signed transaction objects.
+ TopTransactions []map[string]interface{} `json:"top-transactions"`
+
+ // TotalTransactions Total number of transactions in the pool.
+ TotalTransactions uint64 `json:"total-transactions"`
+}
+
+// PostParticipationResponse defines model for PostParticipationResponse.
+type PostParticipationResponse struct {
+ // PartId encoding of the participation ID.
+ PartId string `json:"partId"`
+}
+
+// PostTransactionsResponse defines model for PostTransactionsResponse.
+type PostTransactionsResponse struct {
+ // TxId encoding of the transaction hash.
+ TxId string `json:"txId"`
+}
+
+// StateProofResponse Represents a state proof and its corresponding message
+type StateProofResponse = StateProof
+
+// SupplyResponse Supply represents the current supply of MicroAlgos in the system
+type SupplyResponse struct {
+ // CurrentRound The round for which this supply information is relevant.
+ CurrentRound uint64 `json:"current_round"`
+
+ // OnlineMoney Amount of online money (MicroAlgos).
+ OnlineMoney uint64 `json:"online-money"`
+
+ // TotalMoney Total amount of money (MicroAlgos) in the system.
+ TotalMoney uint64 `json:"total-money"`
+}
+
+// TransactionParametersResponse TransactionParams contains the parameters that help a client construct
+// a new transaction.
+type TransactionParametersResponse struct {
+ // ConsensusVersion ConsensusVersion indicates the consensus protocol version
+ // as of LastRound.
+ ConsensusVersion string `json:"consensus-version"`
+
+ // Fee Fee is the suggested transaction fee
+ // Fee is in units of micro-Algos per byte.
+ // Fee may fall to zero but transactions must still have a fee of
+ // at least MinTxnFee for the current network protocol.
+ Fee uint64 `json:"fee"`
+
+ // GenesisHash GenesisHash is the hash of the genesis block.
+ GenesisHash []byte `json:"genesis-hash"`
+
+ // GenesisId GenesisID is an ID listed in the genesis block.
+ GenesisId string `json:"genesis-id"`
+
+ // LastRound LastRound indicates the last round seen
+ LastRound uint64 `json:"last-round"`
+
+ // MinFee The minimum transaction fee (not per byte) required for the
+ // txn to validate for the current network protocol.
+ MinFee uint64 `json:"min-fee"`
+}
+
+// TransactionProofResponse defines model for TransactionProofResponse.
+type TransactionProofResponse struct {
+ // Hashtype The type of hash function used to create the proof, must be one of:
+ // * sha512_256
+ // * sha256
+ Hashtype TransactionProofResponseHashtype `json:"hashtype"`
+
+ // Idx Index of the transaction in the block's payset.
+ Idx uint64 `json:"idx"`
+
+ // Proof Proof of transaction membership.
+ Proof []byte `json:"proof"`
+
+ // Stibhash Hash of SignedTxnInBlock for verifying proof.
+ Stibhash []byte `json:"stibhash"`
+
+ // Treedepth Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.
+ Treedepth uint64 `json:"treedepth"`
+}
+
+// TransactionProofResponseHashtype The type of hash function used to create the proof, must be one of:
+// * sha512_256
+// * sha256
+type TransactionProofResponseHashtype string
+
+// VersionsResponse algod version information. Alias of Version.
+type VersionsResponse = Version
+
+// AccountInformationParams defines parameters for AccountInformation.
+type AccountInformationParams struct {
+ // Format Configures whether the response object is JSON or MessagePack encoded.
+ Format *AccountInformationParamsFormat `form:"format,omitempty" json:"format,omitempty"`
+
+ // Exclude When set to `all` will exclude asset holdings, application local state, created asset parameters, any created application parameters. Defaults to `none`.
+ Exclude *AccountInformationParamsExclude `form:"exclude,omitempty" json:"exclude,omitempty"`
+}
+
+// AccountInformationParamsFormat is the type of the format parameter for AccountInformation.
+type AccountInformationParamsFormat string
+
+// AccountInformationParamsExclude is the type of the exclude parameter for AccountInformation.
+type AccountInformationParamsExclude string
+
+// AccountApplicationInformationParams defines parameters for AccountApplicationInformation.
+type AccountApplicationInformationParams struct {
+ // Format Configures whether the response object is JSON or MessagePack encoded.
+ Format *AccountApplicationInformationParamsFormat `form:"format,omitempty" json:"format,omitempty"`
+}
+
+// AccountApplicationInformationParamsFormat is the type of the format parameter for AccountApplicationInformation.
+type AccountApplicationInformationParamsFormat string
+
+// AccountAssetInformationParams defines parameters for AccountAssetInformation.
+type AccountAssetInformationParams struct {
+ // Format Configures whether the response object is JSON or MessagePack encoded.
+ Format *AccountAssetInformationParamsFormat `form:"format,omitempty" json:"format,omitempty"`
+}
+
+// AccountAssetInformationParamsFormat is the type of the format parameter for AccountAssetInformation.
+type AccountAssetInformationParamsFormat string
+
+// GetPendingTransactionsByAddressParams defines parameters for GetPendingTransactionsByAddress.
+type GetPendingTransactionsByAddressParams struct {
+ // Max Truncated number of transactions to display. If max=0, returns all pending txns.
+ Max *uint64 `form:"max,omitempty" json:"max,omitempty"`
+
+ // Format Configures whether the response object is JSON or MessagePack encoded.
+ Format *GetPendingTransactionsByAddressParamsFormat `form:"format,omitempty" json:"format,omitempty"`
+}
+
+// GetPendingTransactionsByAddressParamsFormat is the type of the format parameter for GetPendingTransactionsByAddress.
+type GetPendingTransactionsByAddressParamsFormat string
+
+// GetApplicationBoxByNameParams defines parameters for GetApplicationBoxByName.
+type GetApplicationBoxByNameParams struct {
+ // Name A box name, in the goal app call arg form 'encoding:value'. For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.
+ Name string `form:"name" json:"name"`
+}
+
+// GetApplicationBoxesParams defines parameters for GetApplicationBoxes.
+type GetApplicationBoxesParams struct {
+ // Max Max number of box names to return. If max is not set, or max == 0, returns all box-names.
+ Max *uint64 `form:"max,omitempty" json:"max,omitempty"`
+}
+
+// GetBlockParams defines parameters for GetBlock.
+type GetBlockParams struct {
+ // Format Configures whether the response object is JSON or MessagePack encoded.
+ Format *GetBlockParamsFormat `form:"format,omitempty" json:"format,omitempty"`
+}
+
+// GetBlockParamsFormat is the type of the format parameter for GetBlock.
+type GetBlockParamsFormat string
+
+// GetTransactionProofParams defines parameters for GetTransactionProof.
+type GetTransactionProofParams struct {
+ // Hashtype The type of hash function used to create the proof, must be one of:
+ // * sha512_256
+ // * sha256
+ Hashtype *GetTransactionProofParamsHashtype `form:"hashtype,omitempty" json:"hashtype,omitempty"`
+
+ // Format Configures whether the response object is JSON or MessagePack encoded.
+ Format *GetTransactionProofParamsFormat `form:"format,omitempty" json:"format,omitempty"`
+}
+
+// GetTransactionProofParamsHashtype is the type of the hashtype parameter for GetTransactionProof.
+type GetTransactionProofParamsHashtype string
+
+// GetTransactionProofParamsFormat is the type of the format parameter for GetTransactionProof.
+type GetTransactionProofParamsFormat string
+
+// ShutdownNodeParams defines parameters for ShutdownNode.
+type ShutdownNodeParams struct {
+ Timeout *uint64 `form:"timeout,omitempty" json:"timeout,omitempty"`
+}
+
+// TealCompileTextBody defines the text/plain request body for TealCompile.
+type TealCompileTextBody = openapi_types.File
+
+// TealCompileParams defines parameters for TealCompile.
+type TealCompileParams struct {
+ // Sourcemap When set to `true`, returns the source map of the program as a JSON. Defaults to `false`.
+ Sourcemap *bool `form:"sourcemap,omitempty" json:"sourcemap,omitempty"`
+}
+
+// GetPendingTransactionsParams defines parameters for GetPendingTransactions.
+type GetPendingTransactionsParams struct {
+ // Max Truncated number of transactions to display. If max=0, returns all pending txns.
+ Max *uint64 `form:"max,omitempty" json:"max,omitempty"`
+
+ // Format Configures whether the response object is JSON or MessagePack encoded.
+ Format *GetPendingTransactionsParamsFormat `form:"format,omitempty" json:"format,omitempty"`
+}
+
+// GetPendingTransactionsParamsFormat is the type of the format parameter for GetPendingTransactions.
+type GetPendingTransactionsParamsFormat string
+
+// PendingTransactionInformationParams defines parameters for PendingTransactionInformation.
+type PendingTransactionInformationParams struct {
+ // Format Configures whether the response object is JSON or MessagePack encoded.
+ Format *PendingTransactionInformationParamsFormat `form:"format,omitempty" json:"format,omitempty"`
+}
+
+// PendingTransactionInformationParamsFormat is the type of the format parameter for PendingTransactionInformation.
+type PendingTransactionInformationParamsFormat string
+
+// TealCompileTextRequestBody defines body for TealCompile for text/plain ContentType.
+type TealCompileTextRequestBody = TealCompileTextBody
+
+// TealDryrunJSONRequestBody defines body for TealDryrun for application/json ContentType.
+type TealDryrunJSONRequestBody = DryrunRequest
diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/private/private_routes.yml b/daemon/algod/api/server/v2/generated/nonparticipating/private/private_routes.yml
new file mode 100644
index 000000000..8967301b8
--- /dev/null
+++ b/daemon/algod/api/server/v2/generated/nonparticipating/private/private_routes.yml
@@ -0,0 +1,19 @@
+package: private
+generate:
+ echo-server: true
+ embedded-spec: true
+output-options:
+ include-tags:
+ - nonparticipating
+ - private
+ exclude-tags:
+ - public
+ - participating
+ - common
+ type-mappings:
+ integer: uint64
+ skip-prune: true
+additional-imports:
+ - alias: "."
+ package: "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
+output: ./server/v2/generated/nonparticipating/private/routes.go
diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go
new file mode 100644
index 000000000..538419b26
--- /dev/null
+++ b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go
@@ -0,0 +1,359 @@
+// Package private provides primitives to interact with the openapi HTTP API.
+//
+// Code generated by github.com/algorand/oapi-codegen DO NOT EDIT.
+package private
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+
+ . "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
+ "github.com/algorand/oapi-codegen/pkg/runtime"
+ "github.com/getkin/kin-openapi/openapi3"
+ "github.com/labstack/echo/v4"
+)
+
+// ServerInterface represents all server handlers.
+type ServerInterface interface {
+ // Aborts a catchpoint catchup.
+ // (DELETE /v2/catchup/{catchpoint})
+ AbortCatchup(ctx echo.Context, catchpoint string) error
+ // Starts a catchpoint catchup.
+ // (POST /v2/catchup/{catchpoint})
+ StartCatchup(ctx echo.Context, catchpoint string) error
+ // Shuts down the node; an optional timeout is supplied via ShutdownNodeParams.
+ // (POST /v2/shutdown)
+ ShutdownNode(ctx echo.Context, params ShutdownNodeParams) error
+}
+
+// ServerInterfaceWrapper converts echo contexts to parameters.
+type ServerInterfaceWrapper struct {
+ Handler ServerInterface
+}
+
+// AbortCatchup binds the catchpoint path parameter from the echo context and invokes the AbortCatchup handler.
+func (w *ServerInterfaceWrapper) AbortCatchup(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "catchpoint" -------------
+ var catchpoint string
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "catchpoint", runtime.ParamLocationPath, ctx.Param("catchpoint"), &catchpoint)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter catchpoint: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.AbortCatchup(ctx, catchpoint)
+ return err
+}
+
+// StartCatchup binds the catchpoint path parameter from the echo context and invokes the StartCatchup handler.
+func (w *ServerInterfaceWrapper) StartCatchup(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "catchpoint" -------------
+ var catchpoint string
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "catchpoint", runtime.ParamLocationPath, ctx.Param("catchpoint"), &catchpoint)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter catchpoint: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.StartCatchup(ctx, catchpoint)
+ return err
+}
+
+// ShutdownNode binds the optional timeout query parameter from the echo context and invokes the ShutdownNode handler.
+func (w *ServerInterfaceWrapper) ShutdownNode(ctx echo.Context) error {
+ var err error
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params ShutdownNodeParams
+ // ------------- Optional query parameter "timeout" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "timeout", ctx.QueryParams(), &params.Timeout)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter timeout: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.ShutdownNode(ctx, params)
+ return err
+}
+
+// EchoRouter is a simple interface specifying the echo.Route addition
+// functions present on both echo.Echo and echo.Group, so that either of
+// them can be used for path registration.
+type EchoRouter interface {
+ CONNECT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ DELETE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ GET(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ HEAD(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ OPTIONS(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ PATCH(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ POST(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ PUT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ TRACE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+}
+
+// RegisterHandlers adds each server route to the EchoRouter, with no base URL prefix.
+func RegisterHandlers(router EchoRouter, si ServerInterface, m ...echo.MiddlewareFunc) {
+ RegisterHandlersWithBaseURL(router, si, "", m...)
+}
+
+// RegisterHandlersWithBaseURL registers handlers, prepending baseURL to the
+// paths so that they can be served under a prefix.
+func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL string, m ...echo.MiddlewareFunc) {
+
+ wrapper := ServerInterfaceWrapper{
+ Handler: si,
+ }
+
+ router.DELETE(baseURL+"/v2/catchup/:catchpoint", wrapper.AbortCatchup, m...)
+ router.POST(baseURL+"/v2/catchup/:catchpoint", wrapper.StartCatchup, m...)
+ router.POST(baseURL+"/v2/shutdown", wrapper.ShutdownNode, m...)
+
+}
+
+// Base64 encoded, gzipped, json marshaled Swagger object
+var swaggerSpec = []string{
+
+ "H4sIAAAAAAAC/+x9+5PbNtLgv4LSflV+nCjN+JFdT1Xqu4ntZOfiOC7PbPbus30JRLYk7JAAA4AjKb75",
+ "36/QAEiQBCXOI86Xqv3JHhGPRqPR6Dc+T1JRlIID12py8nlSUkkL0CDxL5qmouI6YZn5KwOVSlZqJvjk",
+ "xH8jSkvGV5PphJlfS6rXk+mE0wKaNqb/dCLh14pJyCYnWlYwnah0DQU1A+tdaVrXI22TlUjcEKd2iLNX",
+ "k+s9H2iWSVCqD+WPPN8RxtO8yoBoSbmiqfmkyIbpNdFrpojrTBgnggMRS6LXrcZkySDP1Mwv8tcK5C5Y",
+ "pZt8eEnXDYiJFDn04XwpigXj4KGCGqh6Q4gWJIMlNlpTTcwMBlbfUAuigMp0TZZCHgDVAhHCC7wqJicf",
+ "Jgp4BhJ3KwV2hf9dSoDfINFUrkBPPk1ji1tqkIlmRWRpZw77ElSVa0WwLa5xxa6AE9NrRn6olCYLIJST",
+ "99++JE+fPn1hFlJQrSFzRDa4qmb2cE22++RkklEN/nOf1mi+EpLyLKnbv//2Jc5/7hY4thVVCuKH5dR8",
+ "IWevhhbgO0ZIiHENK9yHFvWbHpFD0fy8gKWQMHJPbON73ZRw/j90V1Kq03UpGNeRfSH4ldjPUR4WdN/H",
+ "w2oAWu1LgylpBv1wlLz49Pl4enx0/ZcPp8l/uT+fP70eufyX9bgHMBBtmFZSAk93yUoCxdOypryPj/eO",
+ "HtRaVHlG1vQKN58WyOpdX2L6WtZ5RfPK0AlLpTjNV0IR6sgogyWtck38xKTiuWFTZjRH7YQpUkpxxTLI",
+ "pob7btYsXZOUKjsEtiMblueGBisF2RCtxVe35zBdhygxcN0KH7ig/77IaNZ1ABOwRW6QpLlQkGhx4Hry",
+ "Nw7lGQkvlOauUje7rMjFGghObj7YyxZxxw1N5/mOaNzXjFBFKPFX05SwJdmJimxwc3J2if3dagzWCmKQ",
+ "hpvTukfN4R1CXw8ZEeQthMiBckSeP3d9lPElW1USFNmsQa/dnSdBlYIrIGLxL0i12fb/df7jWyIk+QGU",
+ "oit4R9NLAjwV2fAeu0ljN/i/lDAbXqhVSdPL+HWds4JFQP6BbllRFYRXxQKk2S9/P2hBJOhK8iGA7IgH",
+ "6Kyg2/6kF7LiKW5uM21LUDOkxFSZ092MnC1JQbdfH00dOIrQPCcl8IzxFdFbPiikmbkPg5dIUfFshAyj",
+ "zYYFt6YqIWVLBhmpR9kDiZvmEDyM3wyeRrIKwPGDDIJTz3IAHA7bCM2Yo2u+kJKuICCZGfmH41z4VYtL",
+ "4DWDI4sdfiolXDFRqbrTAIw49X7xmgsNSSlhySI0du7QYbiHbePYa+EEnFRwTRmHzHBeBFposJxoEKZg",
+ "wv3KTP+KXlAFXz0busCbryN3fym6u753x0ftNjZK7JGM3IvmqzuwcbGp1X+E8hfOrdgqsT/3NpKtLsxV",
+ "smQ5XjP/Mvvn0VApZAItRPiLR7EVp7qScPKRPzZ/kYSca8ozKjPzS2F/+qHKNTtnK/NTbn96I1YsPWer",
+ "AWTWsEa1KexW2H/MeHF2rLdRpeGNEJdVGS4obWmlix05ezW0yXbMmxLmaa3KhlrFxdZrGjftobf1Rg4A",
+ "OYi7kpqGl7CTYKCl6RL/2S6RnuhS/mb+Kcvc9NblMoZaQ8fuvkXbgLMZnJZlzlJqkPjefTZfDRMAqyXQ",
+ "psUcL9STzwGIpRQlSM3soLQsk1ykNE+UphpH+g8Jy8nJ5C/zxrgyt93VPJj8jel1jp2MPGplnISW5Q3G",
+ "eGfkGrWHWRgGjZ+QTVi2hxIR43YTDSkxw4JzuKJczxp9pMUP6gP8wc3U4NuKMhbfHf1qEOHENlyAsuKt",
+ "bfhAkQD1BNFKEK0oba5ysah/eHhalg0G8ftpWVp8oGgIDKUu2DKl1SNcPm1OUjjP2asZ+S4cG+VswfOd",
+ "uRysqGHuhqW7tdwtVhuO3BqaER8ogtsp5MxsjUeDkeHvg+JQZ1iL3Eg9B2nFNP67axuSmfl9VOc/B4mF",
+ "uB0mLtSiHOasAoO/BJrLww7l9AnH2XJm5LTb93ZkY0aJE8ytaGXvftpx9+CxRuFG0tIC6L7Yu5Rx1MBs",
+ "IwvrHbnpSEYXhTk4wwGtIVS3PmsHz0MUEiSFDgzf5CK9/DtV63s48ws/Vv/44TRkDTQDSdZUrWeTmJQR",
+ "Hq9mtDFHzDRE7Z0sgqlm9RLva3kHlpZRTYOlOXjjYolFPfZDpgcyorv8iP+hOTGfzdk2rN8OOyMXyMCU",
+ "Pc7Og5AZVd4qCHYm0wBNDIIUVnsnRuu+EZQvm8nj+zRqj15bg4HbIbcI3CGxvfdj8I3YxmD4Rmx7R0Bs",
+ "Qd0HfZhxUIzUUKgR8L1ykAncf4c+KiXd9ZGMY49BslmgEV0VngYe3vhmlsbyeroQ8nbcp8NWOGnsyYSa",
+ "UQPmO+0gCZtWZeJIMWKTsg06AzUuvP1Mozt8DGMtLJxr+jtgQZlR7wML7YHuGwuiKFkO90D66yjTX1AF",
+ "T5+Q87+fPj9+8vOT518ZkiylWElakMVOgyIPnW5GlN7l8Ki/MtSOqlzHR//qmbdCtseNjaNEJVMoaNkf",
+ "ylo3rQhkmxHTro+1Nppx1TWAYw7nBRhObtFOrOHegPaKKSNhFYt72YwhhGXNLBlxkGRwkJhuurxmml24",
+ "RLmT1X2osiClkBH7Gh4xLVKRJ1cgFRMRV8k714K4Fl68Lbu/W2jJhipi5kbTb8VRoIhQlt7y8XzfDn2x",
+ "5Q1u9nJ+u97I6ty8Y/aljXxvSVSkBJnoLScZLKpVSxNaSlEQSjLsiHf0G7Za60BkeSeFWN77rR2dJbYk",
+ "/GAFvtz06Yt9b0UGRu2u1D2w92awBnuGckKc0YWoNKGEiwxQR69UnPEPOHrRw4SOMR3eJXptZbgFGH0w",
+ "pZVZbVUSdPv0aLHpmNDUUlGCqFEDdvHaoWFb2emsEzGXQDOjJwInYuGMz84sjouk6LPSnnW6ayeiObfg",
+ "KqVIQSmj31ut7SBovp0lS70HTwg4AlzPQpQgSyrvDOzl1UE4L2GXoIdVkYff/6Qe/QHwaqFpfgCx2CaG",
+ "3lqFcB6GPtTjpt9HcN3JQ7KjEojnfUZfMQwiBw1DKLwRTgb3rwtRbxfvjpYrkGjr/10p3k9yNwKqQf2d",
+ "6f2u0FblQNyQE50vWIGWIE65UJAKnqnoYDlVOjnElk2jlnxvVhBwwhgnxoEHrJFvqNLWP8V4hmq1vU5w",
+ "HmumNFMMAzwo4piRf/LSTX/s1NyDXFWqFnVUVZZCashia+Cw3TPXW9jWc4llMHYtT2lBKgWHRh7CUjC+",
+ "Q5ZdiUUQ1bUZ1zlw+4tDY6e553dRVLaAaBCxD5Bz3yrAbhg7MQAIUw2iLeEw1aGcOmBjOlFalKXhFjqp",
+ "eN1vCE3ntvWp/kfTtk9cVDf3dibAzK49TA7yjcWsjZpZU6OU4cikoJdG9kAVyzrS+jCbw5goxlNI9lG+",
+ "OZbnplV4BA4c0gHt1sXlBbN1DkeHfqNEN0gEB3ZhaMEDqvY7KjVLWYmS4vewu3fBuTtB1ABMMtCUGfUv",
+ "+GCF6DLsT6xntDvm7QTpUVpRH/yeWhRZTs4UXhht4C9hh56gdzbk5iII1LkHTSAyqjndlBME1DvyjQAT",
+ "NoEtTXW+M9ecXsOObEACUdWiYFrbGKq2oqBFmYQDRC1Oe2Z05lUbruJ3YIy99xyHCpbX34rpxEpU++G7",
+ "6IhVLXQ4SaoUIh/haeshIwrBKE8cKYXZdeZC9nxcl6ekFpBOiEHbes08H6gWmnEF5P+IiqSUo8Baaahv",
+ "BCGRzeL1a2YwF1g9p/O5NRiCHAqwcjh+efy4u/DHj92eM0WWsPFxrqZhFx2PH6MW/E4o3Tpc92CCMcft",
+ "LMLb0RRnLgonw3V5ymGfjxt5zE6+6wxe2+/MmVLKEa5Z/p0ZQOdkbsesPaSRcf4uHHeUlS0YOrZu3HcM",
+ "OPh9bDTN0DHo+hMHbtrm45Cn1shX+e4e+LQdiEgoJSg8VaFeouxXsQxDod2xUzuloeibbmzXnwcEm/de",
+ "LOhJmYLnjENSCA67aPYP4/ADfoz1tid7oDPy2KG+XbGpBX8HrPY8Y6jwrvjF3Q5I+V0donAPm98dt2O1",
+ "C4PAUSuFvCSUpDlDnVVwpWWV6o+colQcnOWIK8fL+sN60kvfJK6YRfQmN9RHTtGNV8vKUfPzEiJa8LcA",
+ "Xl1S1WoFSnfkgyXAR+5aMU4qzjTOVZj9SuyGlSDRnzKzLQu6I0uao1r3G0hBFpVu35gYq6q00bqsCdFM",
+ "Q8TyI6ea5GA00B8Yv9jicD4k1NMMB70R8rLGwix6HlbAQTGVxF1O39mvGA3glr92kQGYOGQ/W6OTGb8J",
+ "aN1paCXD/N+H/3ny4TT5L5r8dpS8+B/zT5+fXT963PvxyfXXX/+/9k9Pr79+9J//EdspD3ssktJBfvbK",
+ "SZNnr1BkaKxOPdi/mMWhYDyJEtnFGkjBOAbkd2iLPDSCjyegR41Zz+36R6633BDSFc1ZRvXtyKHL4npn",
+ "0Z6ODtW0NqKjQPq1fopFR6xEUtL0Ej22kxXT62oxS0Ux91L0fCVqiXqeUSgEx2/ZnJZsrkpI51fHB670",
+ "O/ArEmFXHSZ7a4Gg7++NRz+jQdUFNOPJW1bcEkWlnFEXg/u8300sp3WEu81sPSEY/rym3mns/nzy/KvJ",
+ "tAlbrr8bTd1+/RQ5EyzbxoLTM9jGJDV31PCIPVCkpDsFOs6HEPaoi9H6pcJhCzAivlqz8svzHKXZIs4r",
+ "fciU0/i2/IzbWCZzEtE8u3NWH7H88nBrCZBBqdexjLeWzIGtmt0E6LjMSimugE8Jm8Gsq3FlK1De2ZkD",
+ "XWLmFZoYxZgQ0PocWELzVBFgPVzIKLUmRj8oJju+fz2dODFC3btk7waOwdWds7bF+r+1IA++e31B5o71",
+ "qgc2T8IOHUS2RywZLniz5Uw13Mzm+dpEkY/8I38FS8aZ+X7ykWdU0/mCKpaqeaVAfkNzylOYrQQ58fGg",
+ "r6imH3lPZhtMxQ8icUlZLXKWkstQtm7I06ZX9kf4+PGD4fgfP37q+ZX6krCbKspf7ATJhum1qHTi8scS",
+ "CRsqswjoqs4fwpFt9ue+WafEjW1ZsctPc+PHeR4tS9XNI+gvvyxzs/yADJWLkjdbRpQW0ks1RtSx0OD+",
+ "vhXuYpB045MPKwWK/FLQ8gPj+hNJPlZHR0+BtALrf3HCg6HJXQktm9et8hy69i5cuNWQYKslTUq6AhVd",
+ "vgZa4u6j5F2gdTXPCXZrBfT7gCUcqlmAx8fwBlg4bhycjIs7t718IYD4EvATbiG2MeJG47S47X4FIf63",
+ "3q5OmkBvlyq9TszZjq5KGRL3O1PnB6+MkOU9SYqtuDkELpV6ASRdQ3oJGWZ1QlHq3bTV3TsrncjqWQdT",
+ "NvvZBuhiih6aBxdAqjKjTqinfNfNlVKgtU8Qew+XsLsQTYbfTZKj2rk6auigIqUG0qUh1vDYujG6m+88",
+ "4pifUJY+5QVjnz1ZnNR04fsMH2Qr8t7DIY4RRSuXZAgRVEYQYYl/AAW3WKgZ706kH1ue0VcW9uaLJEt7",
+ "3k9ck0YNc87rcDWYImO/F4ClFMRGkQU1crtwVQBsPkrAxSpFVzAgIYcW2pFZHy2rLg5y6N6L3nRi2b3Q",
+ "evdNFGTbODFrjlIKmC+GVFCZ6YQs+JmsEwBXMCNY3MchbJGjmFTHdlimQ2XLUm6rlQyBFidgkLwRODwY",
+ "bYyEks2aKl+gAOs4+LM8Sgb4HfOr9mXVngXe9qBYQ50z63lu95z2tEuXW+sTan0WbahajsiINRI+BvjF",
+ "tkNwFIAyyGFlF24be0Jpcr2aDTJw/Lhc5owDSWKOe6qUSJmtMNFcM24OMPLxY0KsMZmMHiFGxgHY6NzC",
+ "gclbEZ5NvroJkNzlqlE/NrrFgr8hHlZrQ9mMyCNKw8IZHwia9ByAumiP+v7qxBzhMITxKTFs7ormhs05",
+ "ja8ZpJfciWJrJ5XTuVcfDYmze2z59mK50ZrsVXSb1YQykwc6LtDtgXghtomNq49KvIvtwtB7NLoPo/xj",
+ "B9Om0T5QZCG26LLHq8VGkx2AZRgOD0ag4W+ZQnrFfkO3uQVm37T7pakYFSokGWfOq8llSJwYM/WABDNE",
+ "Lg+DzNhbAdAxdjQ15Jzye1BJbYsn/cu8udWmTcUHHzgdO/5DRyi6SwP461th6lzWd12JJWqnaHue22m8",
+ "gQgZI3rDJvrunr5TSUEOqBQkLSEquYw5AY1uA3jjnPtugfECk4Up3z0KwhkkrJjS0JjjzcXs/Utf2jxJ",
+ "sUaJEMvh1elSLs363gtRX1M2CR47tpb5xVdwJTQkSyaVTtCXEV2CafStQqX6W9M0Liu1AyZsuS6WxXkD",
+ "TnsJuyRjeRWnVzfv96/MtG9rlqiqBfJbxgnQdE0WWF4uGka1Z2obabd3wW/sgt/Qe1vvuNNgmpqJpSGX",
+ "9hx/knPR4bz72EGEAGPE0d+1QZTuYZAo+7yCXMcyIAO5yR7OzDSc7bO+9g5T5sc+GIBioRi+o+xI0bUE",
+ "BoO9q2DoJjJiCdNBdbZ+Vs/AGaBlybJtxxZqRx3UmOmNDB6+7EUHC7i7brADGAjsnrHAYgmqXeGkEfBt",
+ "nb1WgvFsFGYu2nVIQoYQTsWUrxLbR1SdeHAIVxdA8+9h95Npi8uZXE8ndzOdxnDtRjyA63f19kbxjE5+",
+ "a0preUJuiHJallJc0TxxBuYh0pTiypEmNvf26C/M6uJmzIvXp2/eOfCvp5M0ByqTWlQYXBW2K/80q7LF",
+ "VAYOiK9CaXQ+L7NbUTLY/LoCRGiU3qzBVfwLpNFeaaLG4RAcRWekXsZjjQ6anJ1vxC5xj48EytpF0pjv",
+ "rIek7RWhV5Tl3m7moR2IC8LFjatvFeUK4QB39q4ETrLkXtlN73THT0dDXQd4UjjXnpqEhS27qYjgXRe6",
+ "ESHRHIekWlAsLGStIn3mxKsCLQmJylkat7HyhTLEwa3vzDQm2HhAGDUjVmzAFcsrFoxlmqkRim4HyGCO",
+ "KDJ9kaoh3C2Eq5decfZrBYRlwLX5JPFUdg4qVnJy1vb+dWpkh/5cbmBroW+Gv4uMERbV6t54CMR+ASP0",
+ "1PXAfVWrzH6htUXK/BC4JG7g8A9n7F2Je5z1jj4cNdswyHXb4xaWN+/zP0MYthTm4drqXnl11b0G5ojW",
+ "SmcqWUrxG8T1PFSPI1kHvowYwyiX34DPIslbXRZTW3eaku/N7IPbPSTdhFaodpDCANXjzgduOaxn5C3U",
+ "lNuttqWLW7FucYIJ41PndvyGYBzMvZjenG4WNFbsyQgZBqbTxgHcsqVrQXxnj3tn9meustuMBL7kui2z",
+ "+XglyCYhqJ/bf0uBwU47WlRoJAOk2lAmmFr/X65EZJiKbyi3FbBNP3uUXG8F1vhlem2ExGxaFTf7Z5Cy",
+ "guZxySFL+ybejK2Yrf9cKQgKDLuBbOF8S0WuSLN1sTeoOVuSo2lQwtztRsaumGKLHLDFsW2xoAo5eW2I",
+ "qruY5QHXa4XNn4xovq54JiHTa2URqwSphTpUb2rn1QL0BoCTI2x3/II8RLedYlfwyGDR3c+Tk+MXaHS1",
+ "fxzFLgBX6H0fN8mQnfzTsZM4HaPf0o5hGLcbdRbNDbWvcwwzrj2nyXYdc5awpeN1h89SQTldQTxSpDgA",
+ "k+2Lu4mGtA5eeGZLyystxY4wHZ8fNDX8aSCO3bA/CwZJRVEwXTjnjhKFoaemerCd1A9n69S7wm8eLv8R",
+ "faSldxF1lMgvazS191ts1ejJfksLaKN1SqhNoc5ZE73gy1GSM1+hASvh1QXwLG7MXGbpKOZgMMOSlJJx",
+ "jYpFpZfJ30i6ppKmhv3NhsBNFl89i1T/a1eh4jcD/IvjXYICeRVHvRwgey9DuL7kIRc8KQxHyR41eSPB",
+ "qRx05sbddkO+w/1DjxXKzCjJILlVLXKjAae+E+HxPQPekRTr9dyIHm+8si9OmZWMkwetzA794/0bJ2UU",
+ "QsbKLjXH3UkcErRkcIWxe/FNMmPecS9kPmoX7gL9H+t58CJnIJb5sxxTBL4REe3UV6SsLekuVj1iHRg6",
+ "puaDIYOFG2pK2tX/vrzTzxuf+84n88XDin90gf2DtxSR7FcwsIlBZdLodmb198D/Tck3Yjt2UzsnxG/s",
+ "fwPURFFSsTz7qcnv7BR+lZSn66g/a2E6/tw8UVEvzt5P0epGa8o55NHhrCz4s5cZI1Ltv8TYeQrGR7bt",
+ "1qK1y+0srgG8DaYHyk9o0Mt0biYIsdpOeKsDqvOVyAjO05TSabhnv4ZxUGny1wqUjiUP4Qcb1IV2S6Pv",
+ "2kKHBHiG2uKMfGefmFsDaVX6QC2NFVVuq0ZAtgLpDOpVmQuaTYkZ5+L16RtiZ7V9bKF1W2hxhUpKexUd",
+ "e1VQJWxceLCvmR5PXRg/zv5YarNqpbHwjtK0KGNppqbFhW+AuayhDR/VlxA7M/LKao7K6yV2EkMPSyYL",
+ "o3HVo1nZBWnC/Edrmq5RJWux1GGSH18h1FOlCl7lqavr16Wz8NwZuF2RUFsjdEqE0Zs3TNmXxeAK2pmt",
+ "dZq3Mwn4TNf28mTFuaWUqOyxrwzBbdDugbOBGt7MH4Wsg/gbCuS2wO5NC6aeY69oLZpu9dXeczw2u7Gu",
+ "mu5fjEwpF5ylWAkmdjW7V8rG+MBGFM3pGln9EXcnNHK4ojVf6zA5h8XBKrCeETrE9Y3wwVezqZY67J8a",
+ "n8NaU01WoJXjbJBNfeliZwdkXIErhYYP1gV8UsiWXxE5ZNRVndQujRuSEabFDCh235pvb53aj/Hil4yj",
+ "gO/Q5kLTraUOH1HSRitgmqwEKLeedm6w+mD6zDBNNoPtp5l/dAnHsG45s2zrg+4Pdeo90s4DbNq+NG1t",
+ "UZTm51YEsp30tCzdpMOFraPygN7yQQRHPIuJd+0EyK3HD0fbQ257Q0nwPjWEBlfoiIYS7+EeYdRFnjsP",
+ "CBih1VIUtiA2hCtaC4HxCBhvGIfmSbDIBZFGrwTcGDyvA/1UKqm2IuAonnYBNEfvc4yhKe1cD3cdqrPB",
+ "iBJco59jeBub+tQDjKNu0AhulO/ql8gMdQfCxEt8AtEhsl9tGqUqJ0RlmFHQqT8dYxyGcfsK9+0LoH8M",
+ "+jKR7a4ltSfnJjfRUJLoospWoBOaZbEakt/gV4JfSVah5ABbSKu6Bl9ZkhSrq7TLzfSpzU2UCq6qYs9c",
+ "vsEdp0tFTI5+ixMonzLRDD4jyH4N6331+t371y9PL16/sveFIqqyWaJG5pZQGIY4I2dcaTCic6WA/BKi",
+ "8Rfs90tnwXEwg7rzEaINa997QsRcmcUO/43VyRsmIBcrcuNoRR8Ygh1vLN63R+oJ5+boJYqtkvGYwKvv",
+ "7uhopr7deWz63+uBzMWqDcgXrmCxjxmHexRjw6/N/RYWeOgVf7Q3YF1/AWMDhX8tCLXbOnO4zTzxxu1V",
+ "g0SfVP0ayX47yfC7IlO8owcihIO6HdSKAdbJORQnnA6GtVPtEuw0JXs55WDSkg0ysulJ9lHsqIF3KLDI",
+ "xhWZz73e4wTYnjqAY+9FqI9Y6wP0vQ+HJSVlzoPfMIs+Zl3g/LBVc9+haza4uwgXjj5oWIw/7jBcQqcp",
+ "m4PXQCkUawrWxl59GBkudYEPNwQlgPpj+ViFK0i1EeoDH6wEuElBIDNZ8EbNv0vpDKgfdVSZq6Czr2xO",
+ "vzTxAWbTy2wJsrNsWdfZ+CIxp3WkDfr/8ZWYFXD3TEw7Zn105OxyCalmVwcyif5ptNQmS2Xq9Vj73FuQ",
+ "WMTqSEz/DP8N1esGoH2JPnvhCUrL3RmcoTyCS9g9UKRFDdE6s1PP825TgwAxgNwhMSQiVMyTbQ1vzrnI",
+ "VE0ZiAUfOWK7Q1PNabDAf5AXd8u5PEkSGubK7ZnySsQ091Fzma43yiDFoMKhZKN+ie1hQegVVjRX9eM7",
+ "9Tv7gVZDzvqV3jauBgLmfdW2Zl8NAZT/zSd52llydgnhEwRo2d9QmfkWUVXVa8HJnvuolyHky0N3gV7W",
+ "M7Mmzq+fExKpHYTRnGkuFOOrZCgkth1aF779igEEeB1g7XKEawnSPdWCJuRcKEi08HGB++DYhwr3Tult",
+ "kKAG6/VZ4AaraLxvyoRgBVSKVTOoC44IF2j0Vmqgk0Exj+E59yH7pf3ukyB8BcwRGrmj1+RgNQ4f4clU",
+ "D4kh1S+Juy0PJ1fcRutlnNunxlSssgc3qAytx6UUWZXaCzo8GI2NYWzdnD2sJKowpv1V9mT/HKtIvQlS",
+ "1S5hN7fyd7qmvCnn1T7WVoSyawhSwzu7fa8Ggbjuk6/sAlb3AucfqVRPJ6UQeTJgLj7rFyjpnoFLll5C",
+ "Rszd4WOjBor8k4dopaz9gZv1zhfkKEvgkD2aEWLU8qLUO+8abNfa7UzOH+h9829x1qyyNYOcvj/7yONh",
+ "fVjNR96Rv/lh9nM1BYb53XEqO8iB8hfbgeIokm4iT16MfdE44qzrPkPQEJWFIial3DIXetT57uv8EdIP",
+ "6vDv137CUglNDJa0piOUlrxBpyu8/NBYhMa9COA7HAAvVIqDNwE8N3Lg/MGBUj/USAmWMkgJreUf0rP9",
+ "Q9w1Xwq2SGFkvVmmLVxjneztfQmMKOplbZuI47lvwsC6CIJjrZi+6UOhKRFLzoaEY86lvKL5lzdfYMGM",
+ "U8SHe9gqvtBQ/w2RbFGpbhet8IaOmjvQde9vav4OzS3/BLNHURuwG8rZUeu3GHwJSSyNRnOSi+ZNFhyS",
+ "bHBMazQ+/oosXKR1KSFlinWSUDa+Gmat7mFx6Oa9s/365aF1/iT0HcjYKQiiJG+bynpa4P3QQNgc0T+Y",
+ "qQyc3CiVx6ivRxYR/MV4VJjyfOC6uGxZk22l0k40h5Bwz1blwI19Q6tyP5l77PJwHXjpVAr66xx9W7dw",
+ "G7mom7WNdYn0kbuv/NoYT0a8qqLpjq4UixAsSUoQVPLL8S9EwhLfHBDk8WOc4PHjqWv6y5P2Z3OcHz+O",
+ "inFfzInSevrdzRujmJ+Gov9shNtAoGlnPyqWZ4cIoxU23Lz/gYGxP7vEgT/kBZKfrT21f1Rd7fabuG+7",
+ "m4CIiay1NXkwVRAQPCIW2HWbRR/nV5BWkukd1jPw5jf2c7RO1He1xd55fOoMWHf3aXEJdUWMxr5fKX+7",
+ "fifsY/6FkanRea7xMbjXW1qUObiD8vWDxV/h6d+eZUdPj/+6+NvR86MUnj1/cXREXzyjxy+eHsOTvz1/",
+ "dgTHy69eLJ5kT549WTx78uyr5y/Sp8+OF8++evHXB4YPGZAtoBOfPTf53/hMT3L67iy5MMA2OKElq9+A",
+ "NGTsXwigKZ5EKCjLJyf+p//pT9gsFUUzvP914pJzJmutS3Uyn282m1nYZb5Cg16iRZWu536e/tt7787q",
+ "AGub8I07amNnDSngpjpSOMVv71+fX5DTd2ezhmAmJ5Oj2dHsGF/WKoHTkk1OJk/xJzw9a9z3uSO2ycnn",
+ "6+lkvgaao//L/FGAliz1n9SGrlYgZ+6pBPPT1ZO5FyXmn50x83rft3lYdXT+uWXzzQ70xKqE888+2X5/",
+ "61Y2u7N1Bx1GQrGv2XyBOTxjm4IKGg8vxb7yPf+MIvLg73OX2BD/iKqKPQNz7xiJt2xh6bPeGlg7Pdwj",
+ "svPPzavO15ZJ5BBzg9h8ABo8Aj0lTBO6EBKz3HW6NnzBp9cy1X4dvCbys8wQt+n1sn7hOqgsdvKhJ+Xb",
+ "gYgfCTmBIfPmoLZmanixlhWExa7qm6bVvrlvPhwlLz59Pp4eH13/xdwn7s/nT69H+jNfNg9kn9eXxciG",
+ "nzA3FS2zeH6fHB3d4f23Ux6+1o2bFDwz2Cv34B4ULoa0d7dVnYFIjYwDOXSd4QeeCH52wxXvtR+1ooci",
+ "z7l8QzPiU2Rw7uMvN/cZR2+y4evE3lvX08nzL7n6M25InuYEWwZFEfpb/w9+ycWG+5ZGyKiKgsqdP8aq",
+ "xRT8u/V4ldGVQmuiZFcUZTsueKvS++QTWrBj4ZUD/EZpegt+c256/ZvffCl+g5t0H/ymPdA985snNzzz",
+ "f/4V/5vD/tk47Llld3fisE7gs7mac/swbSMHdh8pif08/9wuktuSbNW60pnY2LTgKCvHSnA0d2Vj0HRZ",
+ "q0FaED9AE4pGfnRhvPkO7bUsA0IxDVJUutFTTWfvYGw8CWaE5gGjFeM4AZqEcRZbH4kGQR4KUsHtcx+d",
+ "a8NB9lZk0L828GL4tQK5a24GB+Nk2uIbbuMj1YjuzIb7x/z6ZmSBpmvrd+lrJ/UbH62/5xvKtLlcXEwY",
+ "YrTfWQPN5y5hrvNrE/zd+4IR7cGP7Sf2I7/O64J+0Y9dFTH21alIvlFjAwptKrjntTXlwyezdVgPxpFD",
+ "YyI4mc8xkGItlJ5PrqefO+aD8OOnerd8oYB6164/Xf//AAAA//9kloV/+q8AAA==",
+}
+
+// GetSwagger returns the content of the embedded swagger specification file
+// or error if failed to decode
+func decodeSpec() ([]byte, error) {
+ zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, ""))
+ if err != nil {
+ return nil, fmt.Errorf("error base64 decoding spec: %s", err)
+ }
+ zr, err := gzip.NewReader(bytes.NewReader(zipped))
+ if err != nil {
+ return nil, fmt.Errorf("error decompressing spec: %s", err)
+ }
+ var buf bytes.Buffer
+ _, err = buf.ReadFrom(zr)
+ if err != nil {
+ return nil, fmt.Errorf("error decompressing spec: %s", err)
+ }
+
+ return buf.Bytes(), nil
+}
+
+var rawSpec = decodeSpecCached()
+
+// a naive cached of a decoded swagger spec
+func decodeSpecCached() func() ([]byte, error) {
+ data, err := decodeSpec()
+ return func() ([]byte, error) {
+ return data, err
+ }
+}
+
+// Constructs a synthetic filesystem for resolving external references when loading openapi specifications.
+func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) {
+ var res = make(map[string]func() ([]byte, error))
+ if len(pathToFile) > 0 {
+ res[pathToFile] = rawSpec
+ }
+
+ return res
+}
+
+// GetSwagger returns the Swagger specification corresponding to the generated code
+// in this file. The external references of Swagger specification are resolved.
+// The logic of resolving external references is tightly connected to "import-mapping" feature.
+// Externally referenced files must be embedded in the corresponding golang packages.
+// Urls can be supported but this task was out of the scope.
+func GetSwagger() (swagger *openapi3.T, err error) {
+ var resolvePath = PathToRawSpec("")
+
+ loader := openapi3.NewLoader()
+ loader.IsExternalRefsAllowed = true
+ loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) {
+ var pathToFile = url.String()
+ pathToFile = path.Clean(pathToFile)
+ getSpec, ok := resolvePath[pathToFile]
+ if !ok {
+ err1 := fmt.Errorf("path not found: %s", pathToFile)
+ return nil, err1
+ }
+ return getSpec()
+ }
+ var specData []byte
+ specData, err = rawSpec()
+ if err != nil {
+ return
+ }
+ swagger, err = loader.LoadFromData(specData)
+ if err != nil {
+ return
+ }
+ return
+}
diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/public/public_routes.yml b/daemon/algod/api/server/v2/generated/nonparticipating/public/public_routes.yml
new file mode 100644
index 000000000..c549edad3
--- /dev/null
+++ b/daemon/algod/api/server/v2/generated/nonparticipating/public/public_routes.yml
@@ -0,0 +1,19 @@
+package: public
+generate:
+ echo-server: true
+ embedded-spec: true
+output-options:
+ include-tags:
+ - nonparticipating
+ - public
+ exclude-tags:
+ - private
+ - common
+ - participating
+ type-mappings:
+ integer: uint64
+ skip-prune: true
+additional-imports:
+ - alias: "."
+ package: "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
+output: ./server/v2/generated/nonparticipating/public/routes.go
diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go
new file mode 100644
index 000000000..234fa90c8
--- /dev/null
+++ b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go
@@ -0,0 +1,832 @@
+// Package public provides primitives to interact with the openapi HTTP API.
+//
+// Code generated by github.com/algorand/oapi-codegen DO NOT EDIT.
+package public
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+
+ . "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
+ "github.com/algorand/oapi-codegen/pkg/runtime"
+ "github.com/getkin/kin-openapi/openapi3"
+ "github.com/labstack/echo/v4"
+)
+
+// ServerInterface represents all server handlers.
+type ServerInterface interface {
+ // Get account information.
+ // (GET /v2/accounts/{address})
+ AccountInformation(ctx echo.Context, address string, params AccountInformationParams) error
+ // Get account information about a given app.
+ // (GET /v2/accounts/{address}/applications/{application-id})
+ AccountApplicationInformation(ctx echo.Context, address string, applicationId uint64, params AccountApplicationInformationParams) error
+ // Get account information about a given asset.
+ // (GET /v2/accounts/{address}/assets/{asset-id})
+ AccountAssetInformation(ctx echo.Context, address string, assetId uint64, params AccountAssetInformationParams) error
+ // Get application information.
+ // (GET /v2/applications/{application-id})
+ GetApplicationByID(ctx echo.Context, applicationId uint64) error
+ // Get box information for a given application.
+ // (GET /v2/applications/{application-id}/box)
+ GetApplicationBoxByName(ctx echo.Context, applicationId uint64, params GetApplicationBoxByNameParams) error
+ // Get all box names for a given application.
+ // (GET /v2/applications/{application-id}/boxes)
+ GetApplicationBoxes(ctx echo.Context, applicationId uint64, params GetApplicationBoxesParams) error
+ // Get asset information.
+ // (GET /v2/assets/{asset-id})
+ GetAssetByID(ctx echo.Context, assetId uint64) error
+ // Get the block for the given round.
+ // (GET /v2/blocks/{round})
+ GetBlock(ctx echo.Context, round uint64, params GetBlockParams) error
+ // Get the block hash for the block on the given round.
+ // (GET /v2/blocks/{round}/hash)
+ GetBlockHash(ctx echo.Context, round uint64) error
+ // Gets a proof for a given light block header inside a state proof commitment
+ // (GET /v2/blocks/{round}/lightheader/proof)
+ GetLightBlockHeaderProof(ctx echo.Context, round uint64) error
+ // Get a proof for a transaction in a block.
+ // (GET /v2/blocks/{round}/transactions/{txid}/proof)
+ GetTransactionProof(ctx echo.Context, round uint64, txid string, params GetTransactionProofParams) error
+ // Get the current supply reported by the ledger.
+ // (GET /v2/ledger/supply)
+ GetSupply(ctx echo.Context) error
+ // Get a state proof that covers a given round
+ // (GET /v2/stateproofs/{round})
+ GetStateProof(ctx echo.Context, round uint64) error
+ // Gets the current node status.
+ // (GET /v2/status)
+ GetStatus(ctx echo.Context) error
+ // Gets the node status after waiting for the given round.
+ // (GET /v2/status/wait-for-block-after/{round})
+ WaitForBlock(ctx echo.Context, round uint64) error
+ // Compile TEAL source code to binary, produce its hash
+ // (POST /v2/teal/compile)
+ TealCompile(ctx echo.Context, params TealCompileParams) error
+ // Disassemble program bytes into the TEAL source code.
+ // (POST /v2/teal/disassemble)
+ TealDisassemble(ctx echo.Context) error
+ // Provide debugging information for a transaction (or group).
+ // (POST /v2/teal/dryrun)
+ TealDryrun(ctx echo.Context) error
+ // Get parameters for constructing a new transaction
+ // (GET /v2/transactions/params)
+ TransactionParams(ctx echo.Context) error
+}
+
+// ServerInterfaceWrapper converts echo contexts to parameters.
+type ServerInterfaceWrapper struct {
+ Handler ServerInterface
+}
+
+// AccountInformation converts echo context to params.
+func (w *ServerInterfaceWrapper) AccountInformation(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "address" -------------
+ var address string
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "address", runtime.ParamLocationPath, ctx.Param("address"), &address)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params AccountInformationParams
+ // ------------- Optional query parameter "format" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
+ }
+
+ // ------------- Optional query parameter "exclude" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "exclude", ctx.QueryParams(), &params.Exclude)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter exclude: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.AccountInformation(ctx, address, params)
+ return err
+}
+
+// AccountApplicationInformation converts echo context to params.
+func (w *ServerInterfaceWrapper) AccountApplicationInformation(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "address" -------------
+ var address string
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "address", runtime.ParamLocationPath, ctx.Param("address"), &address)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err))
+ }
+
+ // ------------- Path parameter "application-id" -------------
+ var applicationId uint64
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "application-id", runtime.ParamLocationPath, ctx.Param("application-id"), &applicationId)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter application-id: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params AccountApplicationInformationParams
+ // ------------- Optional query parameter "format" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.AccountApplicationInformation(ctx, address, applicationId, params)
+ return err
+}
+
+// AccountAssetInformation converts echo context to params.
+func (w *ServerInterfaceWrapper) AccountAssetInformation(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "address" -------------
+ var address string
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "address", runtime.ParamLocationPath, ctx.Param("address"), &address)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err))
+ }
+
+ // ------------- Path parameter "asset-id" -------------
+ var assetId uint64
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "asset-id", runtime.ParamLocationPath, ctx.Param("asset-id"), &assetId)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter asset-id: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params AccountAssetInformationParams
+ // ------------- Optional query parameter "format" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.AccountAssetInformation(ctx, address, assetId, params)
+ return err
+}
+
+// GetApplicationByID converts echo context to params.
+func (w *ServerInterfaceWrapper) GetApplicationByID(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "application-id" -------------
+ var applicationId uint64
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "application-id", runtime.ParamLocationPath, ctx.Param("application-id"), &applicationId)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter application-id: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetApplicationByID(ctx, applicationId)
+ return err
+}
+
+// GetApplicationBoxByName converts echo context to params.
+func (w *ServerInterfaceWrapper) GetApplicationBoxByName(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "application-id" -------------
+ var applicationId uint64
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "application-id", runtime.ParamLocationPath, ctx.Param("application-id"), &applicationId)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter application-id: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params GetApplicationBoxByNameParams
+ // ------------- Required query parameter "name" -------------
+
+ err = runtime.BindQueryParameter("form", true, true, "name", ctx.QueryParams(), &params.Name)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter name: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetApplicationBoxByName(ctx, applicationId, params)
+ return err
+}
+
+// GetApplicationBoxes converts echo context to params.
+func (w *ServerInterfaceWrapper) GetApplicationBoxes(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "application-id" -------------
+ var applicationId uint64
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "application-id", runtime.ParamLocationPath, ctx.Param("application-id"), &applicationId)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter application-id: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params GetApplicationBoxesParams
+ // ------------- Optional query parameter "max" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "max", ctx.QueryParams(), &params.Max)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter max: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetApplicationBoxes(ctx, applicationId, params)
+ return err
+}
+
+// GetAssetByID converts echo context to params.
+func (w *ServerInterfaceWrapper) GetAssetByID(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "asset-id" -------------
+ var assetId uint64
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "asset-id", runtime.ParamLocationPath, ctx.Param("asset-id"), &assetId)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter asset-id: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetAssetByID(ctx, assetId)
+ return err
+}
+
+// GetBlock converts echo context to params.
+func (w *ServerInterfaceWrapper) GetBlock(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "round" -------------
+ var round uint64
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params GetBlockParams
+ // ------------- Optional query parameter "format" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetBlock(ctx, round, params)
+ return err
+}
+
+// GetBlockHash converts echo context to params.
+func (w *ServerInterfaceWrapper) GetBlockHash(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "round" -------------
+ var round uint64
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetBlockHash(ctx, round)
+ return err
+}
+
+// GetLightBlockHeaderProof converts echo context to params.
+func (w *ServerInterfaceWrapper) GetLightBlockHeaderProof(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "round" -------------
+ var round uint64
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetLightBlockHeaderProof(ctx, round)
+ return err
+}
+
+// GetTransactionProof converts echo context to params.
+func (w *ServerInterfaceWrapper) GetTransactionProof(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "round" -------------
+ var round uint64
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
+ }
+
+ // ------------- Path parameter "txid" -------------
+ var txid string
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "txid", runtime.ParamLocationPath, ctx.Param("txid"), &txid)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter txid: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params GetTransactionProofParams
+ // ------------- Optional query parameter "hashtype" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "hashtype", ctx.QueryParams(), &params.Hashtype)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter hashtype: %s", err))
+ }
+
+ // ------------- Optional query parameter "format" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetTransactionProof(ctx, round, txid, params)
+ return err
+}
+
+// GetSupply converts echo context to params.
+func (w *ServerInterfaceWrapper) GetSupply(ctx echo.Context) error {
+ var err error
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetSupply(ctx)
+ return err
+}
+
+// GetStateProof converts echo context to params.
+func (w *ServerInterfaceWrapper) GetStateProof(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "round" -------------
+ var round uint64
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetStateProof(ctx, round)
+ return err
+}
+
+// GetStatus converts echo context to params.
+func (w *ServerInterfaceWrapper) GetStatus(ctx echo.Context) error {
+ var err error
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetStatus(ctx)
+ return err
+}
+
+// WaitForBlock converts echo context to params.
+func (w *ServerInterfaceWrapper) WaitForBlock(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "round" -------------
+ var round uint64
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.WaitForBlock(ctx, round)
+ return err
+}
+
+// TealCompile converts echo context to params.
+func (w *ServerInterfaceWrapper) TealCompile(ctx echo.Context) error {
+ var err error
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params TealCompileParams
+ // ------------- Optional query parameter "sourcemap" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "sourcemap", ctx.QueryParams(), &params.Sourcemap)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter sourcemap: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.TealCompile(ctx, params)
+ return err
+}
+
+// TealDisassemble converts echo context to params.
+func (w *ServerInterfaceWrapper) TealDisassemble(ctx echo.Context) error {
+ var err error
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.TealDisassemble(ctx)
+ return err
+}
+
+// TealDryrun converts echo context to params.
+func (w *ServerInterfaceWrapper) TealDryrun(ctx echo.Context) error {
+ var err error
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.TealDryrun(ctx)
+ return err
+}
+
+// TransactionParams converts echo context to params.
+func (w *ServerInterfaceWrapper) TransactionParams(ctx echo.Context) error {
+ var err error
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.TransactionParams(ctx)
+ return err
+}
+
+// This is a simple interface which specifies echo.Route addition functions which
+// are present on both echo.Echo and echo.Group, since we want to allow using
+// either of them for path registration
+type EchoRouter interface {
+ CONNECT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ DELETE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ GET(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ HEAD(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ OPTIONS(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ PATCH(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ POST(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ PUT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ TRACE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+}
+
+// RegisterHandlers adds each server route to the EchoRouter.
+func RegisterHandlers(router EchoRouter, si ServerInterface, m ...echo.MiddlewareFunc) {
+ RegisterHandlersWithBaseURL(router, si, "", m...)
+}
+
+// Registers handlers, and prepends BaseURL to the paths, so that the paths
+// can be served under a prefix.
+func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL string, m ...echo.MiddlewareFunc) {
+
+ wrapper := ServerInterfaceWrapper{
+ Handler: si,
+ }
+
+ router.GET(baseURL+"/v2/accounts/:address", wrapper.AccountInformation, m...)
+ router.GET(baseURL+"/v2/accounts/:address/applications/:application-id", wrapper.AccountApplicationInformation, m...)
+ router.GET(baseURL+"/v2/accounts/:address/assets/:asset-id", wrapper.AccountAssetInformation, m...)
+ router.GET(baseURL+"/v2/applications/:application-id", wrapper.GetApplicationByID, m...)
+ router.GET(baseURL+"/v2/applications/:application-id/box", wrapper.GetApplicationBoxByName, m...)
+ router.GET(baseURL+"/v2/applications/:application-id/boxes", wrapper.GetApplicationBoxes, m...)
+ router.GET(baseURL+"/v2/assets/:asset-id", wrapper.GetAssetByID, m...)
+ router.GET(baseURL+"/v2/blocks/:round", wrapper.GetBlock, m...)
+ router.GET(baseURL+"/v2/blocks/:round/hash", wrapper.GetBlockHash, m...)
+ router.GET(baseURL+"/v2/blocks/:round/lightheader/proof", wrapper.GetLightBlockHeaderProof, m...)
+ router.GET(baseURL+"/v2/blocks/:round/transactions/:txid/proof", wrapper.GetTransactionProof, m...)
+ router.GET(baseURL+"/v2/ledger/supply", wrapper.GetSupply, m...)
+ router.GET(baseURL+"/v2/stateproofs/:round", wrapper.GetStateProof, m...)
+ router.GET(baseURL+"/v2/status", wrapper.GetStatus, m...)
+ router.GET(baseURL+"/v2/status/wait-for-block-after/:round", wrapper.WaitForBlock, m...)
+ router.POST(baseURL+"/v2/teal/compile", wrapper.TealCompile, m...)
+ router.POST(baseURL+"/v2/teal/disassemble", wrapper.TealDisassemble, m...)
+ router.POST(baseURL+"/v2/teal/dryrun", wrapper.TealDryrun, m...)
+ router.GET(baseURL+"/v2/transactions/params", wrapper.TransactionParams, m...)
+
+}
+
+// Base64 encoded, gzipped, json marshaled Swagger object
+var swaggerSpec = []string{
+
+ "H4sIAAAAAAAC/+x9aXPcOJLoX0HUboSPLZbkq3esiI598tE92rHdDkvTe7T92igyqwojEuAAoFTVfv7v",
+ "L5AASJAEq6jDcrtbn2wVcSQSiUTe+DRJRVEKDlyrycGnSUklLUCDxL9omoqK64Rl5q8MVCpZqZngkwP/",
+ "jSgtGV9OphNmfi2pXk2mE04LaNqY/tOJhH9WTEI2OdCygulEpSsoqBlYb0rTuh5pnSxF4oY4tEMcvZh8",
+ "3vKBZpkEpfpQ/sTzDWE8zasMiJaUK5qaT4qcM70iesUUcZ0J40RwIGJB9KrVmCwY5Jma+UX+swK5CVbp",
+ "Jh9e0ucGxESKHPpwPhfFnHHwUEENVL0hRAuSwQIbragmZgYDq2+oBVFAZboiCyF3gGqBCOEFXhWTg18m",
+ "CngGEncrBXaG/11IgN8g0VQuQU8+TGOLW2iQiWZFZGlHDvsSVJVrRbAtrnHJzoAT02tGXldKkzkQysm7",
+ "H56TR48ePTULKajWkDkiG1xVM3u4Jtt9cjDJqAb/uU9rNF8KSXmW1O3f/fAc5z92CxzbiioF8cNyaL6Q",
+ "oxdDC/AdIyTEuIYl7kOL+k2PyKFofp7DQkgYuSe28bVuSjj/V92VlOp0VQrGdWRfCH4l9nOUhwXdt/Gw",
+ "GoBW+9JgSppBf9lPnn749GD6YP/zv/xymPyv+/PJo88jl/+8HncHBqIN00pK4OkmWUqgeFpWlPfx8c7R",
+ "g1qJKs/Iip7h5tMCWb3rS0xfyzrPaF4ZOmGpFIf5UihCHRllsKBVromfmFQ8N2zKjOaonTBFSinOWAbZ",
+ "1HDf8xVLVySlyg6B7cg5y3NDg5WCbIjW4qvbcpg+hygxcF0KH7ig3y8ymnXtwASskRskaS4UJFrsuJ78",
+ "jUN5RsILpbmr1MUuK3KyAoKTmw/2skXccUPTeb4hGvc1I1QRSvzVNCVsQTaiIue4OTk7xf5uNQZrBTFI",
+ "w81p3aPm8A6hr4eMCPLmQuRAOSLPn7s+yviCLSsJipyvQK/cnSdBlYIrIGL+D0i12fb/PP7pDRGSvAal",
+ "6BLe0vSUAE9FNrzHbtLYDf4PJcyGF2pZ0vQ0fl3nrGARkF/TNSuqgvCqmIM0++XvBy2IBF1JPgSQHXEH",
+ "nRV03Z/0RFY8xc1tpm0JaoaUmCpzupmRowUp6Pr7/akDRxGa56QEnjG+JHrNB4U0M/du8BIpKp6NkGG0",
+ "2bDg1lQlpGzBICP1KFsgcdPsgofxi8HTSFYBOH6QQXDqWXaAw2EdoRlzdM0XUtIlBCQzI393nAu/anEK",
+ "vGZwZL7BT6WEMyYqVXcagBGn3i5ec6EhKSUsWITGjh06DPewbRx7LZyAkwquKeOQGc6LQAsNlhMNwhRM",
+ "uF2Z6V/Rc6rgu8dDF3jzdeTuL0R317fu+KjdxkaJPZKRe9F8dQc2Lja1+o9Q/sK5FVsm9ufeRrLliblK",
+ "FizHa+YfZv88GiqFTKCFCH/xKLbkVFcSDt7z++YvkpBjTXlGZWZ+KexPr6tcs2O2ND/l9qdXYsnSY7Yc",
+ "QGYNa1Sbwm6F/ceMF2fHeh1VGl4JcVqV4YLSllY635CjF0ObbMe8KGEe1qpsqFWcrL2mcdEeel1v5ACQ",
+ "g7grqWl4ChsJBlqaLvCf9QLpiS7kb+afssxNb10uYqg1dOzuW7QNOJvBYVnmLKUGie/cZ/PVMAGwWgJt",
+ "WuzhhXrwKQCxlKIEqZkdlJZlkouU5onSVONI/yphMTmY/MteY1zZs93VXjD5K9PrGDsZedTKOAktywuM",
+ "8dbINWoLszAMGj8hm7BsDyUixu0mGlJihgXncEa5njX6SIsf1Af4FzdTg28rylh8d/SrQYQT23AOyoq3",
+ "tuEdRQLUE0QrQbSitLnMxbz+4e5hWTYYxO+HZWnxgaIhMJS6YM2UVvdw+bQ5SeE8Ry9m5MdwbJSzBc83",
+ "5nKwooa5Gxbu1nK3WG04cmtoRryjCG6nkDOzNR4NRoa/DopDnWElciP17KQV0/ivrm1IZub3UZ2/DRIL",
+ "cTtMXKhFOcxZBQZ/CTSXux3K6ROOs+XMyGG37+XIxowSJ5hL0crW/bTjbsFjjcJzSUsLoPti71LGUQOz",
+ "jSysV+SmIxldFObgDAe0hlBd+qztPA9RSJAUOjA8y0V6+leqVtdw5ud+rP7xw2nICmgGkqyoWs0mMSkj",
+ "PF7NaGOOmGmI2juZB1PN6iVe1/J2LC2jmgZLc/DGxRKLeuyHTA9kRHf5Cf9Dc2I+m7NtWL8ddkZOkIEp",
+ "e5ydByEzqrxVEOxMpgGaGAQprPZOjNZ9ISifN5PH92nUHr20BgO3Q24RuENife3H4JlYx2B4Jta9IyDW",
+ "oK6DPsw4KEZqKNQI+F44yATuv0MflZJu+kjGsccg2SzQiK4KTwMPb3wzS2N5PZwLeTnu02ErnDT2ZELN",
+ "qAHznXaQhE2rMnGkGLFJ2QadgRoX3nam0R0+hrEWFo41/QJYUGbU68BCe6DrxoIoSpbDNZD+Ksr051TB",
+ "o4fk+K+HTx48/PXhk+8MSZZSLCUtyHyjQZG7TjcjSm9yuNdfGWpHVa7jo3/32Fsh2+PGxlGikikUtOwP",
+ "Za2bVgSyzYhp18daG8246hrAMYfzBAwnt2gn1nBvQHvBlJGwivm1bMYQwrJmlow4SDLYSUwXXV4zzSZc",
+ "otzI6jpUWZBSyIh9DY+YFqnIkzOQiomIq+Sta0FcCy/elt3fLbTknCpi5kbTb8VRoIhQll7z8XzfDn2y",
+ "5g1utnJ+u97I6ty8Y/aljXxvSVSkBJnoNScZzKtlSxNaSFEQSjLsiHf0K7Zc6UBkeSuFWFz7rR2dJbYk",
+ "/GAFvtz06Yt9b0QGRu2u1DWw92awBnuGckKc0bmoNKGEiwxQR69UnPEPOHrRw4SOMR3eJXplZbg5GH0w",
+ "pZVZbVUSdPv0aLHpmNDUUlGCqFEDdvHaoWFb2emsEzGXQDOjJwInYu6Mz84sjouk6LPSnnW6ayeiObfg",
+ "KqVIQSmj31utbSdovp0lS70FTwg4AlzPQpQgCyqvDOzp2U44T2GToIdVkbt/+1nd+wrwaqFpvgOx2CaG",
+ "3lqFcB6GPtTjpt9GcN3JQ7KjEojnfUZfMQwiBw1DKLwQTgb3rwtRbxevjpYzkGjr/6IU7ye5GgHVoH5h",
+ "er8qtFU5EDfkROcTVqAliFMuFKSCZyo6WE6VTnaxZdOoJd+bFQScMMaJceABa+QrqrT1TzGeoVptrxOc",
+ "x5opzRTDAA+KOGbkn7100x87NfcgV5WqRR1VlaWQGrLYGjist8z1Btb1XGIRjF3LU1qQSsGukYewFIzv",
+ "kGVXYhFEdW3GdQ7c/uLQ2Gnu+U0UlS0gGkRsA+TYtwqwG8ZODADCVINoSzhMdSinDtiYTpQWZWm4hU4q",
+ "XvcbQtOxbX2o/9607RMX1c29nQkws2sPk4P83GLWRs2sqFHKcGRS0FMje6CKZR1pfZjNYUwU4ykk2yjf",
+ "HMtj0yo8AjsO6YB26+Lygtk6h6NDv1GiGySCHbswtOABVfstlZqlrERJ8W+wuXbBuTtB1ABMMtCUGfUv",
+ "+GCF6DLsT6xntDvm5QTpUVpRH/yeWhRZTs4UXhht4E9hg56gtzbk5iQI1LkGTSAyqjndlBME1DvyjQAT",
+ "NoE1TXW+MdecXsGGnIMEoqp5wbS2MVRtRUGLMgkHiFqctszozKs2XMXvwBh77zEOFSyvvxXTiZWotsN3",
+ "0hGrWuhwklQpRD7C09ZDRhSCUZ44Ugqz68yF7Pm4Lk9JLSCdEIO29Zp53lEtNOMKyP+IiqSUo8Baaahv",
+ "BCGRzeL1a2YwF1g9p/O5NRiCHAqwcjh+uX+/u/D7992eM0UWcO7jXE3DLjru30ct+K1QunW4rsEEY47b",
+ "UYS3oynOXBROhuvylN0+HzfymJ182xm8tt+ZM6WUI1yz/CszgM7JXI9Ze0gj4/xdOO4oK1swdGzduO8Y",
+ "cPBlbDTN0DHo+hMHbtrm45Cn1shX+eYa+LQdiEgoJSg8VaFeouxXsQhDod2xUxuloeibbmzXXwcEm3de",
+ "LOhJmYLnjENSCA6baPYP4/AaP8Z625M90Bl57FDfrtjUgr8DVnueMVR4Vfzibgek/LYOUbiGze+O27Ha",
+ "hUHgqJVCXhJK0pyhziq40rJK9XtOUSoOznLEleNl/WE96blvElfMInqTG+o9p+jGq2XlqPl5AREt+AcA",
+ "ry6parkEpTvywQLgPXetGCcVZxrnKsx+JXbDSpDoT5nZlgXdkAXNUa37DaQg80q3b0yMVVXaaF3WhGim",
+ "IWLxnlNNcjAa6GvGT9Y4nA8J9TTDQZ8LeVpjYRY9D0vgoJhK4i6nH+1XjAZwy1+5yABMHLKfrdHJjN8E",
+ "tG40tJJh/u/d/zj45TD5X5r8tp88/be9D58ef753v/fjw8/ff///2j89+vz9vf/419hOedhjkZQO8qMX",
+ "Tpo8eoEiQ2N16sF+YxaHgvEkSmQnKyAF4xiQ36EtctcIPp6A7jVmPbfr77lec0NIZzRnGdWXI4cui+ud",
+ "RXs6OlTT2oiOAunX+iEWHbEUSUnTU/TYTpZMr6r5LBXFnpei95ailqj3MgqF4Pgt26Ml21MlpHtnD3Zc",
+ "6VfgVyTCrjpM9tICQd/fG49+RoOqC2jGk7eouCWKSjmjLgb3eb+bWEzrCHeb2XpAMPx5Rb3T2P358Ml3",
+ "k2kTtlx/N5q6/fohciZYto4Fp2ewjklq7qjhEbujSEk3CnScDyHsURej9UuFwxZgRHy1YuXN8xyl2TzO",
+ "K33IlNP41vyI21gmcxLRPLtxVh+xuHm4tQTIoNSrWMZbS+bAVs1uAnRcZqUUZ8CnhM1g1tW4siUo7+zM",
+ "gS4w8wpNjGJMCGh9DiyheaoIsB4uZJRaE6MfFJMd3/88nTgxQl27ZO8GjsHVnbO2xfq/tSB3fnx5QvYc",
+ "61V3bJ6EHTqIbI9YMlzwZsuZariZzfO1iSLv+Xv+AhaMM/P94D3PqKZ7c6pYqvYqBfIZzSlPYbYU5MDH",
+ "g76gmr7nPZltMBU/iMQlZTXPWUpOQ9m6IU+bXtkf4f37XwzHf//+Q8+v1JeE3VRR/mInSM6ZXolKJy5/",
+ "LJFwTmUWAV3V+UM4ss3+3DbrlLixLSt2+Wlu/DjPo2WpunkE/eWXZW6WH5ChclHyZsuI0kJ6qcaIOhYa",
+ "3N83wl0Mkp775MNKgSIfC1r+wrj+QJL31f7+IyCtwPqPTngwNLkpoWXzulSeQ9fehQu3GhKstaRJSZeg",
+ "osvXQEvcfZS8C7Su5jnBbq2Afh+whEM1C/D4GN4AC8eFg5Nxcce2ly8EEF8CfsItxDZG3GicFpfdryDE",
+ "/9Lb1UkT6O1SpVeJOdvRVSlD4n5n6vzgpRGyvCdJsSU3h8ClUs+BpCtITyHDrE4oSr2Ztrp7Z6UTWT3r",
+ "YMpmP9sAXUzRQ/PgHEhVZtQJ9ZRvurlSCrT2CWLv4BQ2J6LJ8LtIclQ7V0cNHVSk1EC6NMQaHls3Rnfz",
+ "nUcc8xPK0qe8YOyzJ4uDmi58n+GDbEXeazjEMaJo5ZIMIYLKCCIs8Q+g4BILNeNdifRjyzP6ytzefJFk",
+ "ac/7iWvSqGHOeR2uBlNk7PcCsJSCOFdkTo3cLlwVAJuPEnCxStElDEjIoYV2ZNZHy6qLg+y696I3nVh0",
+ "L7TefRMF2TZOzJqjlALmiyEVVGY6IQt+JusEwBXMCBb3cQib5ygm1bEdlulQ2bKU22olQ6DFCRgkbwQO",
+ "D0YbI6Fks6LKFyjAOg7+LI+SAb5gftW2rNqjwNseFGuoc2Y9z+2e05526XJrfUKtz6INVcsRGbFGwscA",
+ "v9h2CI4CUAY5LO3CbWNPKE2uV7NBBo6fFouccSBJzHFPlRIpsxUmmmvGzQFGPr5PiDUmk9EjxMg4ABud",
+ "WzgweSPCs8mXFwGSu1w16sdGt1jwN8TDam0omxF5RGlYOOMDQZOeA1AX7VHfX52YIxyGMD4lhs2d0dyw",
+ "OafxNYP0kjtRbO2kcjr36r0hcXaLLd9eLBdak72KLrOaUGbyQMcFui0Qz8U6sXH1UYl3vp4beo9G92GU",
+ "f+xg2jTaO4rMxRpd9ni12GiyHbAMw+HBCDT8NVNIr9hv6Da3wGybdrs0FaNChSTjzHk1uQyJE2OmHpBg",
+ "hsjlbpAZeykAOsaOpoacU353Kqlt8aR/mTe32rSp+OADp2PHf+gIRXdpAH99K0ydy/q2K7FE7RRtz3M7",
+ "jTcQIWNEb9hE393TdyopyAGVgqQlRCWnMSeg0W0Ab5xj3y0wXmCyMOWbe0E4g4QlUxoac7y5mL1/6abN",
+ "kxRrlAixGF6dLuXCrO+dEPU1ZZPgsWNrmTe+gjOhIVkwqXSCvozoEkyjHxQq1T+YpnFZqR0wYct1sSzO",
+ "G3DaU9gkGcurOL26ef/2wkz7pmaJqpojv2WcAE1XZI7l5aJhVFumtpF2Wxf8yi74Fb229Y47DaapmVga",
+ "cmnP8Y2ciw7n3cYOIgQYI47+rg2idAuDRNnnBeQ6lgEZyE32cGam4Wyb9bV3mDI/9s4AFAvF8B1lR4qu",
+ "JTAYbF0FQzeREUuYDqqz9bN6Bs4ALUuWrTu2UDvqoMZML2Tw8GUvOljA3XWD7cBAYPeMBRZLUO0KJ42A",
+ "b+vstRKMZ6Mwc9KuQxIyhHAqpnyV2D6i6sSDXbg6AZr/DTY/m7a4nMnn6eRqptMYrt2IO3D9tt7eKJ7R",
+ "yW9NaS1PyAVRTstSijOaJ87APESaUpw50sTm3h59w6wubsY8eXn46q0D//N0kuZAZVKLCoOrwnblN7Mq",
+ "W0xl4ID4KpRG5/MyuxUlg82vK0CERunzFbiKf4E02itN1DgcgqPojNSLeKzRTpOz843YJW7xkUBZu0ga",
+ "8531kLS9IvSMstzbzTy0A3FBuLhx9a2iXCEc4MrelcBJllwru+md7vjpaKhrB08K59pSk7CwZTcVEbzr",
+ "QjciJJrjkFQLioWFrFWkz5x4VaAlIVE5S+M2Vj5Xhji49Z2ZxgQbDwijZsSKDbhiecWCsUwzNULR7QAZ",
+ "zBFFpi9SNYS7uXD10ivO/lkBYRlwbT5JPJWdg4qVnJy1vX+dGtmhP5cb2From+GvImOERbW6Nx4CsV3A",
+ "CD11PXBf1CqzX2htkTI/BC6JCzj8wxl7V+IWZ72jD0fNNgxy1fa4heXN+/zPEIYthbm7trpXXl11r4E5",
+ "orXSmUoWUvwGcT0P1eNI1oEvI8YwyuU34LNI8laXxdTWnabkezP74HYPSTehFaodpDBA9bjzgVsO6xl5",
+ "CzXldqtt6eJWrFucYML41D07fkMwDuZeTG9Oz+c0VuzJCBkGpsPGAdyypWtBfGePe2f2Z66y24wEvuS6",
+ "LbP5eCXIJiGon9t/SYHBTjtaVGgkA6TaUCaYWv9frkRkmIqfU24rYJt+9ii53gqs8cv0OhcSs2lV3Oyf",
+ "QcoKmsclhyztm3gztmS2/nOlICgw7AayhfMtFbkizdbF3qDmaEH2p0EJc7cbGTtjis1zwBYPbIs5VcjJ",
+ "a0NU3cUsD7heKWz+cETzVcUzCZleKYtYJUgt1KF6Uzuv5qDPATjZx3YPnpK76LZT7AzuGSy6+3ly8OAp",
+ "Gl3tH/uxC8AVet/GTTJkJ//l2EmcjtFvaccwjNuNOovmhtrXOYYZ15bTZLuOOUvY0vG63WepoJwuIR4p",
+ "UuyAyfbF3URDWgcvPLOl5ZWWYkOYjs8Pmhr+NBDHbtifBYOkoiiYLpxzR4nC0FNTPdhO6oezdepd4TcP",
+ "l/+IPtLSu4g6SuTNGk3t/RZbNXqy39AC2midEmpTqHPWRC/4cpTkyFdowEp4dQE8ixszl1k6ijkYzLAg",
+ "pWRco2JR6UXyF5KuqKSpYX+zIXCT+XePI9X/2lWo+MUAv3G8S1Agz+KolwNk72UI15fc5YInheEo2b0m",
+ "byQ4lYPO3Ljbbsh3uH3osUKZGSUZJLeqRW404NRXIjy+ZcArkmK9ngvR44VXduOUWck4edDK7NDf371y",
+ "UkYhZKzsUnPcncQhQUsGZxi7F98kM+YV90Lmo3bhKtB/Xc+DFzkDscyf5Zgi8ExEtFNfkbK2pLtY9Yh1",
+ "YOiYmg+GDOZuqClpV/+7eaefNz73nU/mi4cV/+gC+5W3FJHsVzCwiUFl0uh2ZvX3wP9NyTOxHrupnRPi",
+ "N/Z3gJooSiqWZz83+Z2dwq+S8nQV9WfNTcdfmycq6sXZ+yla3WhFOYc8OpyVBX/1MmNEqv2HGDtPwfjI",
+ "tt1atHa5ncU1gLfB9ED5CQ16mc7NBCFW2wlvdUB1vhQZwXmaUjoN9+zXMA4qTf6zAqVjyUP4wQZ1od3S",
+ "6Lu20CEBnqG2OCM/2ifmVkBalT5QS2NFlduqEZAtQTqDelXmgmZTYsY5eXn4ithZbR9baN0WWlyiktJe",
+ "RcdeFVQJGxce7Gumx1MXxo+zPZbarFppLLyjNC3KWJqpaXHiG2Aua2jDR/UlxM6MvLCao/J6iZ3E0MOC",
+ "ycJoXPVoVnZBmjD/0ZqmK1TJWix1mOTHVwj1VKmCV3nq6vp16Sw8dwZuVyTU1gidEmH05nOm7MticAbt",
+ "zNY6zduZBHyma3t5suLcUkpU9thWhuAyaPfA2UANb+aPQtZB/AUFcltg96IFU4+xV7QWTbf6au85Hpvd",
+ "WFdN9y9GppQLzlKsBBO7mt0rZWN8YCOK5nSNrP6IuxMaOVzRmq91mJzD4mAVWM8IHeL6Rvjgq9lUSx32",
+ "T43PYa2oJkvQynE2yKa+dLGzAzKuwJVCwwfrAj4pZMuviBwy6qpOapfGBckI02IGFLsfzLc3Tu3HePFT",
+ "xlHAd2hzoenWUoePKGmjFTBNlgKUW087N1j9YvrMME02g/WHmX90CcewbjmzbOuD7g916D3SzgNs2j43",
+ "bW1RlObnVgSynfSwLN2kw4Wto/KAXvNBBEc8i4l37QTIrccPR9tCbltDSfA+NYQGZ+iIhhLv4R5h1EWe",
+ "Ow8IGKHVUhS2IDaEK1oLgfEIGK8Yh+ZJsMgFkUavBNwYPK8D/VQqqbYi4CiedgI0R+9zjKEp7VwPVx2q",
+ "s8GIElyjn2N4G5v61AOMo27QCG6Ub+qXyAx1B8LEc3wC0SGyX20apSonRGWYUdCpPx1jHIZx+wr37Qug",
+ "fwz6MpHtriW1J+ciN9FQkui8ypagE5plsRqSz/Arwa8kq1BygDWkVV2DryxJitVV2uVm+tTmJkoFV1Wx",
+ "ZS7f4IrTpSImR7/BCZRPmWgGnxFkv4b1vnj59t3L54cnL1/Y+0IRVdksUSNzSygMQ5yRI640GNG5UkA+",
+ "hmj8iP0+dhYcBzOoOx8h2rD2vSdEzJWZb/DfWJ28YQJysSIXjlb0gSHY8cLifXuknnBujl6i2DIZjwm8",
+ "+q6Ojmbqy53Hpv+1HshcLNuA3HAFi23MONyjGBt+ae63sMBDr/ijvQHr+gsYGyj8a0Go3daZw23miTdu",
+ "rxok+qTq10i220mG3xWZ4h09ECEc1O2gVgywTs6hOOF0MKydapdgpynZyikHk5ZskJFNT7KPYkcNvEOB",
+ "RTauyHzu9R4nwPbUARx7K0J9xFofoL/5cFhSUuY8+A2z6GPWBc4PWzW3Hbpmg7uLcOHog4bF+OMOwyV0",
+ "mrI5eA2UQrGmYG3s1YeR4VIn+HBDUAKoP5aPVTiDVBuhPvDBSoCLFAQykwVv1NyW0hlQP+qoMldBZ1vZ",
+ "nH5p4h3MppfZEmRn2bKus/FFYg7rSBv0/+MrMUvg7pmYdsz66MjZxQJSzc52ZBL9l9FSmyyVqddj7XNv",
+ "QWIRqyMx/TP8F1SvG4C2JfpshScoLXdlcIbyCE5hc0eRFjVE68xOPc+7TA0CxAByh8SQiFAxT7Y1vDnn",
+ "IlM1ZSAWfOSI7Q5NNafBAv9BXtwl5/IkSWiYK7dlyjMR09xHzWW6XiiDFIMKh5KN+iW2hwWhF1jRXNWP",
+ "79Tv7AdaDTnqV3o7dzUQMO+rtjX7agig/G8+ydPOkrNTCJ8gQMv+OZWZbxFVVb0WnGy5j3oZQr48dBfo",
+ "RT0za+L8+jkhkdpBGM2Z5kIxvkyGQmLboXXh268YQIDXAdYuR7gWIN1TLWhCzoWCRAsfF7gNjm2ocO+U",
+ "XgYJarBenwVusIrGu6ZMCFZApVg1g7rgiHCBRm+lBjoZFPMYnnMbsp/b7z4JwlfAHKGRO3pNdlbj8BGe",
+ "TPWQGFL9grjbcndyxWW0Xsa5fWpMxSp7cIPK0HpcSpFVqb2gw4PR2BjG1s3ZwkqiCmPaX2VP9s+xitSr",
+ "IFXtFDZ7Vv5OV5Q35bzax9qKUHYNQWp4Z7ev1SAQ133ypV3A8lrg/JpK9XRSCpEnA+bio36Bku4ZOGXp",
+ "KWTE3B0+NmqgyD+5i1bK2h94vtr4ghxlCRyyezNCjFpelHrjXYPtWrudyfkdvW3+Nc6aVbZmkNP3Z+95",
+ "PKwPq/nIK/I3P8x2rqbAML8rTmUH2VH+Yj1QHEXS88iTF2NfNI4467rPEDREZaGISSmXzIUedb77On+E",
+ "9IM6/Nu1n7BUQhODJa3pCKUlb9DpCi+vG4vQuBcBfIcd4IVKcfAmgOdGDpyvHCj1ukZKsJRBSmgtf5ee",
+ "7R/irvlSsEUKI+vNMm3hGutkb+9LYERRz2vbRBzPfRMG1kUQHGvF9E0fCk2JWHI2JBxzLuUZzW/efIEF",
+ "Mw4RH+5hq/hCQ/03RLJFpbpctMIrOmruQNe9vqn5WzS3/BeYPYragN1Qzo5av8XgS0hiaTSak1w0b7Lg",
+ "kOQcx7RG4wffkbmLtC4lpEyxThLKua+GWat7WBy6ee9su365a50/C30FMnYKgijJm6aynhZ4PzQQNkf0",
+ "KzOVgZMbpfIY9fXIIoK/GI8KU553XBenLWuyrVTaieYQEq7Zqhy4sS9oVe4nc49dHq4DL51KQX+do2/r",
+ "Fm4jF3WztrEukT5yt5VfG+PJiFdVNN3RlWIRgiVJCYJKPj74SCQs8M0BQe7fxwnu35+6ph8ftj+b43z/",
+ "flSMuzEnSuvpdzdvjGJ+Hor+sxFuA4Gmnf2oWJ7tIoxW2HDz/gcGxv7qEge+ygskv1p7av+outrtF3Hf",
+ "djcBERNZa2vyYKogIHhELLDrNos+zq8grSTTG6xn4M1v7Ndonagfa4u98/jUGbDu7tPiFOqKGI19v1L+",
+ "dv1R2Mf8CyNTo/Nc42NwL9e0KHNwB+X7O/N/h0d/eZztP3rw7/O/7D/ZT+Hxk6f7+/TpY/rg6aMH8PAv",
+ "Tx7vw4PFd0/nD7OHjx/OHz98/N2Tp+mjxw/mj797+u93DB8yIFtAJz57bvLf+ExPcvj2KDkxwDY4oSWr",
+ "34A0ZOxfCKApnkQoKMsnB/6n/+NP2CwVRTO8/3XiknMmK61LdbC3d35+Pgu77C3RoJdoUaWrPT9P/+29",
+ "t0d1gLVN+MYdtbGzhhRwUx0pHOK3dy+PT8jh26NZQzCTg8n+bH/2AF/WKoHTkk0OJo/wJzw9K9z3PUds",
+ "k4NPn6eTvRXQHP1f5o8CtGSp/6TO6XIJcuaeSjA/nT3c86LE3idnzPxsRl3GKj3YUPEgPrj/goBzjGC8",
+ "jQ0Fb1XkVa5A7LSu0+xsDTzDCF5rHzSsrUbWUdYUJDxqGJUvy2DrVB38EnmKasGWley8WltHEbgi7kyR",
+ "/zz+6Q0RkjiV5i1NT8MoWSTIf1YgNw3BOFYWFljyNXVdLG2hlmU78KwRk2LvW8aeYsCZzT4HlFr7FRpO",
+ "pGUFISQNXzW8cj95+uHTk798nowABJ1cCjD99iPN84/2WWFYo6fAF7BwCcrTSP1YFI+njZ0aOzTbNMXI",
+ "ufpr+ERA3aYdr/2RCw4fh7bBARbdB5rnpqHgENuDD5ggipSAh+jh/v61vS1SpyjY+Lt6FE8Slxioz2Hs",
+ "p8jDg/6JkYFXBx9f40LbkTtXXm53uN6in9EMy7aD0nYpD77ZpRxx9DMbjk/sjfZ5OnnyDe/NETc8h+YE",
+ "WwbVF/q3yN/5KRfn3Lc00kxVFFRuUFYJ3pbopD/RpUKTJbJIe7Zb1eQnHz4PXml7YbHsvU8tV2V2pQuv",
+ "907A0Ysdd+AdNcQ5+7XLOrW4zfe61DI6s1zBcSz+rO7NyI9hb+TemApsE20ryZvXeEspzlhm+LCL8vAV",
+ "UxrY7qgwSzp6Iwf239vL+Ytezodt00Sr+FUMmBaJb4WpF8tw1duxH8LeeUrpUk8VBSWvL1E49Iu+59DR",
+ "DAcfyB/BhW9xN4C7IRkogLcWh9qlyr8837VKXnBNtO6DL8iVv3GJ7jXNDZ0Ey+2k09mKcLeS3p9G0qvD",
+ "2+yLiL4I6tVkP3wOYe+Tr/J3DfKeq3I4QtILdeagb1CF7m6Hndyb2ZJ9YZvL8QwXz7ZThsPai7fS25eW",
+ "3vpFS2NgNKUov57EhjCsmqqmF3mrsPUIyYWqr36jItqfGFmDMpmBdLc0dgne2JO0HCf+YjzzDylhOaTd",
+ "ylZ/atmqDiG/knTVKjvskhIC79KV7G5duxrTtZjVTiMIOFv9yKM7wtPmiQTDYrBqhc8EVlOv9qFn02qE",
+ "drOmPaWwLz/9CKH2+Wxz9GKX6PQNGXFGV0+K3ALxvfnSvDTqMHh3Mw6Dcbzp8f7jm4Mg3IU3QpMf8Bb/",
+ "whzyi7K0OFldlIVt40h7c1vXcRtX4h22hIyiqdcY8Cgsix3WhLSBEnfdY2RhXcB7M+KrR6q6FrZLzV0K",
+ "mjdVLKhc2k6GxxkkkDv+zwMc/86M/CAkYVwrW2hCu0LJ5A7j+uDBw0ePXRNJz204Vbfd/LvHB4fff++a",
+ "NbVCrX7Ta660PFhBngvXwd0N/XHNh4P//p//nc1md3ayU7F+tnljC/P8XnhqX60LN35ot77xTYpp6a5g",
+ "0k7U3YjD/ZlYR7m/WN/ePl/t9jHY/0PcOvM2GTkFtDZPtlJRr/EWssfkIvfQ1NfeNHynvkxm5I1wVQGq",
+ "nEoiZAbSPR6wrKikXANkM0+pZIHpv5gFneYMuDYKI5ZDl4liGdhkymUlISM5K/C9QAlnGKaO06Mu34Jg",
+ "N6PHYNbfLZN/TdfhQ8/1Na2FWzLmXRd07R9kwJLjQuJP339P9qeN1pLnZoCkRkyMuRZ0PblBa19NbKNC",
+ "wNs1k3fGyOLYYyxHjfRj376h7QKtf27O/c1K7Jbc3cZeE+e8sDen8daE9gOXe7/VcmAFO/tcA74fsCF1",
+ "bqyR8rwIFWdxZoaxRoHfsW9gp0k6qnx20Xt7iG+V/yuxki5BXZBtYOKn2vuEvoyQZ/TOLSau/YF8oIFD",
+ "SIrCe4QEWYBOVy4htoPXCO/x5ZiHGc+2x7iuW2TBLepXAw3rmuEjUSMT5YNcRfTKgYxQ6E++5KD5zBZY",
+ "7qAute3fnEN/E/PPsNQvsLh3qpjy4fU+b9bs4oWgfN5M3pe2EC3X4dS8RfDFENzjfC/9cx+IMbeIP0IA",
+ "vtcTE/JGNGnZrtL0H9Gf+CWv7S+9oDeCg3WcG7HW0uKtj7SWKdA+j0jx9TisciLrV8EvK1/s+YdrtgoZ",
+ "f7XPxmwVNMbc3mayb/IK/2v0ScjWLWPWNttZbKAZbQxzNg1tbdV2VdWvqKJ8FX76O9RbvgbHuhkWg4fU",
+ "8xknFvDrZTpY4sYS815dUHOIA8VrFI/mRlrUsWXRssJzyAVfqt8nK9pGHXG8RKikrt4cL9H85zu7z7F6",
+ "Dhe+UKWrp6QYT8E+zOTf5i2YUi4C8vH+X24OQs0KX4OOh6mkX5m7PNl/dHPTH4M8YymQEyhKIalk+Yb8",
+ "ndePaF2F22EB6rq+mTf1RmuOoyupXXcrDYsEXZ4JtuLRPuk1yz7vZoZBjbwL8kHGAz4Y1j2kZQlUXp4B",
+ "7vZLnXRmPHoRhvy26iLXFasioBgUXTDq/d8mI+1OmIUuFu7yq7gF1FfXcmzCxeOKxbSOfDFSgFgckPf8",
+ "PlEr+uTBw18fPvnO//nwyXcDljMzjyuK07edNQOZz3aYMQa036+t73pF8hp5Bze9lRfboemEZetoEdTm",
+ "oYPwXLjAHOQTdxQp6WawdnK546GGcNjm0YabrxSoNJvH39P3uk39GuARf1aruLacnXvf4PaBhoF0h4CJ",
+ "GEJrXmqosb790YYtomKHLOvq+DeteTZpAfYW88iTnQvlq0qx+mtpoAkqoMC91NJGy9cTGLFQ7zRwVNfv",
+ "q2LUSVWWQur6dKvZKFkOhhxuLVFuiHAvJKmlVKerqtz7hP/B8lifm1QB+8bwnvWzbxPWjm2LK959HanY",
+ "evdlmwn5imzO9y8W5DVLpTjEOs/uWlEbpaHolc1zXX/d9npt9AoSPGcckkLwWDG3n/Dra/wYrSQtNM2H",
+ "Op+Yj0N9u+/ht+DvgNWeZwwHvCp+fyf69JXsQJ3VSjDHtXmcx9L/BY9U672f5iy1ft771PrTxcy4lmpV",
+ "6UycB31RJ7McZIy7PCgZPd6UXaspndLLimSgDAl+e3ajAA8x+q+/Rmp2BYXBB8t2/UktSQvGsw6RoByY",
+ "ijOQqrYxSB/ecmtO+uOYk0bv+4U4pi1AuYujVep65Ys3IgM7brvmayw9k4sMXJ3MvlhRS05xLd3fMU27",
+ "jt6U0mq50qQqiRYxDa3pmNDUMln7Ipja9YSSbeWfCjkDQnMJNNuQOQAnYm4W3X6KjlCFoelezXPyYfwl",
+ "oAauUooUlIIsqd+W3wFaXX0UlUK9BU8IOAJcz0KUIAsqrwzs6dlOOOuK3Yrc/dvP6t5XgNcKdtsRawNi",
+ "I+it43Kc7NaHetz02wiuO3lIdlQC8aIBWqVEUebg7FIRFF4IJ4P714Wot4tXRwsabtgXpng/ydUIqAb1",
+ "C9P7VaGtysTc35G3yuzXE1agJMYpFwpSwTM1/KLgLraMr2YEa1FmBQEnjD7sbwYeUB9fUaXfOf9D+PBS",
+ "8DqHmWLLE4hDleHNyD/XdeF7Y6fmPuSqUnXxeGd2gCy2Bg7rLXO9gXU9FzqA/Ni1XUMLUinYNfIQloLx",
+ "HbJU+KahDjw3+HZGf3FYQ4Q6c0MflS0gGkRsA+TYtwqwG3oVBgDBF+XLUGF0D2g1cM2FyIFyax4WZWm4",
+ "hU4qXvcbQtOxbX2o/9607ROXe78H7+1MgAptTg7yc4tZhUkSK6qIg4MU9NSZpZauxlIfZnMYE/QVJ9so",
+ "3xzLY9MqPAI7DmnXtBEe/9Y56xyODv1GiW6QCHbswtCCY8aUbzIHqeur+oJRNm1jUiA+zy6jGuydU6aT",
+ "hZDuJVy60CAjlpBO7XTKtE9xslZjLZwPmOAIjuu4cdyLqk2dAPcYngWB+Ef8WBEpO2Km+kHIUXkK7YAd",
+ "yjSpuGZ5kKtZKxq/P3PLrQp1q0LdqlC3KtStCnWrQt2qULcq1K0KdatCXUWF+lqpHYnn1z4mjguecFhS",
+ "zc6gzvm4LTXxhwqFrk+6V+lQCTQqmCvcdsXcDw00x1WzHG/gUqjBGhj43qYSlUyBpAYmxkmZUyNLwVrX",
+ "hYPaJel8kUz34iZWuaMKHj0kx3899GGcKxdu2G5799AVm1V6k8M9l71bP4nn03iBGzS7LF7qVWBfYMiV",
+ "W2I5EGUQ+hJbv4AzyI06ZyPEiFFI+yryCdD8ucPNDg259eiZGe3jtKWYO7QVtAxeFsa1UkUohvx23ixb",
+ "0FwNP1pmxytoGavxUzNzqzsj/3gmsk3nTJhd28MNbJ+GJpiTcSo3kSjt3hnokYYWhkM5wuor/5+vPeS4",
+ "T7R9MttFYTHxRoKKntxtVB6Nta03rDeUjfdedOgk+mJnN8B0UgM4JnzK0LPfE/LO9vu62YoIkTtiDfv+",
+ "3cSptFvWTAPbGqnLsZ5vNbXQIz56evHsTw1hZ1UKhGlFfNTy7utlOlknZqQl8MQxoGQusk3SYl+T1i2U",
+ "MUWVgmK++yYK+aeraukuH/Nl+z31da6RF8HitvHkkGjWiWPAA9zZhtqP4801tnBEx54DjH9pFj3ERkMQ",
+ "iONPMS28+5bABZleM83mlvHdMr7gNHYkAsZdlkeXicy+IOOTG1nxYZ73cg1pZYALT/JdNGeiDwPWuuUI",
+ "ymBeLZdYnbPn1DBLAxyPCf6VWKFd7lgueDEKsoPXFduuWkekO1yfuwQZDXeFJEspqvKefYaEb9D6W5SU",
+ "b7yPDBLFiiq3OLS1j66X0dpEjNgz9N6WN2wGfOutfYGxy1217d8tWsg5Ve45cshIxTMXd95L11rz8ZVB",
+ "7dAna96w6a21Qe16I6tz8465Ivwuu5Dq2i9Ygkz0mtsD1S7fa9PC7Mmd3VYl/HNcG2/tcz8DDLaf4tQw",
+ "hGu6PWTA1/D6CLLUm9SL9lsq9qWnoUDlMGXdtrxWb3tv+LbTPXhnyTqVIC8J9SWjU8GVllWq33OKRu1g",
+ "YbO+Q96b6of523PfJO5Xibg93FDvOcWKwrWpO8rnFhBxYv0A4NmoqpZLUIZXhkSyAHjPXSvGScWNpiUW",
+ "pGCpFIlNYjJnyMgnM9uyoBuyoDl6ZX4DKcjc3OzBrlsTsdIsz10EgJmGiMV7TjXJgSpNXjPDZc1w3opY",
+ "h76APhfytMZCPMl5CRwUU0nc+PKj/Yp5xG753siHBkv7ucn/u9kEYg87ywYhP3ph4KZYDyFnSjdO4x7s",
+ "N+YwLBhPokR2sgLiYmi6tEXuGsbrCehe45V3u/6emxtOC4JcnerLkUPXsdM7i/Z0dKimtREd/49f64dY",
+ "0cWlSIwcR5fm9yXTq2o+S0Wx54sx7i1FXZhxL6NQCI7fsj1asj1VQrp39mCHOHcFfkUi7OrWLfMHSikK",
+ "6MCclnrj8dWC7t5f0CGz9SG02FdXnsY3sgcOxQEDN6SVZHqDLgtasl9Pwfz/w+cP5ps8896MSuaTg8lK",
+ "6/Jgbw+fMFsJpfcmn6fhN9X5+KFe2ifvmCglO8Oipx8+//8AAAD//zLWiFKJKAEA",
+}
+
+// decodeSpec returns the content of the embedded swagger specification file,
+// or an error if it failed to decode.
+//
+// The spec is stored in the swaggerSpec string slice as a base64-encoded,
+// gzip-compressed JSON document; this reverses both encodings.
+func decodeSpec() ([]byte, error) {
+	zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, ""))
+	if err != nil {
+		return nil, fmt.Errorf("error base64 decoding spec: %w", err)
+	}
+	zr, err := gzip.NewReader(bytes.NewReader(zipped))
+	if err != nil {
+		return nil, fmt.Errorf("error decompressing spec: %w", err)
+	}
+	// The stream is fully consumed below, so any checksum error has already
+	// surfaced; Close here only releases the reader's resources.
+	defer zr.Close()
+	var buf bytes.Buffer
+	if _, err = buf.ReadFrom(zr); err != nil {
+		// Distinct message so a truncated/corrupt payload is distinguishable
+		// from a bad gzip header above.
+		return nil, fmt.Errorf("error reading decompressed spec: %w", err)
+	}
+
+	return buf.Bytes(), nil
+}
+
+// rawSpec yields the decoded embedded swagger spec. Decoding happens exactly
+// once, at package init; every call returns the same cached ([]byte, error).
+var rawSpec = decodeSpecCached()
+
+// decodeSpecCached is a naive cache of the decoded swagger spec: it decodes
+// eagerly and returns a closure that hands back the captured result (or the
+// captured decode error) on every invocation.
+func decodeSpecCached() func() ([]byte, error) {
+	data, err := decodeSpec()
+	return func() ([]byte, error) {
+		return data, err
+	}
+}
+
+// Constructs a synthetic filesystem for resolving external references when loading openapi specifications.
+func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) {
+ var res = make(map[string]func() ([]byte, error))
+ if len(pathToFile) > 0 {
+ res[pathToFile] = rawSpec
+ }
+
+ return res
+}
+
+// GetSwagger returns the Swagger specification corresponding to the generated code
+// in this file. The external references of Swagger specification are resolved.
+// The logic of resolving external references is tightly connected to "import-mapping" feature.
+// Externally referenced files must be embedded in the corresponding golang packages.
+// Urls can be supported but this task was out of the scope.
+func GetSwagger() (swagger *openapi3.T, err error) {
+ var resolvePath = PathToRawSpec("")
+
+ loader := openapi3.NewLoader()
+ loader.IsExternalRefsAllowed = true
+ loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) {
+ var pathToFile = url.String()
+ pathToFile = path.Clean(pathToFile)
+ getSpec, ok := resolvePath[pathToFile]
+ if !ok {
+ err1 := fmt.Errorf("path not found: %s", pathToFile)
+ return nil, err1
+ }
+ return getSpec()
+ }
+ var specData []byte
+ specData, err = rawSpec()
+ if err != nil {
+ return
+ }
+ swagger, err = loader.LoadFromData(specData)
+ if err != nil {
+ return
+ }
+ return
+}
diff --git a/daemon/algod/api/server/v2/generated/participating/private/private_routes.yml b/daemon/algod/api/server/v2/generated/participating/private/private_routes.yml
new file mode 100644
index 000000000..e9e874bf6
--- /dev/null
+++ b/daemon/algod/api/server/v2/generated/participating/private/private_routes.yml
@@ -0,0 +1,19 @@
+# oapi-codegen configuration for the participating/private route group.
+package: private
+generate:
+  # Emit the echo server glue and the gzipped, embedded OpenAPI spec.
+  echo-server: true
+  embedded-spec: true
+output-options:
+  # Generate only operations tagged below; the public / nonparticipating /
+  # common groups are produced by their own sibling config files.
+  include-tags:
+    - participating
+    - private
+  exclude-tags:
+    - public
+    - nonparticipating
+    - common
+  type-mappings:
+    # Spec "integer" maps to Go uint64 rather than the default int.
+    integer: uint64
+  skip-prune: true
+additional-imports:
+  # Dot-import the shared model package so generated code can reference its
+  # types unqualified.
+  - alias: "."
+    package: "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
+output: ./server/v2/generated/participating/private/routes.go
diff --git a/daemon/algod/api/server/v2/generated/participating/private/routes.go b/daemon/algod/api/server/v2/generated/participating/private/routes.go
new file mode 100644
index 000000000..ca1a6eef4
--- /dev/null
+++ b/daemon/algod/api/server/v2/generated/participating/private/routes.go
@@ -0,0 +1,389 @@
+// Package private provides primitives to interact with the openapi HTTP API.
+//
+// Code generated by github.com/algorand/oapi-codegen DO NOT EDIT.
+package private
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+
+ . "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
+ "github.com/algorand/oapi-codegen/pkg/runtime"
+ "github.com/getkin/kin-openapi/openapi3"
+ "github.com/labstack/echo/v4"
+)
+
+// ServerInterface represents all server handlers.
+//
+// Implementations supply the business logic for each participating/private
+// route; ServerInterfaceWrapper adapts incoming echo requests onto these
+// methods after binding path parameters.
+type ServerInterface interface {
+	// Return a list of participation keys
+	// (GET /v2/participation)
+	GetParticipationKeys(ctx echo.Context) error
+	// Add a participation key to the node
+	// (POST /v2/participation)
+	AddParticipationKey(ctx echo.Context) error
+	// Delete a given participation key by ID
+	// (DELETE /v2/participation/{participation-id})
+	DeleteParticipationKeyByID(ctx echo.Context, participationId string) error
+	// Get participation key info given a participation ID
+	// (GET /v2/participation/{participation-id})
+	GetParticipationKeyByID(ctx echo.Context, participationId string) error
+	// Append state proof keys to a participation key
+	// (POST /v2/participation/{participation-id})
+	AppendKeys(ctx echo.Context, participationId string) error
+}
+
+// ServerInterfaceWrapper converts echo contexts to parameters.
+//
+// Each wrapper method binds and validates the route's path parameters from
+// the echo context, records the security scopes, and then delegates to the
+// wrapped ServerInterface implementation.
+type ServerInterfaceWrapper struct {
+	// Handler receives the request once parameters have been decoded.
+	Handler ServerInterface
+}
+
+// GetParticipationKeys converts echo context to params.
+func (w *ServerInterfaceWrapper) GetParticipationKeys(ctx echo.Context) error {
+ var err error
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetParticipationKeys(ctx)
+ return err
+}
+
+// AddParticipationKey converts echo context to params.
+func (w *ServerInterfaceWrapper) AddParticipationKey(ctx echo.Context) error {
+ var err error
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.AddParticipationKey(ctx)
+ return err
+}
+
+// DeleteParticipationKeyByID converts echo context to params: it binds the
+// "participation-id" path parameter, records the security scopes, and then
+// delegates to the handler.
+func (w *ServerInterfaceWrapper) DeleteParticipationKeyByID(ctx echo.Context) error {
+	// Bind the "participation-id" path parameter; reject the request with
+	// 400 when it cannot be parsed.
+	var participationId string
+	if err := runtime.BindStyledParameterWithLocation("simple", false, "participation-id", runtime.ParamLocationPath, ctx.Param("participation-id"), &participationId); err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter participation-id: %s", err))
+	}
+
+	// Record the API-key security scopes for downstream middleware.
+	ctx.Set(Api_keyScopes, []string{""})
+
+	return w.Handler.DeleteParticipationKeyByID(ctx, participationId)
+}
+
+// GetParticipationKeyByID converts echo context to params: it binds the
+// "participation-id" path parameter, records the security scopes, and then
+// delegates to the handler.
+func (w *ServerInterfaceWrapper) GetParticipationKeyByID(ctx echo.Context) error {
+	// Bind the "participation-id" path parameter; reject the request with
+	// 400 when it cannot be parsed.
+	var participationId string
+	if err := runtime.BindStyledParameterWithLocation("simple", false, "participation-id", runtime.ParamLocationPath, ctx.Param("participation-id"), &participationId); err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter participation-id: %s", err))
+	}
+
+	// Record the API-key security scopes for downstream middleware.
+	ctx.Set(Api_keyScopes, []string{""})
+
+	return w.Handler.GetParticipationKeyByID(ctx, participationId)
+}
+
+// AppendKeys converts echo context to params: it binds the
+// "participation-id" path parameter, records the security scopes, and then
+// delegates to the handler.
+func (w *ServerInterfaceWrapper) AppendKeys(ctx echo.Context) error {
+	// Bind the "participation-id" path parameter; reject the request with
+	// 400 when it cannot be parsed.
+	var participationId string
+	if err := runtime.BindStyledParameterWithLocation("simple", false, "participation-id", runtime.ParamLocationPath, ctx.Param("participation-id"), &participationId); err != nil {
+		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter participation-id: %s", err))
+	}
+
+	// Record the API-key security scopes for downstream middleware.
+	ctx.Set(Api_keyScopes, []string{""})
+
+	return w.Handler.AppendKeys(ctx, participationId)
+}
+
+// EchoRouter is a simple interface which specifies the echo.Route addition
+// functions present on both echo.Echo and echo.Group, so that either can be
+// used for path registration.
+type EchoRouter interface {
+	CONNECT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+	DELETE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+	GET(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+	HEAD(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+	OPTIONS(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+	PATCH(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+	POST(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+	PUT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+	TRACE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+}
+
+// RegisterHandlers adds each server route to the EchoRouter. It is the
+// common case of RegisterHandlersWithBaseURL with an empty path prefix.
+func RegisterHandlers(router EchoRouter, si ServerInterface, m ...echo.MiddlewareFunc) {
+	RegisterHandlersWithBaseURL(router, si, "", m...)
+}
+
+// RegisterHandlersWithBaseURL registers each server route on the router,
+// prepending baseURL to every path so the API can be served under a prefix.
+// Any supplied middleware is attached to every route.
+func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL string, m ...echo.MiddlewareFunc) {
+	wrapper := ServerInterfaceWrapper{Handler: si}
+
+	router.GET(baseURL+"/v2/participation", wrapper.GetParticipationKeys, m...)
+	router.POST(baseURL+"/v2/participation", wrapper.AddParticipationKey, m...)
+	router.DELETE(baseURL+"/v2/participation/:participation-id", wrapper.DeleteParticipationKeyByID, m...)
+	router.GET(baseURL+"/v2/participation/:participation-id", wrapper.GetParticipationKeyByID, m...)
+	router.POST(baseURL+"/v2/participation/:participation-id", wrapper.AppendKeys, m...)
+}
+
+// Base64 encoded, gzipped, json marshaled Swagger object
+var swaggerSpec = []string{
+
+ "H4sIAAAAAAAC/+x9+3PcNtLgv4Ka76vy44Yz8iPZtapS38mWk9XF8bosJXv32b4EQ/bMYEUCXACUZuLT",
+ "/36FBkCCJDhDPSJvqvyTrSEejUaj0W98nqSiKAUHrtXk8POkpJIWoEHiXzRNRcV1wjLzVwYqlazUTPDJ",
+ "of9GlJaMrybTCTO/llSvJ9MJpwU0bUz/6UTCvyomIZscalnBdKLSNRTUDKy3pWldj7RJViJxQxzZIU6O",
+ "J1c7PtAsk6BUH8q/83xLGE/zKgOiJeWKpuaTIpdMr4leM0VcZ8I4ERyIWBK9bjUmSwZ5pmZ+kf+qQG6D",
+ "VbrJh5d01YCYSJFDH85XolgwDh4qqIGqN4RoQTJYYqM11cTMYGD1DbUgCqhM12Qp5B5QLRAhvMCrYnL4",
+ "YaKAZyBxt1JgF/jfpQT4HRJN5Qr05NM0trilBploVkSWduKwL0FVuVYE2+IaV+wCODG9ZuSnSmmyAEI5",
+ "ef/9K/Ls2bMXZiEF1RoyR2SDq2pmD9dku08OJxnV4D/3aY3mKyEpz5K6/fvvX+H8p26BY1tRpSB+WI7M",
+ "F3JyPLQA3zFCQoxrWOE+tKjf9IgciubnBSyFhJF7Yhvf6aaE83/RXUmpTtelYFxH9oXgV2I/R3lY0H0X",
+ "D6sBaLUvDaakGfTDQfLi0+cn0ycHV//x4Sj5b/fnN8+uRi7/VT3uHgxEG6aVlMDTbbKSQPG0rCnv4+O9",
+ "owe1FlWekTW9wM2nBbJ615eYvpZ1XtC8MnTCUimO8pVQhDoyymBJq1wTPzGpeG7YlBnNUTthipRSXLAM",
+ "sqnhvpdrlq5JSpUdAtuRS5bnhgYrBdkQrcVXt+MwXYUoMXDdCB+4oH9fZDTr2oMJ2CA3SNJcKEi02HM9",
+ "+RuH8oyEF0pzV6nrXVbkbA0EJzcf7GWLuOOGpvN8SzTua0aoIpT4q2lK2JJsRUUucXNydo793WoM1gpi",
+ "kIab07pHzeEdQl8PGRHkLYTIgXJEnj93fZTxJVtVEhS5XINeuztPgioFV0DE4p+QarPt/+v072+JkOQn",
+ "UIqu4B1NzwnwVGTDe+wmjd3g/1TCbHihViVNz+PXdc4KFgH5J7phRVUQXhULkGa//P2gBZGgK8mHALIj",
+ "7qGzgm76k57Jiqe4uc20LUHNkBJTZU63M3KyJAXdfHcwdeAoQvOclMAzxldEb/igkGbm3g9eIkXFsxEy",
+ "jDYbFtyaqoSULRlkpB5lByRumn3wMH49eBrJKgDHDzIITj3LHnA4bCI0Y46u+UJKuoKAZGbkZ8e58KsW",
+ "58BrBkcWW/xUSrhgolJ1pwEYcerd4jUXGpJSwpJFaOzUocNwD9vGsdfCCTip4JoyDpnhvAi00GA50SBM",
+ "wYS7lZn+Fb2gCr59PnSBN19H7v5SdHd9546P2m1slNgjGbkXzVd3YONiU6v/COUvnFuxVWJ/7m0kW52Z",
+ "q2TJcrxm/mn2z6OhUsgEWojwF49iK051JeHwI39s/iIJOdWUZ1Rm5pfC/vRTlWt2ylbmp9z+9EasWHrK",
+ "VgPIrGGNalPYrbD/mPHi7FhvokrDGyHOqzJcUNrSShdbcnI8tMl2zOsS5lGtyoZaxdnGaxrX7aE39UYO",
+ "ADmIu5KahuewlWCgpekS/9kskZ7oUv5u/inL3PTW5TKGWkPH7r5F24CzGRyVZc5SapD43n02Xw0TAKsl",
+ "0KbFHC/Uw88BiKUUJUjN7KC0LJNcpDRPlKYaR/pPCcvJ4eQ/5o1xZW67q3kw+RvT6xQ7GXnUyjgJLctr",
+ "jPHOyDVqB7MwDBo/IZuwbA8lIsbtJhpSYoYF53BBuZ41+kiLH9QH+IObqcG3FWUsvjv61SDCiW24AGXF",
+ "W9vwgSIB6gmilSBaUdpc5WJR//DwqCwbDOL3o7K0+EDREBhKXbBhSqtHuHzanKRwnpPjGfkhHBvlbMHz",
+ "rbkcrKhh7oalu7XcLVYbjtwamhEfKILbKeTMbI1Hg5Hh74LiUGdYi9xIPXtpxTT+m2sbkpn5fVTnPweJ",
+ "hbgdJi7UohzmrAKDvwSay8MO5fQJx9lyZuSo2/dmZGNGiRPMjWhl537acXfgsUbhpaSlBdB9sXcp46iB",
+ "2UYW1lty05GMLgpzcIYDWkOobnzW9p6HKCRICh0YXuYiPf8bVes7OPMLP1b/+OE0ZA00A0nWVK1nk5iU",
+ "ER6vZrQxR8w0RO2dLIKpZvUS72p5e5aWUU2DpTl442KJRT32Q6YHMqK7/B3/Q3NiPpuzbVi/HXZGzpCB",
+ "KXucnQchM6q8VRDsTKYBmhgEKaz2TozWfS0oXzWTx/dp1B69tgYDt0NuEbhDYnPnx+Cl2MRgeCk2vSMg",
+ "NqDugj7MOChGaijUCPiOHWQC99+hj0pJt30k49hjkGwWaERXhaeBhze+maWxvB4thLwZ9+mwFU4aezKh",
+ "ZtSA+U47SMKmVZk4UozYpGyDzkCNC2830+gOH8NYCwunmv4BWFBm1LvAQnugu8aCKEqWwx2Q/jrK9BdU",
+ "wbOn5PRvR988efrr02++NSRZSrGStCCLrQZFHjrdjCi9zeFRf2WoHVW5jo/+7XNvhWyPGxtHiUqmUNCy",
+ "P5S1bloRyDYjpl0fa20046prAMcczjMwnNyinVjDvQHtmCkjYRWLO9mMIYRlzSwZcZBksJeYrru8Zppt",
+ "uES5ldVdqLIgpZAR+xoeMS1SkScXIBUTEVfJO9eCuBZevC27v1toySVVxMyNpt+Ko0ARoSy94eP5vh36",
+ "bMMb3Ozk/Ha9kdW5ecfsSxv53pKoSAky0RtOMlhUq5YmtJSiIJRk2BHv6DdstdaByPJOCrG881s7Okts",
+ "SfjBCny56dMX+96KDIzaXak7YO/NYA32DOWEOKMLUWlCCRcZoI5eqTjjH3D0oocJHWM6vEv02spwCzD6",
+ "YEors9qqJOj26dFi0zGhqaWiBFGjBuzitUPDtrLTWSdiLoFmRk8ETsTCGZ+dWRwXSdFnpT3rdNdORHNu",
+ "wVVKkYJSRr+3Wtte0Hw7S5Z6B54QcAS4noUoQZZU3hrY84u9cJ7DNkEPqyIPf/xFPfoC8Gqhab4Hsdgm",
+ "ht5ahXAehj7U46bfRXDdyUOyoxKI531GXzEMIgcNQyi8Fk4G968LUW8Xb4+WC5Bo6/9DKd5PcjsCqkH9",
+ "g+n9ttBW5UDckBOdz1iBliBOuVCQCp6p6GA5VTrZx5ZNo5Z8b1YQcMIYJ8aBB6yRb6jS1j/FeIZqtb1O",
+ "cB5rpjRTDAM8KOKYkX/x0k1/7NTcg1xVqhZ1VFWWQmrIYmvgsNkx11vY1HOJZTB2LU9pQSoF+0YewlIw",
+ "vkOWXYlFENW1Gdc5cPuLQ2Onuee3UVS2gGgQsQuQU98qwG4YOzEACFMNoi3hMNWhnDpgYzpRWpSl4RY6",
+ "qXjdbwhNp7b1kf65adsnLqqbezsTYGbXHiYH+aXFrI2aWVOjlOHIpKDnRvZAFcs60vowm8OYKMZTSHZR",
+ "vjmWp6ZVeAT2HNIB7dbF5QWzdQ5Hh36jRDdIBHt2YWjBA6r2Oyo1S1mJkuKPsL1zwbk7QdQATDLQlBn1",
+ "L/hghegy7E+sZ7Q75s0E6VFaUR/8nloUWU7OFF4YbeDPYYueoHc25OYsCNS5A00gMqo53ZQTBNQ78o0A",
+ "EzaBDU11vjXXnF7DllyCBKKqRcG0tjFUbUVBizIJB4hanHbM6MyrNlzF78AYe+8pDhUsr78V04mVqHbD",
+ "d9YRq1rocJJUKUQ+wtPWQ0YUglGeOFIKs+vMhez5uC5PSS0gnRCDtvWaeT5QLTTjCsj/ERVJKUeBtdJQ",
+ "3whCIpvF69fMYC6wek7nc2swBDkUYOVw/PL4cXfhjx+7PWeKLOHSx7mahl10PH6MWvA7oXTrcN2BCcYc",
+ "t5MIb0dTnLkonAzX5Sn7fT5u5DE7+a4zeG2/M2dKKUe4Zvm3ZgCdk7kZs/aQRsb5u3DcUVa2YOjYunHf",
+ "MeDgj7HRNEPHoOtPHLhpm49DnlojX+XbO+DTdiAioZSg8FSFeomyX8UyDIV2x05tlYaib7qxXX8dEGze",
+ "e7GgJ2UKnjMOSSE4bKPZP4zDT/gx1tue7IHOyGOH+nbFphb8HbDa84yhwtviF3c7IOV3dYjCHWx+d9yO",
+ "1S4MAketFPKSUJLmDHVWwZWWVao/copScXCWI64cL+sP60mvfJO4YhbRm9xQHzlFN14tK0fNz0uIaMHf",
+ "A3h1SVWrFSjdkQ+WAB+5a8U4qTjTOFdh9iuxG1aCRH/KzLYs6JYsaY5q3e8gBVlUun1jYqyq0kbrsiZE",
+ "Mw0Ry4+capKD0UB/Yvxsg8P5kFBPMxz0pZDnNRZm0fOwAg6KqSTucvrBfsVoALf8tYsMwMQh+9kancz4",
+ "TUDrVkMrGeb/Pvyvww9HyX/T5PeD5MX/mH/6/Pzq0ePej0+vvvvu/7V/enb13aP/+s/YTnnYY5GUDvKT",
+ "YydNnhyjyNBYnXqw35vFoWA8iRLZ2RpIwTgG5Hdoizw0go8noEeNWc/t+keuN9wQ0gXNWUb1zcihy+J6",
+ "Z9Gejg7VtDaio0D6tX6KRUesRFLS9Bw9tpMV0+tqMUtFMfdS9Hwlaol6nlEoBMdv2ZyWbK5KSOcXT/Zc",
+ "6bfgVyTCrjpM9sYCQd/fG49+RoOqC2jGk7esuCWKSjmjLgb3eb+bWE7rCHeb2XpIMPx5Tb3T2P359Jtv",
+ "J9MmbLn+bjR1+/VT5EywbBMLTs9gE5PU3FHDI/ZAkZJuFeg4H0LYoy5G65cKhy3AiPhqzcr75zlKs0Wc",
+ "V/qQKafxbfgJt7FM5iSieXbrrD5ief9wawmQQanXsYy3lsyBrZrdBOi4zEopLoBPCZvBrKtxZStQ3tmZ",
+ "A11i5hWaGMWYEND6HFhC81QRYD1cyCi1JkY/KCY7vn81nTgxQt25ZO8GjsHVnbO2xfq/tSAPfnh9RuaO",
+ "9aoHNk/CDh1EtkcsGS54s+VMNdzM5vnaRJGP/CM/hiXjzHw//Mgzqul8QRVL1bxSIF/SnPIUZitBDn08",
+ "6DHV9CPvyWyDqfhBJC4pq0XOUnIeytYNedr0yv4IHz9+MBz/48dPPb9SXxJ2U0X5i50guWR6LSqduPyx",
+ "RMIllVkEdFXnD+HINvtz16xT4sa2rNjlp7nx4zyPlqXq5hH0l1+WuVl+QIbKRcmbLSNKC+mlGiPqWGhw",
+ "f98KdzFIeumTDysFivxW0PID4/oTST5WBwfPgLQC639zwoOhyW0JLZvXjfIcuvYuXLjVkGCjJU1KugIV",
+ "Xb4GWuLuo+RdoHU1zwl2awX0+4AlHKpZgMfH8AZYOK4dnIyLO7W9fCGA+BLwE24htjHiRuO0uOl+BSH+",
+ "N96uTppAb5cqvU7M2Y6uShkS9ztT5wevjJDlPUmKrbg5BC6VegEkXUN6DhlmdUJR6u201d07K53I6lkH",
+ "Uzb72QboYooemgcXQKoyo06op3zbzZVSoLVPEHsP57A9E02G33WSo9q5OmrooCKlBtKlIdbw2Loxupvv",
+ "POKYn1CWPuUFY589WRzWdOH7DB9kK/LewSGOEUUrl2QIEVRGEGGJfwAFN1ioGe9WpB9bntFXFvbmiyRL",
+ "e95PXJNGDXPO63A1mCJjvxeApRTEpSILauR24aoA2HyUgItViq5gQEIOLbQjsz5aVl0cZN+9F73pxLJ7",
+ "ofXumyjItnFi1hylFDBfDKmgMtMJWfAzWScArmBGsLiPQ9giRzGpju2wTIfKlqXcVisZAi1OwCB5I3B4",
+ "MNoYCSWbNVW+QAHWcfBneZQM8AfmV+3Kqj0JvO1BsYY6Z9bz3O457WmXLrfWJ9T6LNpQtRyREWskfAzw",
+ "i22H4CgAZZDDyi7cNvaE0uR6NRtk4Pj7cpkzDiSJOe6pUiJltsJEc824OcDIx48JscZkMnqEGBkHYKNz",
+ "Cwcmb0V4NvnqOkByl6tG/djoFgv+hnhYrQ1lMyKPKA0LZ3wgaNJzAOqiPer7qxNzhMMQxqfEsLkLmhs2",
+ "5zS+ZpBecieKrZ1UTudefTQkzu6w5duL5VprslfRTVYTykwe6LhAtwPihdgkNq4+KvEuNgtD79HoPozy",
+ "jx1Mm0b7QJGF2KDLHq8WG022B5ZhODwYgYa/YQrpFfsN3eYWmF3T7pamYlSokGScOa8mlyFxYszUAxLM",
+ "ELk8DDJjbwRAx9jR1JBzyu9eJbUtnvQv8+ZWmzYVH3zgdOz4Dx2h6C4N4K9vhalzWd91JZaonaLteW6n",
+ "8QYiZIzoDZvou3v6TiUFOaBSkLSEqOQ85gQ0ug3gjXPquwXGC0wWpnz7KAhnkLBiSkNjjjcXs/cv3bd5",
+ "kmKNEiGWw6vTpVya9b0Xor6mbBI8dmwt895XcCE0JEsmlU7QlxFdgmn0vUKl+nvTNC4rtQMmbLkulsV5",
+ "A057DtskY3kVp1c374/HZtq3NUtU1QL5LeMEaLomCywvFw2j2jG1jbTbueA3dsFv6J2td9xpME3NxNKQ",
+ "S3uOP8m56HDeXewgQoAx4ujv2iBKdzBIlH2OIdexDMhAbrKHMzMNZ7usr73DlPmx9wagWCiG7yg7UnQt",
+ "gcFg5yoYuomMWMJ0UJ2tn9UzcAZoWbJs07GF2lEHNWZ6LYOHL3vRwQLurhtsDwYCu2cssFiCalc4aQR8",
+ "W2evlWA8G4WZs3YdkpAhhFMx5avE9hFVJx7sw9UZ0PxH2P5i2uJyJlfTye1MpzFcuxH34Ppdvb1RPKOT",
+ "35rSWp6Qa6KclqUUFzRPnIF5iDSluHCkic29PfqeWV3cjHn2+ujNOwf+1XSS5kBlUosKg6vCduWfZlW2",
+ "mMrAAfFVKI3O52V2K0oGm19XgAiN0pdrcBX/Amm0V5qocTgER9EZqZfxWKO9JmfnG7FL3OEjgbJ2kTTm",
+ "O+shaXtF6AVlubebeWgH4oJwcePqW0W5QjjArb0rgZMsuVN20zvd8dPRUNcenhTOtaMmYWHLbioieNeF",
+ "bkRINMchqRYUCwtZq0ifOfGqQEtConKWxm2sfKEMcXDrOzONCTYeEEbNiBUbcMXyigVjmWZqhKLbATKY",
+ "I4pMX6RqCHcL4eqlV5z9qwLCMuDafJJ4KjsHFSs5OWt7/zo1skN/LjewtdA3w99GxgiLanVvPARit4AR",
+ "eup64B7XKrNfaG2RMj8ELolrOPzDGXtX4g5nvaMPR802DHLd9riF5c37/M8Qhi2Fub+2uldeXXWvgTmi",
+ "tdKZSpZS/A5xPQ/V40jWgS8jxjDK5Xfgs0jyVpfF1NadpuR7M/vgdg9JN6EVqh2kMED1uPOBWw7rGXkL",
+ "NeV2q23p4lasW5xgwvjUuR2/IRgHcy+mN6eXCxor9mSEDAPTUeMAbtnStSC+s8e9M/szV9ltRgJfct2W",
+ "2Xy8EmSTENTP7b+hwGCnHS0qNJIBUm0oE0yt/y9XIjJMxS8ptxWwTT97lFxvBdb4ZXpdConZtCpu9s8g",
+ "ZQXN45JDlvZNvBlbMVv/uVIQFBh2A9nC+ZaKXJFm62JvUHOyJAfToIS5242MXTDFFjlgiye2xYIq5OS1",
+ "IaruYpYHXK8VNn86ovm64pmETK+VRawSpBbqUL2pnVcL0JcAnBxguycvyEN02yl2AY8MFt39PDl88gKN",
+ "rvaPg9gF4Aq97+ImGbKTfzh2Eqdj9FvaMQzjdqPOormh9nWOYca14zTZrmPOErZ0vG7/WSoopyuIR4oU",
+ "e2CyfXE30ZDWwQvPbGl5paXYEqbj84Omhj8NxLEb9mfBIKkoCqYL59xRojD01FQPtpP64Wydelf4zcPl",
+ "P6KPtPQuoo4Seb9GU3u/xVaNnuy3tIA2WqeE2hTqnDXRC74cJTnxFRqwEl5dAM/ixsxllo5iDgYzLEkp",
+ "GdeoWFR6mfyVpGsqaWrY32wI3GTx7fNI9b92FSp+PcDvHe8SFMiLOOrlANl7GcL1JQ+54ElhOEr2qMkb",
+ "CU7loDM37rYb8h3uHnqsUGZGSQbJrWqRGw049a0Ij+8Y8JakWK/nWvR47ZXdO2VWMk4etDI79PP7N07K",
+ "KISMlV1qjruTOCRoyeACY/fim2TGvOVeyHzULtwG+i/refAiZyCW+bMcUwReioh26itS1pZ0F6sesQ4M",
+ "HVPzwZDBwg01Je3qf/fv9PPG577zyXzxsOIfXWC/8JYikv0KBjYxqEwa3c6s/h74vyl5KTZjN7VzQvzG",
+ "/hugJoqSiuXZL01+Z6fwq6Q8XUf9WQvT8dfmiYp6cfZ+ilY3WlPOIY8OZ2XBX73MGJFq/ynGzlMwPrJt",
+ "txatXW5ncQ3gbTA9UH5Cg16mczNBiNV2wlsdUJ2vREZwnqaUTsM9+zWMg0qT/6pA6VjyEH6wQV1otzT6",
+ "ri10SIBnqC3OyA/2ibk1kFalD9TSWFHltmoEZCuQzqBelbmg2ZSYcc5eH70hdlbbxxZat4UWV6iktFfR",
+ "sVcFVcLGhQf7munx1IXx4+yOpTarVhoL7yhNizKWZmpanPkGmMsa2vBRfQmxMyPHVnNUXi+xkxh6WDJZ",
+ "GI2rHs3KLkgT5j9a03SNKlmLpQ6T/PgKoZ4qVfAqT11dvy6dhefOwO2KhNoaoVMijN58yZR9WQwuoJ3Z",
+ "Wqd5O5OAz3RtL09WnFtKicoeu8oQ3ATtHjgbqOHN/FHIOoi/pkBuC+xet2DqKfaK1qLpVl/tPcdjsxvr",
+ "qun+xciUcsFZipVgYleze6VsjA9sRNGcrpHVH3F3QiOHK1rztQ6Tc1gcrALrGaFDXN8IH3w1m2qpw/6p",
+ "8TmsNdVkBVo5zgbZ1JcudnZAxhW4Umj4YF3AJ4Vs+RWRQ0Zd1Unt0rgmGWFazIBi97359tap/Rgvfs44",
+ "CvgObS403Vrq8BElbbQCpslKgHLraecGqw+mzwzTZDPYfJr5R5dwDOuWM8u2Puj+UEfeI+08wKbtK9PW",
+ "FkVpfm5FINtJj8rSTTpc2DoqD+gNH0RwxLOYeNdOgNx6/HC0HeS2M5QE71NDaHCBjmgo8R7uEUZd5Lnz",
+ "gIARWi1FYQtiQ7iitRAYj4DxhnFongSLXBBp9ErAjcHzOtBPpZJqKwKO4mlnQHP0PscYmtLO9XDboTob",
+ "jCjBNfo5hrexqU89wDjqBo3gRvm2fonMUHcgTLzCJxAdIvvVplGqckJUhhkFnfrTMcZhGLevcN++APrH",
+ "oC8T2e5aUntyrnMTDSWJLqpsBTqhWRarIfkSvxL8SrIKJQfYQFrVNfjKkqRYXaVdbqZPbW6iVHBVFTvm",
+ "8g1uOV0qYnL0W5xA+ZSJZvAZQfZrWO/x63fvX786Ont9bO8LRVRls0SNzC2hMAxxRk640mBE50oB+S1E",
+ "42/Y77fOguNgBnXnI0Qb1r73hIi5Most/hurkzdMQC5W5NrRij4wBDteW7xvj9QTzs3RSxRbJeMxgVff",
+ "7dHRTH2z89j0v9MDmYtVG5B7rmCxixmHexRjw6/N/RYWeOgVf7Q3YF1/AWMDhX8tCLXbOnO4zTzxxu1V",
+ "g0SfVP0ayW47yfC7IlO8owcihIO6HdSKAdbJORQnnA6GtVPtEuw0JTs55WDSkg0ysulJ9lHsqIF3KLDI",
+ "xhWZz73e4wTYnjqAY+9EqI9Y6wP0ow+HJSVlzoPfMIs+Zl3g/LBVc9ehaza4uwgXjj5oWIw/7jBcQqcp",
+ "m4PXQCkUawrWxl59GBkudYYPNwQlgPpj+ViFC0i1EeoDH6wEuE5BIDNZ8EbN11I6A+pHHVXmKujsKpvT",
+ "L028h9n0MluC7Cxb1nU2vkjMUR1pg/5/fCVmBdw9E9OOWR8dObtcQqrZxZ5Mon8YLbXJUpl6PdY+9xYk",
+ "FrE6EtM/w39N9boBaFeiz054gtJytwZnKI/gHLYPFGlRQ7TO7NTzvJvUIEAMIHdIDIkIFfNkW8Obcy4y",
+ "VVMGYsFHjtju0FRzGizwH+TF3XAuT5KEhrlyO6a8EDHNfdRcpuu1MkgxqHAo2ahfYntYEDrGiuaqfnyn",
+ "fmc/0GrISb/S26WrgYB5X7Wt2VdDAOV/80medpacnUP4BAFa9i+pzHyLqKrqteBkx33UyxDy5aG7QC/r",
+ "mVkT59fPCYnUDsJozjQXivFVMhQS2w6tC99+xQACvA6wdjnCtQTpnmpBE3IuFCRa+LjAXXDsQoV7p/Qm",
+ "SFCD9foscINVNN43ZUKwAirFqhnUBUeECzR6KzXQyaCYx/Ccu5D9yn73SRC+AuYIjdzRa7K3GoeP8GSq",
+ "h8SQ6pfE3Zb7kytuovUyzu1TYypW2YMbVIbW41KKrErtBR0ejMbGMLZuzg5WElUY0/4qe7J/jlWk3gSp",
+ "auewnVv5O11T3pTzah9rK0LZNQSp4Z3dvlODQFz3yVd2Aas7gfNLKtXTSSlEngyYi0/6BUq6Z+CcpeeQ",
+ "EXN3+NiogSL/5CFaKWt/4OV66wtylCVwyB7NCDFqeVHqrXcNtmvtdibnD/Su+Tc4a1bZmkFO35995PGw",
+ "PqzmI2/J3/wwu7maAsP8bjmVHWRP+YvNQHEUSS8jT16MfdE44qzrPkPQEJWFIial3DAXetT57uv8EdIP",
+ "6vDv1n7CUglNDJa0piOUlrxBpyu8/NRYhMa9COA77AEvVIqDNwE8N3LgfOFAqZ9qpARLGaSE1vL36dn+",
+ "Ie6aLwVbpDCy3izTFq6xTvb2vgRGFPWqtk3E8dw3YWBdBMGxVkzf9KHQlIglZ0PCMedSXtD8/s0XWDDj",
+ "CPHhHraKLzTUf0MkW1Sqm0UrvKGj5g503bubmr9Dc8s/wOxR1AbshnJ21PotBl9CEkuj0ZzkonmTBYck",
+ "lzimNRo/+ZYsXKR1KSFlinWSUC59Ncxa3cPi0M17Z7v1y33r/EXoW5CxUxBESd42lfW0wPuhgbA5ol+Y",
+ "qQyc3CiVx6ivRxYR/MV4VJjyvOe6OG9Zk22l0k40h5Bwx1blwI19TatyP5l77PJwHXjpVAr66xx9W7dw",
+ "G7mom7WNdYn0kbur/NoYT0a8qqLpjq4UixAsSUoQVPLbk9+IhCW+OSDI48c4wePHU9f0t6ftz+Y4P34c",
+ "FePuzYnSevrdzRujmF+Gov9shNtAoGlnPyqWZ/sIoxU23Lz/gYGxv7rEgS/yAsmv1p7aP6qudvt13Lfd",
+ "TUDERNbamjyYKggIHhEL7LrNoo/zK0gryfQW6xl48xv7NVon6ofaYu88PnUGrLv7tDiHuiJGY9+vlL9d",
+ "fxD2Mf/CyNToPNf4GNzrDS3KHNxB+e7B4i/w7K/Ps4NnT/6y+OvBNwcpPP/mxcEBffGcPnnx7Ak8/es3",
+ "zw/gyfLbF4un2dPnTxfPnz7/9psX6bPnTxbPv33xlweGDxmQLaATnz03+d/4TE9y9O4kOTPANjihJavf",
+ "gDRk7F8IoCmeRCgoyyeH/qf/6U/YLBVFM7z/deKScyZrrUt1OJ9fXl7Owi7zFRr0Ei2qdD338/Tf3nt3",
+ "UgdY24Rv3FEbO2tIATfVkcIRfnv/+vSMHL07mTUEMzmcHMwOZk/wZa0SOC3Z5HDyDH/C07PGfZ87Ypsc",
+ "fr6aTuZroDn6v8wfBWjJUv9JXdLVCuTMPZVgfrp4OveixPyzM2Ze7fo2D6uOzj+3bL7Znp5YlXD+2Sfb",
+ "727dymZ3tu6gw0godjWbLzCHZ2xTUEHj4aXYV77nn1FEHvx97hIb4h9RVbFnYO4dI/GWLSx91hsDa6eH",
+ "e0R2/rl51TkAywb2z+0rZs3PvYrWK4hmGGCsP931PCnSriX7kwy5ke69torlMa2xEkn66cHBn+Ph1efX",
+ "BHSnJaQVBxMB5iXNiE/2wLmf3N/cJxz9ooZDEcuBEYLn9wdBuxbpj7Alb4Um36OqcDWdfHOfO3HCjeBC",
+ "c4Itg1ID/SPyMz/n4pL7lubqroqCyu3o46PpSqGpTrIL6gSnoDz15BPahm3gYvuoHWVZj+itCANKvxTZ",
+ "dgfGCrUqXXBug7RGgmPcLKGvAvbf++q9jnoOW2I9Z95C6l4Hb2QrLSu4uiVP+NM+5PqVp3zlKdJO/+z+",
+ "pj8FecFSIGdQlEJSyfIt+ZnXqVU35nFHWRaNSmof/b08zmjHqchgBTxxDCxZiGzry0e1JjgHq6z1BJn5",
+ "53YNWCu4TTLIQUcjLszv9etc/UUstuTkuCfh2G5dzvtyi02D2qqHHz5bbceI8o0y0gWxxxnDsp5d3vQp",
+ "zjV3kb1ZyEpoYrGQuUV9ZURfGdGthJvRh2eMfBPVPmziMu3d2VOfgxyrPkF1H5QxOsoXPb53svF9/Sem",
+ "79joLshI8MGGIXfR/JVFfGURt2MRP0DkMOKpdUwjQnTX04fGMgwMbMm6Ly2gwd83r3IqiYKxZo4jHNEZ",
+ "N+6Da9y3UhfFldXpKG8eo4ls4N3qeV9Z3leW9+dheUf7GU1bMLm1ZnQO24KWtT6k1pXOxGVg/0dYbDxO",
+ "32pdv/3W+nt+SZlOlkK6XAGsRNrvrIHmc1dIofNrkxTY+4KZjsGPgYU7/uu8LvQc/dh1HcS+OtO5b9T4",
+ "BkNfG/Lu2sv24ZPhu1gn0LH1xnV0OJ9jgO1aKD2fXE0/d9xK4cdP9R5/ri8Dt9dXn67+fwAAAP//ROgk",
+ "pRK6AAA=",
+}
+
+// GetSwagger returns the content of the embedded swagger specification file
+// or error if failed to decode
+func decodeSpec() ([]byte, error) {
+ zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, ""))
+ if err != nil {
+ return nil, fmt.Errorf("error base64 decoding spec: %s", err)
+ }
+ zr, err := gzip.NewReader(bytes.NewReader(zipped))
+ if err != nil {
+ return nil, fmt.Errorf("error decompressing spec: %s", err)
+ }
+ var buf bytes.Buffer
+ _, err = buf.ReadFrom(zr)
+ if err != nil {
+ return nil, fmt.Errorf("error decompressing spec: %s", err)
+ }
+
+ return buf.Bytes(), nil
+}
+
+var rawSpec = decodeSpecCached()
+
+// a naive cached of a decoded swagger spec
+func decodeSpecCached() func() ([]byte, error) {
+ data, err := decodeSpec()
+ return func() ([]byte, error) {
+ return data, err
+ }
+}
+
+// Constructs a synthetic filesystem for resolving external references when loading openapi specifications.
+func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) {
+ var res = make(map[string]func() ([]byte, error))
+ if len(pathToFile) > 0 {
+ res[pathToFile] = rawSpec
+ }
+
+ return res
+}
+
+// GetSwagger returns the Swagger specification corresponding to the generated code
+// in this file. The external references of Swagger specification are resolved.
+// The logic of resolving external references is tightly connected to "import-mapping" feature.
+// Externally referenced files must be embedded in the corresponding golang packages.
+// Urls can be supported but this task was out of the scope.
+func GetSwagger() (swagger *openapi3.T, err error) {
+ var resolvePath = PathToRawSpec("")
+
+ loader := openapi3.NewLoader()
+ loader.IsExternalRefsAllowed = true
+ loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) {
+ var pathToFile = url.String()
+ pathToFile = path.Clean(pathToFile)
+ getSpec, ok := resolvePath[pathToFile]
+ if !ok {
+ err1 := fmt.Errorf("path not found: %s", pathToFile)
+ return nil, err1
+ }
+ return getSpec()
+ }
+ var specData []byte
+ specData, err = rawSpec()
+ if err != nil {
+ return
+ }
+ swagger, err = loader.LoadFromData(specData)
+ if err != nil {
+ return
+ }
+ return
+}
diff --git a/daemon/algod/api/server/v2/generated/participating/public/public_routes.yml b/daemon/algod/api/server/v2/generated/participating/public/public_routes.yml
new file mode 100644
index 000000000..b1dc1f675
--- /dev/null
+++ b/daemon/algod/api/server/v2/generated/participating/public/public_routes.yml
@@ -0,0 +1,19 @@
+package: public
+generate:
+ echo-server: true
+ embedded-spec: true
+output-options:
+ include-tags:
+ - participating
+ - public
+ exclude-tags:
+ - private
+ - common
+ - nonparticipating
+ type-mappings:
+ integer: uint64
+ skip-prune: true
+additional-imports:
+ - alias: "."
+ package: "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
+output: ./server/v2/generated/participating/public/routes.go
diff --git a/daemon/algod/api/server/v2/generated/participating/public/routes.go b/daemon/algod/api/server/v2/generated/participating/public/routes.go
new file mode 100644
index 000000000..541e80e66
--- /dev/null
+++ b/daemon/algod/api/server/v2/generated/participating/public/routes.go
@@ -0,0 +1,414 @@
+// Package public provides primitives to interact with the openapi HTTP API.
+//
+// Code generated by github.com/algorand/oapi-codegen DO NOT EDIT.
+package public
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+
+ . "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
+ "github.com/algorand/oapi-codegen/pkg/runtime"
+ "github.com/getkin/kin-openapi/openapi3"
+ "github.com/labstack/echo/v4"
+)
+
+// ServerInterface represents all server handlers.
+type ServerInterface interface {
+ // Get a list of unconfirmed transactions currently in the transaction pool by address.
+ // (GET /v2/accounts/{address}/transactions/pending)
+ GetPendingTransactionsByAddress(ctx echo.Context, address string, params GetPendingTransactionsByAddressParams) error
+ // Broadcasts a raw transaction to the network.
+ // (POST /v2/transactions)
+ RawTransaction(ctx echo.Context) error
+ // Get a list of unconfirmed transactions currently in the transaction pool.
+ // (GET /v2/transactions/pending)
+ GetPendingTransactions(ctx echo.Context, params GetPendingTransactionsParams) error
+ // Get a specific pending transaction.
+ // (GET /v2/transactions/pending/{txid})
+ PendingTransactionInformation(ctx echo.Context, txid string, params PendingTransactionInformationParams) error
+}
+
+// ServerInterfaceWrapper converts echo contexts to parameters.
+type ServerInterfaceWrapper struct {
+ Handler ServerInterface
+}
+
+// GetPendingTransactionsByAddress converts echo context to params.
+func (w *ServerInterfaceWrapper) GetPendingTransactionsByAddress(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "address" -------------
+ var address string
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "address", runtime.ParamLocationPath, ctx.Param("address"), &address)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params GetPendingTransactionsByAddressParams
+ // ------------- Optional query parameter "max" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "max", ctx.QueryParams(), &params.Max)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter max: %s", err))
+ }
+
+ // ------------- Optional query parameter "format" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetPendingTransactionsByAddress(ctx, address, params)
+ return err
+}
+
+// RawTransaction converts echo context to params.
+func (w *ServerInterfaceWrapper) RawTransaction(ctx echo.Context) error {
+ var err error
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.RawTransaction(ctx)
+ return err
+}
+
+// GetPendingTransactions converts echo context to params.
+func (w *ServerInterfaceWrapper) GetPendingTransactions(ctx echo.Context) error {
+ var err error
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params GetPendingTransactionsParams
+ // ------------- Optional query parameter "max" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "max", ctx.QueryParams(), &params.Max)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter max: %s", err))
+ }
+
+ // ------------- Optional query parameter "format" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetPendingTransactions(ctx, params)
+ return err
+}
+
+// PendingTransactionInformation converts echo context to params.
+func (w *ServerInterfaceWrapper) PendingTransactionInformation(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "txid" -------------
+ var txid string
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "txid", runtime.ParamLocationPath, ctx.Param("txid"), &txid)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter txid: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params PendingTransactionInformationParams
+ // ------------- Optional query parameter "format" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.PendingTransactionInformation(ctx, txid, params)
+ return err
+}
+
+// This is a simple interface which specifies echo.Route addition functions which
+// are present on both echo.Echo and echo.Group, since we want to allow using
+// either of them for path registration
+type EchoRouter interface {
+ CONNECT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ DELETE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ GET(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ HEAD(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ OPTIONS(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ PATCH(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ POST(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ PUT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+ TRACE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
+}
+
+// RegisterHandlers adds each server route to the EchoRouter.
+func RegisterHandlers(router EchoRouter, si ServerInterface, m ...echo.MiddlewareFunc) {
+ RegisterHandlersWithBaseURL(router, si, "", m...)
+}
+
+// Registers handlers, and prepends BaseURL to the paths, so that the paths
+// can be served under a prefix.
+func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL string, m ...echo.MiddlewareFunc) {
+
+ wrapper := ServerInterfaceWrapper{
+ Handler: si,
+ }
+
+ router.GET(baseURL+"/v2/accounts/:address/transactions/pending", wrapper.GetPendingTransactionsByAddress, m...)
+ router.POST(baseURL+"/v2/transactions", wrapper.RawTransaction, m...)
+ router.GET(baseURL+"/v2/transactions/pending", wrapper.GetPendingTransactions, m...)
+ router.GET(baseURL+"/v2/transactions/pending/:txid", wrapper.PendingTransactionInformation, m...)
+
+}
+
+// Base64 encoded, gzipped, json marshaled Swagger object
+var swaggerSpec = []string{
+
+ "H4sIAAAAAAAC/+x9a5fbNpLoX8HV7jl+rCi1X5lxn5Ozt/1Ipndsx8fdycxdt28CkSUJ0yTAAGC3FF//",
+ "93tQAEiQBCX2I/Zk1p/sFvEoFAqFeuPjJBVFKThwrSaHHycllbQADRL/omkqKq4Tlpm/MlCpZKVmgk8O",
+ "/TeitGR8NZlOmPm1pHo9mU44LaBpY/pPJxJ+rZiEbHKoZQXTiUrXUFAzsN6WpnU90iZZicQNcWSHOH4x",
+ "+bTjA80yCUr1ofyB51vCeJpXGRAtKVc0NZ8UuWR6TfSaKeI6E8aJ4EDEkuh1qzFZMsgzNfOL/LUCuQ1W",
+ "6SYfXtKnBsREihz6cD4XxYJx8FBBDVS9IUQLksESG62pJmYGA6tvqAVRQGW6Jksh94BqgQjhBV4Vk8P3",
+ "EwU8A4m7lQK7wP8uJcBvkGgqV6AnH6axxS01yESzIrK0Y4d9CarKtSLYFte4YhfAiek1I68rpckCCOXk",
+ "3XfPyaNHj56ahRRUa8gckQ2uqpk9XJPtPjmcZFSD/9ynNZqvhKQ8S+r27757jvOfuAWObUWVgvhhOTJf",
+ "yPGLoQX4jhESYlzDCvehRf2mR+RQND8vYCkkjNwT2/hWNyWc/4vuSkp1ui4F4zqyLwS/Evs5ysOC7rt4",
+ "WA1Aq31pMCXNoO8PkqcfPj6YPjj49G/vj5L/dn8+efRp5PKf1+PuwUC0YVpJCTzdJisJFE/LmvI+Pt45",
+ "elBrUeUZWdML3HxaIKt3fYnpa1nnBc0rQycsleIoXwlFqCOjDJa0yjXxE5OK54ZNmdEctROmSCnFBcsg",
+ "mxrue7lm6ZqkVNkhsB25ZHluaLBSkA3RWnx1Ow7TpxAlBq5r4QMX9M+LjGZdezABG+QGSZoLBYkWe64n",
+ "f+NQnpHwQmnuKnW1y4qcroHg5OaDvWwRd9zQdJ5vicZ9zQhVhBJ/NU0JW5KtqMglbk7OzrG/W43BWkEM",
+ "0nBzWveoObxD6OshI4K8hRA5UI7I8+eujzK+ZKtKgiKXa9Brd+dJUKXgCohY/ANSbbb9v05+eEOEJK9B",
+ "KbqCtzQ9J8BTkQ3vsZs0doP/Qwmz4YValTQ9j1/XOStYBOTXdMOKqiC8KhYgzX75+0ELIkFXkg8BZEfc",
+ "Q2cF3fQnPZUVT3Fzm2lbgpohJabKnG5n5HhJCrr59mDqwFGE5jkpgWeMr4je8EEhzcy9H7xEiopnI2QY",
+ "bTYsuDVVCSlbMshIPcoOSNw0++Bh/GrwNJJVAI4fZBCcepY94HDYRGjGHF3zhZR0BQHJzMiPjnPhVy3O",
+ "gdcMjiy2+KmUcMFEpepOAzDi1LvFay40JKWEJYvQ2IlDh+Eeto1jr4UTcFLBNWUcMsN5EWihwXKiQZiC",
+ "CXcrM/0rekEVfPN46AJvvo7c/aXo7vrOHR+129gosUcyci+ar+7AxsWmVv8Ryl84t2KrxP7c20i2OjVX",
+ "yZLleM38w+yfR0OlkAm0EOEvHsVWnOpKwuEZv2/+Igk50ZRnVGbml8L+9LrKNTthK/NTbn96JVYsPWGr",
+ "AWTWsEa1KexW2H/MeHF2rDdRpeGVEOdVGS4obWmliy05fjG0yXbMqxLmUa3KhlrF6cZrGlftoTf1Rg4A",
+ "OYi7kpqG57CVYKCl6RL/2SyRnuhS/mb+Kcvc9NblMoZaQ8fuvkXbgLMZHJVlzlJqkPjOfTZfDRMAqyXQ",
+ "psUcL9TDjwGIpRQlSM3soLQsk1ykNE+UphpH+ncJy8nh5N/mjXFlbrureTD5K9PrBDsZedTKOAktyyuM",
+ "8dbINWoHszAMGj8hm7BsDyUixu0mGlJihgXncEG5njX6SIsf1Af4vZupwbcVZSy+O/rVIMKJbbgAZcVb",
+ "2/COIgHqCaKVIFpR2lzlYlH/cPeoLBsM4vejsrT4QNEQGEpdsGFKq3u4fNqcpHCe4xcz8n04NsrZgudb",
+ "czlYUcPcDUt3a7lbrDYcuTU0I95RBLdTyJnZGo8GI8PfBsWhzrAWuZF69tKKafwX1zYkM/P7qM5/DBIL",
+ "cTtMXKhFOcxZBQZ/CTSXux3K6ROOs+XMyFG37/XIxowSJ5hr0crO/bTj7sBjjcJLSUsLoPti71LGUQOz",
+ "jSysN+SmIxldFObgDAe0hlBd+6ztPQ9RSJAUOjA8y0V6/heq1rdw5hd+rP7xw2nIGmgGkqypWs8mMSkj",
+ "PF7NaGOOmGmI2jtZBFPN6iXe1vL2LC2jmgZLc/DGxRKLeuyHTA9kRHf5Af9Dc2I+m7NtWL8ddkZOkYEp",
+ "e5ydByEzqrxVEOxMpgGaGAQprPZOjNZ9JSifN5PH92nUHr20BgO3Q24RuENic+vH4JnYxGB4Jja9IyA2",
+ "oG6DPsw4KEZqKNQI+F44yATuv0MflZJu+0jGsccg2SzQiK4KTwMPb3wzS2N5PVoIeT3u02ErnDT2ZELN",
+ "qAHznXaQhE2rMnGkGLFJ2QadgRoX3m6m0R0+hrEWFk40/R2woMyot4GF9kC3jQVRlCyHWyD9dZTpL6iC",
+ "Rw/JyV+Onjx4+PPDJ98YkiylWElakMVWgyJ3nW5GlN7mcK+/MtSOqlzHR//msbdCtseNjaNEJVMoaNkf",
+ "ylo3rQhkmxHTro+1Nppx1TWAYw7nKRhObtFOrOHegPaCKSNhFYtb2YwhhGXNLBlxkGSwl5iuurxmmm24",
+ "RLmV1W2osiClkBH7Gh4xLVKRJxcgFRMRV8lb14K4Fl68Lbu/W2jJJVXEzI2m34qjQBGhLL3h4/m+Hfp0",
+ "wxvc7OT8dr2R1bl5x+xLG/nekqhICTLRG04yWFSrlia0lKIglGTYEe/oV2y11oHI8lYKsbz1Wzs6S2xJ",
+ "+MEKfLnp0xf73ogMjNpdqVtg781gDfYM5YQ4owtRaUIJFxmgjl6pOOMfcPSihwkdYzq8S/TaynALMPpg",
+ "Siuz2qok6Pbp0WLTMaGppaIEUaMG7OK1Q8O2stNZJ2IugWZGTwROxMIZn51ZHBdJ0WelPet0105Ec27B",
+ "VUqRglJGv7da217QfDtLlnoHnhBwBLiehShBllTeGNjzi71wnsM2QQ+rInf/+pO69wXg1ULTfA9isU0M",
+ "vbUK4TwMfajHTb+L4LqTh2RHJRDP+4y+YhhEDhqGUHglnAzuXxei3i7eHC0XINHW/7tSvJ/kZgRUg/o7",
+ "0/tNoa3KgbghJzqfsgItQZxyoSAVPFPRwXKqdLKPLZtGLfnerCDghDFOjAMPWCNfUaWtf4rxDNVqe53g",
+ "PNZMaaYYBnhQxDEj/+Slm/7YqbkHuapULeqoqiyF1JDF1sBhs2OuN7Cp5xLLYOxantKCVAr2jTyEpWB8",
+ "hyy7EosgqmszrnPg9heHxk5zz2+jqGwB0SBiFyAnvlWA3TB2YgAQphpEW8JhqkM5dcDGdKK0KEvDLXRS",
+ "8brfEJpObOsj/WPTtk9cVDf3dibAzK49TA7yS4tZGzWzpkYpw5FJQc+N7IEqlnWk9WE2hzFRjKeQ7KJ8",
+ "cyxPTKvwCOw5pAParYvLC2brHI4O/UaJbpAI9uzC0IIHVO23VGqWshIlxb/C9tYF5+4EUQMwyUBTZtS/",
+ "4IMVosuwP7Ge0e6Y1xOkR2lFffB7alFkOTlTeGG0gT+HLXqC3tqQm9MgUOcWNIHIqOZ0U04QUO/INwJM",
+ "2AQ2NNX51lxzeg1bcgkSiKoWBdPaxlC1FQUtyiQcIGpx2jGjM6/acBW/A2PsvSc4VLC8/lZMJ1ai2g3f",
+ "aUesaqHDSVKlEPkIT1sPGVEIRnniSCnMrjMXsufjujwltYB0Qgza1mvmeUe10IwrIP9HVCSlHAXWSkN9",
+ "IwiJbBavXzODucDqOZ3PrcEQ5FCAlcPxy/373YXfv+/2nCmyhEsf52oadtFx/z5qwW+F0q3DdQsmGHPc",
+ "jiO8HU1x5qJwMlyXp+z3+biRx+zk287gtf3OnCmlHOGa5d+YAXRO5mbM2kMaGefvwnFHWdmCoWPrxn3H",
+ "gIPfx0bTDB2Drj9x4KZtPg55ao18lW9vgU/bgYiEUoLCUxXqJcp+FcswFNodO7VVGoq+6cZ2/XlAsHnn",
+ "xYKelCl4zjgkheCwjWb/MA6v8WOstz3ZA52Rxw717YpNLfg7YLXnGUOFN8Uv7nZAym/rEIVb2PzuuB2r",
+ "XRgEjlop5CWhJM0Z6qyCKy2rVJ9xilJxcJYjrhwv6w/rSc99k7hiFtGb3FBnnKIbr5aVo+bnJUS04O8A",
+ "vLqkqtUKlO7IB0uAM+5aMU4qzjTOVZj9SuyGlSDRnzKzLQu6JUuao1r3G0hBFpVu35gYq6q00bqsCdFM",
+ "Q8TyjFNNcjAa6GvGTzc4nA8J9TTDQV8KeV5jYRY9DyvgoJhK4i6n7+1XjAZwy1+7yABMHLKfrdHJjN8E",
+ "tG41tJJh/u/d/zx8f5T8N01+O0ie/sf8w8fHn+7d7/348NO33/6/9k+PPn177z//PbZTHvZYJKWD/PiF",
+ "kyaPX6DI0FiderB/NotDwXgSJbLTNZCCcQzI79AWuWsEH09A9xqzntv1M6433BDSBc1ZRvX1yKHL4npn",
+ "0Z6ODtW0NqKjQPq1fohFR6xEUtL0HD22kxXT62oxS0Ux91L0fCVqiXqeUSgEx2/ZnJZsrkpI5xcP9lzp",
+ "N+BXJMKuOkz22gJB398bj35Gg6oLaMaTt6y4JYpKOaMuBvd5v5tYTusId5vZekgw/HlNvdPY/fnwyTeT",
+ "aRO2XH83mrr9+iFyJli2iQWnZ7CJSWruqOERu6NISbcKdJwPIexRF6P1S4XDFmBEfLVm5efnOUqzRZxX",
+ "+pApp/Ft+DG3sUzmJKJ5duusPmL5+eHWEiCDUq9jGW8tmQNbNbsJ0HGZlVJcAJ8SNoNZV+PKVqC8szMH",
+ "usTMKzQxijEhoPU5sITmqSLAeriQUWpNjH5QTHZ8/9N04sQIdeuSvRs4Bld3ztoW6//Wgtz5/uUpmTvW",
+ "q+7YPAk7dBDZHrFkuODNljPVcDOb52sTRc74GX8BS8aZ+X54xjOq6XxBFUvVvFIgn9Gc8hRmK0EOfTzo",
+ "C6rpGe/JbIOp+EEkLimrRc5Sch7K1g152vTK/ghnZ+8Nxz87+9DzK/UlYTdVlL/YCZJLptei0onLH0sk",
+ "XFKZRUBXdf4QjmyzP3fNOiVubMuKXX6aGz/O82hZqm4eQX/5ZZmb5QdkqFyUvNkyorSQXqoxoo6FBvf3",
+ "jXAXg6SXPvmwUqDILwUt3zOuP5DkrDo4eASkFVj/ixMeDE1uS2jZvK6V59C1d+HCrYYEGy1pUtIVqOjy",
+ "NdASdx8l7wKtq3lOsFsroN8HLOFQzQI8PoY3wMJx5eBkXNyJ7eULAcSXgJ9wC7GNETcap8V19ysI8b/2",
+ "dnXSBHq7VOl1Ys52dFXKkLjfmTo/eGWELO9JUmzFzSFwqdQLIOka0nPIMKsTilJvp63u3lnpRFbPOpiy",
+ "2c82QBdT9NA8uABSlRl1Qj3l226ulAKtfYLYOziH7aloMvyukhzVztVRQwcVKTWQLg2xhsfWjdHdfOcR",
+ "x/yEsvQpLxj77MnisKYL32f4IFuR9xYOcYwoWrkkQ4igMoIIS/wDKLjGQs14NyL92PKMvrKwN18kWdrz",
+ "fuKaNGqYc16Hq8EUGfu9ACylIC4VWVAjtwtXBcDmowRcrFJ0BQMScmihHZn10bLq4iD77r3oTSeW3Qut",
+ "d99EQbaNE7PmKKWA+WJIBZWZTsiCn8k6AXAFM4LFfRzCFjmKSXVsh2U6VLYs5bZayRBocQIGyRuBw4PR",
+ "xkgo2ayp8gUKsI6DP8ujZIDfMb9qV1btceBtD4o11Dmznud2z2lPu3S5tT6h1mfRhqrliIxYI+FjgF9s",
+ "OwRHASiDHFZ24baxJ5Qm16vZIAPHD8tlzjiQJOa4p0qJlNkKE8014+YAIx/fJ8Qak8noEWJkHICNzi0c",
+ "mLwR4dnkq6sAyV2uGvVjo1ss+BviYbU2lM2IPKI0LJzxgaBJzwGoi/ao769OzBEOQxifEsPmLmhu2JzT",
+ "+JpBesmdKLZ2Ujmde/XekDi7w5ZvL5YrrcleRddZTSgzeaDjAt0OiBdik9i4+qjEu9gsDL1Ho/swyj92",
+ "MG0a7R1FFmKDLnu8Wmw02R5YhuHwYAQa/oYppFfsN3SbW2B2TbtbmopRoUKScea8mlyGxIkxUw9IMEPk",
+ "cjfIjL0WAB1jR1NDzim/e5XUtnjSv8ybW23aVHzwgdOx4z90hKK7NIC/vhWmzmV925VYonaKtue5ncYb",
+ "iJAxojdsou/u6TuVFOSASkHSEqKS85gT0Og2gDfOie8WGC8wWZjy7b0gnEHCiikNjTneXMzev/S5zZMU",
+ "a5QIsRxenS7l0qzvnRD1NWWT4LFja5mffQUXQkOyZFLpBH0Z0SWYRt8pVKq/M03jslI7YMKW62JZnDfg",
+ "tOewTTKWV3F6dfP+9YWZ9k3NElW1QH7LOAGarskCy8tFw6h2TG0j7XYu+JVd8Ct6a+sddxpMUzOxNOTS",
+ "nuMPci46nHcXO4gQYIw4+rs2iNIdDBJlnxeQ61gGZCA32cOZmYazXdbX3mHK/Nh7A1AsFMN3lB0pupbA",
+ "YLBzFQzdREYsYTqoztbP6hk4A7QsWbbp2ELtqIMaM72SwcOXvehgAXfXDbYHA4HdMxZYLEG1K5w0Ar6t",
+ "s9dKMJ6Nwsxpuw5JyBDCqZjyVWL7iKoTD/bh6hRo/lfY/mTa4nImn6aTm5lOY7h2I+7B9dt6e6N4Rie/",
+ "NaW1PCFXRDktSykuaJ44A/MQaUpx4UgTm3t79GdmdXEz5unLo1dvHfifppM0ByqTWlQYXBW2K/8wq7LF",
+ "VAYOiK9CaXQ+L7NbUTLY/LoCRGiUvlyDq/gXSKO90kSNwyE4is5IvYzHGu01OTvfiF3iDh8JlLWLpDHf",
+ "WQ9J2ytCLyjLvd3MQzsQF4SLG1ffKsoVwgFu7F0JnGTJrbKb3umOn46GuvbwpHCuHTUJC1t2UxHBuy50",
+ "I0KiOQ5JtaBYWMhaRfrMiVcFWhISlbM0bmPlC2WIg1vfmWlMsPGAMGpGrNiAK5ZXLBjLNFMjFN0OkMEc",
+ "UWT6IlVDuFsIVy+94uzXCgjLgGvzSeKp7BxUrOTkrO3969TIDv253MDWQt8MfxMZIyyq1b3xEIjdAkbo",
+ "qeuB+6JWmf1Ca4uU+SFwSVzB4R/O2LsSdzjrHX04arZhkOu2xy0sb97nf4YwbCnM/bXVvfLqqnsNzBGt",
+ "lc5UspTiN4jreageR7IOfBkxhlEuvwGfRZK3uiymtu40Jd+b2Qe3e0i6Ca1Q7SCFAarHnQ/ccljPyFuo",
+ "KbdbbUsXt2Ld4gQTxqfO7fgNwTiYezG9Ob1c0FixJyNkGJiOGgdwy5auBfGdPe6d2Z+5ym4zEviS67bM",
+ "5uOVIJuEoH5u/zUFBjvtaFGhkQyQakOZYGr9f7kSkWEqfkm5rYBt+tmj5HorsMYv0+tSSMymVXGzfwYp",
+ "K2gelxyytG/izdiK2frPlYKgwLAbyBbOt1TkijRbF3uDmuMlOZgGJczdbmTsgim2yAFbPLAtFlQhJ68N",
+ "UXUXszzgeq2w+cMRzdcVzyRkeq0sYpUgtVCH6k3tvFqAvgTg5ADbPXhK7qLbTrELuGew6O7nyeGDp2h0",
+ "tX8cxC4AV+h9FzfJkJ38zbGTOB2j39KOYRi3G3UWzQ21r3MMM64dp8l2HXOWsKXjdfvPUkE5XUE8UqTY",
+ "A5Pti7uJhrQOXnhmS8srLcWWMB2fHzQ1/Gkgjt2wPwsGSUVRMF04544ShaGnpnqwndQPZ+vUu8JvHi7/",
+ "EX2kpXcRdZTIz2s0tfdbbNXoyX5DC2ijdUqoTaHOWRO94MtRkmNfoQEr4dUF8CxuzFxm6SjmYDDDkpSS",
+ "cY2KRaWXyZ9JuqaSpob9zYbATRbfPI5U/2tXoeJXA/yz412CAnkRR70cIHsvQ7i+5C4XPCkMR8nuNXkj",
+ "wakcdObG3XZDvsPdQ48VyswoySC5VS1yowGnvhHh8R0D3pAU6/VciR6vvLLPTpmVjJMHrcwO/fjulZMy",
+ "CiFjZZea4+4kDglaMrjA2L34Jpkxb7gXMh+1CzeB/st6HrzIGYhl/izHFIFnIqKd+oqUtSXdxapHrAND",
+ "x9R8MGSwcENNSbv63+d3+nnjc9/5ZL54WPGPLrBfeEsRyX4FA5sYVCaNbmdWfw/835Q8E5uxm9o5IX5j",
+ "/wlQE0VJxfLspya/s1P4VVKerqP+rIXp+HPzREW9OHs/RasbrSnnkEeHs7Lgz15mjEi1/xBj5ykYH9m2",
+ "W4vWLrezuAbwNpgeKD+hQS/TuZkgxGo74a0OqM5XIiM4T1NKp+Ge/RrGQaXJXytQOpY8hB9sUBfaLY2+",
+ "awsdEuAZaosz8r19Ym4NpFXpA7U0VlS5rRoB2QqkM6hXZS5oNiVmnNOXR6+IndX2sYXWbaHFFSop7VV0",
+ "7FVBlbBx4cG+Zno8dWH8OLtjqc2qlcbCO0rTooylmZoWp74B5rKGNnxUX0LszMgLqzkqr5fYSQw9LJks",
+ "jMZVj2ZlF6QJ8x+tabpGlazFUodJfnyFUE+VKniVp66uX5fOwnNn4HZFQm2N0CkRRm++ZMq+LAYX0M5s",
+ "rdO8nUnAZ7q2lycrzi2lRGWPXWUIroN2D5wN1PBm/ihkHcRfUSC3BXavWjD1BHtFa9F0q6/2nuOx2Y11",
+ "1XT/YmRKueAsxUowsavZvVI2xgc2omhO18jqj7g7oZHDFa35WofJOSwOVoH1jNAhrm+ED76aTbXUYf/U",
+ "+BzWmmqyAq0cZ4Ns6ksXOzsg4wpcKTR8sC7gk0K2/IrIIaOu6qR2aVyRjDAtZkCx+858e+PUfowXP2cc",
+ "BXyHNheabi11+IiSNloB02QlQLn1tHOD1XvTZ4ZpshlsPsz8o0s4hnXLmWVbH3R/qCPvkXYeYNP2uWlr",
+ "i6I0P7cikO2kR2XpJh0ubB2VB/SGDyI44llMvGsnQG49fjjaDnLbGUqC96khNLhARzSUeA/3CKMu8tx5",
+ "QMAIrZaisAWxIVzRWgiMR8B4xTg0T4JFLog0eiXgxuB5HeinUkm1FQFH8bRToDl6n2MMTWnnerjpUJ0N",
+ "RpTgGv0cw9vY1KceYBx1g0Zwo3xbv0RmqDsQJp7jE4gOkf1q0yhVOSEqw4yCTv3pGOMwjNtXuG9fAP1j",
+ "0JeJbHctqT05V7mJhpJEF1W2Ap3QLIvVkHyGXwl+JVmFkgNsIK3qGnxlSVKsrtIuN9OnNjdRKriqih1z",
+ "+QY3nC4VMTn6DU6gfMpEM/iMIPs1rPfFy7fvXj4/On35wt4XiqjKZokamVtCYRjijBxzpcGIzpUC8kuI",
+ "xl+w3y+dBcfBDOrOR4g2rH3vCRFzZRZb/DdWJ2+YgFysyJWjFX1gCHa8snjfHqknnJujlyi2SsZjAq++",
+ "m6Ojmfp657Hpf6sHMherNiCfuYLFLmYc7lGMDb8091tY4KFX/NHegHX9BYwNFP61INRu68zhNvPEG7dX",
+ "DRJ9UvVrJLvtJMPvikzxjh6IEA7qdlArBlgn51CccDoY1k61S7DTlOzklINJSzbIyKYn2UexowbeocAi",
+ "G1dkPvd6jxNge+oAjr0ToT5irQ/QX304LCkpcx78hln0MesC54etmrsOXbPB3UW4cPRBw2L8cYfhEjpN",
+ "2Ry8BkqhWFOwNvbqw8hwqVN8uCEoAdQfy8cqXECqjVAf+GAlwFUKApnJgjdqvpbSGVA/6qgyV0FnV9mc",
+ "fmniPcyml9kSZGfZsq6z8UVijupIG/T/4ysxK+DumZh2zProyNnlElLNLvZkEv3NaKlNlsrU67H2ubcg",
+ "sYjVkZj+Gf4rqtcNQLsSfXbCE5SWuzE4Q3kE57C9o0iLGqJ1Zqee512nBgFiALlDYkhEqJgn2xrenHOR",
+ "qZoyEAs+csR2h6aa02CB/yAv7ppzeZIkNMyV2zHlhYhp7qPmMl2vlEGKQYVDyUb9EtvDgtALrGiu6sd3",
+ "6nf2A62GHPcrvV26GgiY91Xbmn01BFD+N5/kaWfJ2TmETxCgZf+Sysy3iKqqXgtOdtxHvQwhXx66C/Sy",
+ "npk1cX79nJBI7SCM5kxzoRhfJUMhse3QuvDtVwwgwOsAa5cjXEuQ7qkWNCHnQkGihY8L3AXHLlS4d0qv",
+ "gwQ1WK/PAjdYReNdUyYEK6BSrJpBXXBEuECjt1IDnQyKeQzPuQvZz+13nwThK2CO0MgdvSZ7q3H4CE+m",
+ "ekgMqX5J3G25P7niOlov49w+NaZilT24QWVoPS6lyKrUXtDhwWhsDGPr5uxgJVGFMe2vsif751hF6lWQ",
+ "qnYO27mVv9M15U05r/axtiKUXUOQGt7Z7Vs1CMR1n3xlF7C6FTi/pFI9nZRC5MmAufi4X6CkewbOWXoO",
+ "GTF3h4+NGijyT+6ilbL2B16ut74gR1kCh+zejBCjlhel3nrXYLvWbmdyfkfvmn+Ds2aVrRnk9P3ZGY+H",
+ "9WE1H3lD/uaH2c3VFBjmd8Op7CB7yl9sBoqjSHoZefJi7IvGEWdd9xmChqgsFDEp5Zq50KPOd1/nj5B+",
+ "UId/t/YTlkpoYrCkNR2htOQNOl3h5XVjERr3IoDvsAe8UCkO3gTw3MiB84UDpV7XSAmWMkgJreXv07P9",
+ "Q9w1Xwq2SGFkvVmmLVxjneztfQmMKOp5bZuI47lvwsC6CIJjrZi+6UOhKRFLzoaEY86lvKD55zdfYMGM",
+ "I8SHe9gqvtBQ/w2RbFGprhet8IqOmjvQdW9vav4WzS1/A7NHURuwG8rZUeu3GHwJSSyNRnOSi+ZNFhyS",
+ "XOKY1mj84BuycJHWpYSUKdZJQrn01TBrdQ+LQzfvne3WL/et8yehb0DGTkEQJXnTVNbTAu+HBsLmiH5h",
+ "pjJwcqNUHqO+HllE8BfjUWHK857r4rxlTbaVSjvRHELCLVuVAzf2Fa3K/WTuscvDdeClUynor3P0bd3C",
+ "beSibtY21iXSR+6u8mtjPBnxqoqmO7pSLEKwJClBUMkvD34hEpb45oAg9+/jBPfvT13TXx62P5vjfP9+",
+ "VIz7bE6U1tPvbt4Yxfw0FP1nI9wGAk07+1GxPNtHGK2w4eb9DwyM/dklDnyRF0h+tvbU/lF1tduv4r7t",
+ "bgIiJrLW1uTBVEFA8IhYYNdtFn2cX0FaSaa3WM/Am9/Yz9E6Ud/XFnvn8akzYN3dp8U51BUxGvt+pfzt",
+ "+r2wj/kXRqZG57nGx+BebmhR5uAOyrd3Fn+CR39+nB08evCnxZ8Pnhyk8PjJ04MD+vQxffD00QN4+Ocn",
+ "jw/gwfKbp4uH2cPHDxePHz7+5snT9NHjB4vH3zz90x3DhwzIFtCJz56b/B2f6UmO3h4npwbYBie0ZPUb",
+ "kIaM/QsBNMWTCAVl+eTQ//S//QmbpaJohve/TlxyzmStdakO5/PLy8tZ2GW+QoNeokWVrud+nv7be2+P",
+ "6wBrm/CNO2pjZw0p4KY6UjjCb+9enpySo7fHs4ZgJoeTg9nB7AG+rFUCpyWbHE4e4U94eta473NHbJPD",
+ "j5+mk/kaaI7+L/NHAVqy1H9Sl3S1AjlzTyWYny4ezr0oMf/ojJmfdn2bh1VH5x9bNt9sT0+sSjj/6JPt",
+ "d7duZbM7W7dZ7ipWguJ7CJ4eDEoit2xti603106Jqt+nLSUT5iRNzbWYQSqBIt0LiQHOzSOGTn8B+yDv",
+ "66O/o7X99dHfybfkYOri3hWqGrHprT2jJoHjzIIdeWTz2fao9h4EpbgO38fexYw94YBHyNBHQOH1iA0H",
+ "07KCsERUw48Njz1Inn74+OTPn2JyXv8RMo+kgUcwtfAJ6Yi0gm6+HULZxp4OXMOvFchts4iCbiYhwH0f",
+ "TOQ9siVbVbLzdHEdSuIq+TNF/uvkhzdESOL02rc0PQ9DpWPguPsshMgXVnYB1YVale3owxqHHzBDFaHA",
+ "U/zw4ODrU63/M55qnba21tPI1939+hDvv8ZDvI+vyMp2modbwYGjzs5Vhutt1mu6qSuRUMIFTziWiL8A",
+ "Euh5jw8e/GFXeMwxwsXImsTK0p+mkyd/4C075kZqoTnBlnY1j/6wqzkBecFSIKdQlEJSyfIt+ZHXKW5B",
+ "WZs++/uRn3NxyT0ijJpYFQWVWych05rnVDxIOtzJf3rOwUaKRi5KVwr9SCh/TlpPofDV5MMnL+CP1Bp2",
+ "NZsvMOd+bFNQQeNh1QMdAmr+EU3ag7/PXSJy/CO6FqzOOveBTPGWLa3mo94YWDs9UqrTdVXOP+J/UIcM",
+ "wLKJuHP76nDzc/cFmtjP84/tCsgtNKh1pTNxGfRFU7f10/SxU78J0vp7fkmZNve6iyHDClX9zhpoPncJ",
+ "dp1fm2Dx3heMgA9+7EgCpcvdaGtY7+hlKFfYOx6Ufiay7Q4esUkWjOPBCQ92Y8CyH/tSff+50zXYwo7e",
+ "BxgRm7QgCylollKFhY9cKmpPV/t0Q5Xhj/i4++8pS/QgekYz4pPmE/Ka5mbDISNHTmJtYeP3lgO+/MX9",
+ "hW/az3Y1PvOHTxGKARedwxmkh4+58oyOY876CnjiuE2yENnWV7qU9FJvbCRGl4/N65Kl0Y+3YAT757Z8",
+ "7TN4fbUzfbUzfbVEfLUzfd3dr3amr1aYr1aY/7FWmKuYXmIypDM9DIuSWBOMtua1OhptcolqFh82mxKm",
+ "a4GrX/6R6Rkhp5ipQc0tARcgaY4lslWQelVgzJ6q0hQgOzzjSQsSGxlnJr7b/NeGJLoXiA/udfsozfI8",
+ "5M39vijM4iebcP4tOZucTXojYTEHyGwCaBi5bnvtHfZ/1eP+0EuCwdxBfPfSx9oTVS2XLGUW5bngK0JX",
+ "ogmnNXybcIFfACtN2FRiwvTUPbrAFLk0i3eV2doB9m2xvC8BHDdbuNcd3SGXuCfaEN4V3dD/McYH/a8r",
+ "gl836+emXHLn2D2W+ZVlfA6W8cWZxh/dwRfY+P4lZcjHB4//sAsKLcJvhCbfYRz4zWStupRlLF16tBTV",
+ "BIuGwZd4B9Zhl+8/GE6PhePd9djEEh7O55hxuRZKzyfm8mrHGYYfP9RA+YrCk1KyC6z88+HT/w8AAP//",
+ "Cjn3ciPIAAA=",
+}
+
+// GetSwagger returns the content of the embedded swagger specification file
+// or error if failed to decode
+func decodeSpec() ([]byte, error) {
+ zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, ""))
+ if err != nil {
+ return nil, fmt.Errorf("error base64 decoding spec: %s", err)
+ }
+ zr, err := gzip.NewReader(bytes.NewReader(zipped))
+ if err != nil {
+ return nil, fmt.Errorf("error decompressing spec: %s", err)
+ }
+ var buf bytes.Buffer
+ _, err = buf.ReadFrom(zr)
+ if err != nil {
+ return nil, fmt.Errorf("error decompressing spec: %s", err)
+ }
+
+ return buf.Bytes(), nil
+}
+
+var rawSpec = decodeSpecCached()
+
+// a naive cached of a decoded swagger spec
+func decodeSpecCached() func() ([]byte, error) {
+ data, err := decodeSpec()
+ return func() ([]byte, error) {
+ return data, err
+ }
+}
+
+// Constructs a synthetic filesystem for resolving external references when loading openapi specifications.
+func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) {
+ var res = make(map[string]func() ([]byte, error))
+ if len(pathToFile) > 0 {
+ res[pathToFile] = rawSpec
+ }
+
+ return res
+}
+
+// GetSwagger returns the Swagger specification corresponding to the generated code
+// in this file. The external references of Swagger specification are resolved.
+// The logic of resolving external references is tightly connected to "import-mapping" feature.
+// Externally referenced files must be embedded in the corresponding golang packages.
+// Urls can be supported but this task was out of the scope.
+func GetSwagger() (swagger *openapi3.T, err error) {
+ var resolvePath = PathToRawSpec("")
+
+ loader := openapi3.NewLoader()
+ loader.IsExternalRefsAllowed = true
+ loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) {
+ var pathToFile = url.String()
+ pathToFile = path.Clean(pathToFile)
+ getSpec, ok := resolvePath[pathToFile]
+ if !ok {
+ err1 := fmt.Errorf("path not found: %s", pathToFile)
+ return nil, err1
+ }
+ return getSpec()
+ }
+ var specData []byte
+ specData, err = rawSpec()
+ if err != nil {
+ return
+ }
+ swagger, err = loader.LoadFromData(specData)
+ if err != nil {
+ return
+ }
+ return
+}
diff --git a/daemon/algod/api/server/v2/generated/private/routes.go b/daemon/algod/api/server/v2/generated/private/routes.go
deleted file mode 100644
index 2603a9022..000000000
--- a/daemon/algod/api/server/v2/generated/private/routes.go
+++ /dev/null
@@ -1,495 +0,0 @@
-// Package private provides primitives to interact the openapi HTTP API.
-//
-// Code generated by github.com/algorand/oapi-codegen DO NOT EDIT.
-package private
-
-import (
- "bytes"
- "compress/gzip"
- "encoding/base64"
- "fmt"
- "github.com/algorand/oapi-codegen/pkg/runtime"
- "github.com/getkin/kin-openapi/openapi3"
- "github.com/labstack/echo/v4"
- "net/http"
- "strings"
-)
-
-// ServerInterface represents all server handlers.
-type ServerInterface interface {
- // Aborts a catchpoint catchup.
- // (DELETE /v2/catchup/{catchpoint})
- AbortCatchup(ctx echo.Context, catchpoint string) error
- // Starts a catchpoint catchup.
- // (POST /v2/catchup/{catchpoint})
- StartCatchup(ctx echo.Context, catchpoint string) error
- // Return a list of participation keys
- // (GET /v2/participation)
- GetParticipationKeys(ctx echo.Context) error
- // Add a participation key to the node
- // (POST /v2/participation)
- AddParticipationKey(ctx echo.Context) error
- // Delete a given participation key by ID
- // (DELETE /v2/participation/{participation-id})
- DeleteParticipationKeyByID(ctx echo.Context, participationId string) error
- // Get participation key info given a participation ID
- // (GET /v2/participation/{participation-id})
- GetParticipationKeyByID(ctx echo.Context, participationId string) error
- // Append state proof keys to a participation key
- // (POST /v2/participation/{participation-id})
- AppendKeys(ctx echo.Context, participationId string) error
-
- // (POST /v2/shutdown)
- ShutdownNode(ctx echo.Context, params ShutdownNodeParams) error
-}
-
-// ServerInterfaceWrapper converts echo contexts to parameters.
-type ServerInterfaceWrapper struct {
- Handler ServerInterface
-}
-
-// AbortCatchup converts echo context to params.
-func (w *ServerInterfaceWrapper) AbortCatchup(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "catchpoint" -------------
- var catchpoint string
-
- err = runtime.BindStyledParameter("simple", false, "catchpoint", ctx.Param("catchpoint"), &catchpoint)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter catchpoint: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.AbortCatchup(ctx, catchpoint)
- return err
-}
-
-// StartCatchup converts echo context to params.
-func (w *ServerInterfaceWrapper) StartCatchup(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "catchpoint" -------------
- var catchpoint string
-
- err = runtime.BindStyledParameter("simple", false, "catchpoint", ctx.Param("catchpoint"), &catchpoint)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter catchpoint: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.StartCatchup(ctx, catchpoint)
- return err
-}
-
-// GetParticipationKeys converts echo context to params.
-func (w *ServerInterfaceWrapper) GetParticipationKeys(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.GetParticipationKeys(ctx)
- return err
-}
-
-// AddParticipationKey converts echo context to params.
-func (w *ServerInterfaceWrapper) AddParticipationKey(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.AddParticipationKey(ctx)
- return err
-}
-
-// DeleteParticipationKeyByID converts echo context to params.
-func (w *ServerInterfaceWrapper) DeleteParticipationKeyByID(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "participation-id" -------------
- var participationId string
-
- err = runtime.BindStyledParameter("simple", false, "participation-id", ctx.Param("participation-id"), &participationId)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter participation-id: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.DeleteParticipationKeyByID(ctx, participationId)
- return err
-}
-
-// GetParticipationKeyByID converts echo context to params.
-func (w *ServerInterfaceWrapper) GetParticipationKeyByID(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "participation-id" -------------
- var participationId string
-
- err = runtime.BindStyledParameter("simple", false, "participation-id", ctx.Param("participation-id"), &participationId)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter participation-id: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.GetParticipationKeyByID(ctx, participationId)
- return err
-}
-
-// AppendKeys converts echo context to params.
-func (w *ServerInterfaceWrapper) AppendKeys(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "participation-id" -------------
- var participationId string
-
- err = runtime.BindStyledParameter("simple", false, "participation-id", ctx.Param("participation-id"), &participationId)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter participation-id: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.AppendKeys(ctx, participationId)
- return err
-}
-
-// ShutdownNode converts echo context to params.
-func (w *ServerInterfaceWrapper) ShutdownNode(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- "timeout": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Parameter object where we will unmarshal all parameters from the context
- var params ShutdownNodeParams
- // ------------- Optional query parameter "timeout" -------------
- if paramValue := ctx.QueryParam("timeout"); paramValue != "" {
-
- }
-
- err = runtime.BindQueryParameter("form", true, false, "timeout", ctx.QueryParams(), &params.Timeout)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter timeout: %s", err))
- }
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.ShutdownNode(ctx, params)
- return err
-}
-
-// RegisterHandlers adds each server route to the EchoRouter.
-func RegisterHandlers(router interface {
- CONNECT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
- DELETE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
- GET(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
- HEAD(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
- OPTIONS(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
- PATCH(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
- POST(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
- PUT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
- TRACE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
-}, si ServerInterface, m ...echo.MiddlewareFunc) {
-
- wrapper := ServerInterfaceWrapper{
- Handler: si,
- }
-
- router.DELETE("/v2/catchup/:catchpoint", wrapper.AbortCatchup, m...)
- router.POST("/v2/catchup/:catchpoint", wrapper.StartCatchup, m...)
- router.GET("/v2/participation", wrapper.GetParticipationKeys, m...)
- router.POST("/v2/participation", wrapper.AddParticipationKey, m...)
- router.DELETE("/v2/participation/:participation-id", wrapper.DeleteParticipationKeyByID, m...)
- router.GET("/v2/participation/:participation-id", wrapper.GetParticipationKeyByID, m...)
- router.POST("/v2/participation/:participation-id", wrapper.AppendKeys, m...)
- router.POST("/v2/shutdown", wrapper.ShutdownNode, m...)
-
-}
-
-// Base64 encoded, gzipped, json marshaled Swagger object
-var swaggerSpec = []string{
-
- "H4sIAAAAAAAC/+x9+3PcNtLgv4Ka/ar8uOGM5NeuVbX1nWI5WV0cR2Up2bvP9iUYsmcGKxJgAFDSxKf/",
- "/QoNgARJcIZ6rHKp80+2hng0Go1Gv/FlkoqiFBy4VpODL5OSSlqABol/0TQVFdcJy8xfGahUslIzwScH",
- "/htRWjK+mkwnzPxaUr2eTCecFtC0Mf2nEwm/VUxCNjnQsoLpRKVrKKgZWG9K07oe6SpZicQNcWiHOD6a",
- "XG/5QLNMglJ9KH/k+YYwnuZVBkRLyhVNzSdFLpleE71mirjOhHEiOBCxJHrdakyWDPJMzfwif6tAboJV",
- "usmHl3TdgJhIkUMfzjeiWDAOHiqogao3hGhBMlhiozXVxMxgYPUNtSAKqEzXZCnkDlAtECG8wKticvBx",
- "ooBnIHG3UmAX+N+lBPgdEk3lCvTk8zS2uKUGmWhWRJZ27LAvQVW5VgTb4hpX7AI4Mb1m5IdKabIAQjn5",
- "8O0b8vz589dmIQXVGjJHZIOramYP12S7Tw4mGdXgP/dpjeYrISnPkrr9h2/f4PynboFjW1GlIH5YDs0X",
- "cnw0tADfMUJCjGtY4T60qN/0iByK5ucFLIWEkXtiG9/rpoTz/6G7klKdrkvBuI7sC8GvxH6O8rCg+zYe",
- "VgPQal8aTEkz6Me95PXnL/vT/b3rv3w8TP7L/fny+fXI5b+px92BgWjDtJISeLpJVhIonpY15X18fHD0",
- "oNaiyjOyphe4+bRAVu/6EtPXss4LmleGTlgqxWG+EopQR0YZLGmVa+InJhXPDZsyozlqJ0yRUooLlkE2",
- "Ndz3cs3SNUmpskNgO3LJ8tzQYKUgG6K1+Oq2HKbrECUGrlvhAxf0/y4ymnXtwARcITdI0lwoSLTYcT35",
- "G4fyjIQXSnNXqZtdVuRsDQQnNx/sZYu444am83xDNO5rRqgilPiraUrYkmxERS5xc3J2jv3dagzWCmKQ",
- "hpvTukfN4R1CXw8ZEeQthMiBckSeP3d9lPElW1USFLlcg167O0+CKgVXQMTiX5Bqs+3/4/TH90RI8gMo",
- "RVdwQtNzAjwV2fAeu0ljN/i/lDAbXqhVSdPz+HWds4JFQP6BXrGiKgivigVIs1/+ftCCSNCV5EMA2RF3",
- "0FlBr/qTnsmKp7i5zbQtQc2QElNlTjczcrwkBb36+97UgaMIzXNSAs8YXxF9xQeFNDP3bvASKSqejZBh",
- "tNmw4NZUJaRsySAj9ShbIHHT7IKH8ZvB00hWATh+kEFw6ll2gMPhKkIz5uiaL6SkKwhIZkZ+cpwLv2px",
- "DrxmcGSxwU+lhAsmKlV3GoARp94uXnOhISklLFmExk4dOgz3sG0cey2cgJMKrinjkBnOi0ALDZYTDcIU",
- "TLhdmelf0Quq4NWLoQu8+Tpy95eiu+tbd3zUbmOjxB7JyL1ovroDGxebWv1HKH/h3IqtEvtzbyPZ6sxc",
- "JUuW4zXzL7N/Hg2VQibQQoS/eBRbcaorCQef+FPzF0nIqaY8ozIzvxT2px+qXLNTtjI/5fand2LF0lO2",
- "GkBmDWtUm8Juhf3HjBdnx/oqqjS8E+K8KsMFpS2tdLEhx0dDm2zHvClhHtaqbKhVnF15TeOmPfRVvZED",
- "QA7irqSm4TlsJBhoabrEf66WSE90KX83/5RlbnrrchlDraFjd9+ibcDZDA7LMmcpNUj84D6br4YJgNUS",
- "aNNijhfqwZcAxFKKEqRmdlBalkkuUponSlONI/2HhOXkYPKXeWNcmdvuah5M/s70OsVORh61Mk5Cy/IG",
- "Y5wYuUZtYRaGQeMnZBOW7aFExLjdRENKzLDgHC4o17NGH2nxg/oAf3QzNfi2oozFd0e/GkQ4sQ0XoKx4",
- "axs+UiRAPUG0EkQrSpurXCzqHx4flmWDQfx+WJYWHygaAkOpC66Y0uoJLp82Jymc5/hoRr4Lx0Y5W/B8",
- "Yy4HK2qYu2Hpbi13i9WGI7eGZsRHiuB2CjkzW+PRYGT4+6A41BnWIjdSz05aMY3/4dqGZGZ+H9X5z0Fi",
- "IW6HiQu1KIc5q8DgL4Hm8rhDOX3CcbacGTns9r0d2ZhR4gRzK1rZup923C14rFF4KWlpAXRf7F3KOGpg",
- "tpGF9Y7cdCSji8IcnOGA1hCqW5+1nechCgmSQgeGb3KRnv+DqvU9nPmFH6t//HAasgaagSRrqtazSUzK",
- "CI9XM9qYI2YaovZOFsFUs3qJ97W8HUvLqKbB0hy8cbHEoh77IdMDGdFdfsT/0JyYz+ZsG9Zvh52RM2Rg",
- "yh5n50HIjCpvFQQ7k2mAJgZBCqu9E6N13wjKN83k8X0atUdvrcHA7ZBbhFl6Yw48XAh5uyPRoXVOGiMn",
- "oWbUgCNMOzuLTasycfiJGEpsg85AjV9pOyV3h4/hqoWFU03/DVhQZtT7wEJ7oPvGgihKlsM9nNd1lBMZ",
- "zfX5M3L6j8OX+89+efbyleEapRQrSQuy2GhQ5LFTGIjSmxye9FeGInuV6/jor15401h73Ng4SlQyhYKW",
- "/aGsyc3ey7YZMe36WGujGVddAzjmWJ6BYS8W7cRakw1oR0yZa79Y3MtmDCEsa2bJiIMkg53EdNPlNdNs",
- "wiXKjazuQ78CKYWMGH3wiGmRijy5AKmYiNjvT1wL4lp4mavs/m6hJZdUETM32iMrjrdchLL0FUfQmIZC",
- "7ZIZ7NBnV7zBjRuQSkk3PfTb9UZW5+Ydsy9t5HvzliIlyERfcZLBolq1xPOlFAWhJMOOeHG8Y6u1Du7R",
- "EynE8t4lqugssSXhByuF5KZPXxZ5LzIwumCl7oG9N4M12DOUE+KMLkSlCSVcZICKY6XijH/A+4huD/TW",
- "6PAu0WsrWCzAKCkprcxqq5KgL6JHi03HhKaWihJEjRow1tZWdtvKTmc9W7kEmhnlBTgRC2cRdbZaXCRF",
- "R4r2rNNdOxF1rgVXKUUKShml06oSO0Hz7SxZ6i14QsAR4HoWogRZUnlLYLXQNN8BKLaJgVvLic6M3Id6",
- "3PTbNrA7ebiNVBq901KBEUrNgctBwxAKR+LkAiSaU/+t++cnue32VeVAsIMTrc5Ygeorp1woSAXPVHSw",
- "nCqd7Dq2plFL/jMrCE5K7KTiwAMmlHdUaWtUZzxDXcCyG5zH2lbMFMMAD16BZuSf/e3XHzs1fJKrStVX",
- "oarKUkgNWWwNHK62zPUeruq5xDIYu75vtSCVgl0jD2EpGN8hy67EIojq2vbkvE79xaGFxtwDmygqW0A0",
- "iNgGyKlvFWA3dPgOAGIUx7onEg5THcqpvczTidKiLM3500nF635DaDq1rQ/1T03bPnFR3fD1TICZXXuY",
- "HOSXFrPW1b+mRmjHkUlBz83dhCK4tf73YTaHMVGMp5Bso3xzLE9Nq/AI7DikA9qPCyYKZuscjg79Rolu",
- "kAh27MLQggdUsRMqNUtZiZLE97C5d8GqO0HUakUy0JQZ9SD4YIWsMuxPrDunO+btBK1RUnMf/J7YHFlO",
- "zhReGG3gz2GD5usTGydwFkQX3IOkGBnVnG7KCQLqvY/mQg6bwBVNdb4x15xew4ZcggSiqkXBtLaBH21B",
- "UosyCQeIWiS2zOhsQtbH7ndgjJHqFIcKltffiunEii3b4TvrCC4tdDiBqRQiH+Ee6CEjCsEo9wEphdl1",
- "5uKMfDCKp6QWkE6IQYNgzTwfqRaacQXkf4mKpJSjAFZpqG8EIZHN4vVrZjAXWD2ncxQ0GIIcCrByJX55",
- "+rS78KdP3Z4zRZZw6YPzTMMuOp4+RS3pRCjdOlz3oKKb43Yc4e1oqjEXhZPhujxlt6HajTxmJ086g9f2",
- "HXOmlHKEa5Z/ZwbQOZlXY9Ye0sg4Iz2OO8oKEwwdWzfuO3pJ/z06fDN0DLr+xIFvqfk45F4y8lW+uQc+",
- "bQciEkoJCk9VqJco+1Usw/hNd+zURmko+qq97frLgGDzwYsFPSlT8JxxSArBYRNNWWAcfsCPsd72ZA90",
- "Rh471LcrNrXg74DVnmcMFd4Vv7jbASmf1H7Ve9j87rgdq04YuYpaKeQloSTNGeqsgistq1R/4hSl4uAs",
- "R0z9XtYf1pPe+CZxxSyiN7mhPnGqDA5rWTlqnlxCRAv+FsCrS6parUDpjnywBPjEXSvGScWZxrkKs1+J",
- "3bASJNrbZ7ZlQTdkSXNU634HKcii0u0bEwPslDZalzUxmWmIWH7iVJMcjAb6A+NnVzicj2PzNMNBXwp5",
- "XmNhFj0PK+CgmEriLonv7Fd0Ybrlr507E7Md7GdrRDHjN1F4Gw2tCP7//fg/Dz4eJv9Fk9/3ktf/bf75",
- "y4vrJ097Pz67/vvf/0/7p+fXf3/yn/8R2ykPeyz8y0F+fOSkyeMjFBka41IP9gezOBSMJ1EiO1sDKRjH",
- "KOIObZHHRvDxBPSkMVO5Xf/E9RU3hHRBc5ZRfTty6LK43lm0p6NDNa2N6CiQfq2fYy7dlUhKmp6jR2+y",
- "YnpdLWapKOZeip6vRC1RzzMKheD4LZvTks1VCen8Yn/HlX4HfkUi7KrDZG8tEPT9gfGQTTRZuihMPHnL",
- "iluiqJQzUmJEkvfLiOW0Dsu16XgHBGM219Q7Fd2fz16+mkybWMv6u9HU7dfPkTPBsqtYRG0GVzFJzR01",
- "PGKPFCnpRoGO8yGEPeqCsn6LcNgCjIiv1qx8eJ6jNFvEeaWP83Aa3xU/5jYAw5xENM9unNVHLB8ebi0B",
- "Mij1Opam05I5sFWzmwAdl0opxQXwKWEzmHU1rmwFyjvDcqBLTBdBE6MYE7dWnwNLaJ4qAqyHCxml1sTo",
- "B8Vkx/evpxMnRqh7l+zdwDG4unPWtlj/txbk0Xdvz8jcsV71yAZ326GDcNyIJcNFnLWcbYab2eREG93+",
- "iX/iR7BknJnvB594RjWdL6hiqZpXCuQ3NKc8hdlKkAMfxHZENf3EezLbYP5wED5IymqRs5Sch7J1Q542",
- "J6w/wqdPHw3H//Tpc89z05eE3VRR/mInSC6ZXotKJy7pJZFwSWUWAV3VSQ84sk1Z2zbrlLixLSt2STVu",
- "/DjPo2WpusHP/eWXZW6WH5ChcqG9ZsuI0kJ6qcaIOhYa3N/3wl0Mkl76jKlKgSK/FrT8yLj+TJJP1d7e",
- "cyCtaOBfnfBgaHJTQsvmdavg7K69CxduNSS40pImJV2Bii5fAy1x91HyLtC6mucEu7WikH1ACw7VLMDj",
- "Y3gDLBw3jqjExZ3aXj57Ob4E/IRbiG2MuNE4LW67X0Fc8q23qxPb3NulSq8Tc7ajq1KGxP3O1EmNKyNk",
- "eU+SYituDoHL/1wASdeQnkOGqWhQlHozbXX3zkonsnrWwZRN2bRRhZhXhObBBZCqzKgT6infdBM8FGjt",
- "s1o+wDlszkSTlnSTjI52goEaOqhIqYF0aYg1PLZujO7mO8c3BlWXpY/Tx4BNTxYHNV34PsMH2Yq893CI",
- "Y0TRCoAfQgSVEURY4h9AwS0Wasa7E+nHlmf0lYW9+SIZnp73E9ekUcOc8zpcDcb12+8FYP63uFRkQY3c",
- "Llzqsg2iD7hYpegKBiTk0EI7MlS9ZdXFQXbde9GbTiy7F1rvvomCbBsnZs1RSgHzxZAKKjOdkAU/k3UC",
- "4ApmBCuSOIQtchST6mgJy3SobFnKbYmFIdDiBAySNwKHB6ONkVCyWVPls6ox+dyf5VEywL8xKWRbKuBx",
- "4G0PMszrRD/Pc7vntKdduoRAnwXoU/9C1XJEGp+R8DEALLYdgqMAlEEOK7tw29gTSpOg0myQgePH5TJn",
- "HEgSc9xTpUTKbFp8c824OcDIx08JscZkMnqEGBkHYKNzCwcm70V4NvnqJkByl2BD/djoFgv+hnjYpQ3N",
- "MiKPKA0LZ3wgqM5zAOqiPer7qxNzhMMQxqfEsLkLmhs25zS+ZpBeRhqKrZ38M+defTIkzm6x5duL5UZr",
- "slfRbVYTykwe6LhAtwXi7aJEbAsU4svZsmpcDd2lY6YeuL6HcPU4yGW7FQAdTb+p+uQ0v50aWvtu7t9k",
- "DUufNjnaPqo0RvtD9BPdpQH89U0QdfbZSfe6jirpbbdrO/EukJ9irNickb6vo+9RUZADSsRJS4JIzmMe",
- "MCPYA7LbU98t0NwxvY/yzZPAly9hxZSGxhZtbiXvXHlo2xzFqgJCLIdXp0u5NOv7IETNo23aKnZsLfPB",
- "V3AhNCRLJpVO0JAfXYJp9K1CjfJb0zQuKLSjBWyBHZbFeQNOew6bJGN5FadXN+/3R2ba97URRlWLc9ig",
- "OAg0XZMFFoSKxhBtmdqGmW1d8Du74Hf03tY77jSYpmZiacilPcef5Fx0OO82dhAhwBhx9HdtEKVbGCRe",
- "/EeQ61h6WCA02MOZmYazbabH3mHK/Ng7oy8sFMN3lB0pupZAW966CoY+EqPuMR3UU+qnPAycAVqWLLvq",
- "GALtqIPqIr2Rtu8T1TtYwN11g+3AQGD0i0XVSlDtmgSNdGsrY/FwbbNRmDlrVw4IGUI4FVO+rmMfUYa0",
- "sfjYLlydAc2/h83Ppi0uZ3I9ndzNbhjDtRtxB65P6u2N4hk93NaO1HID3BDltCyluKB54qyrQ6QpxYUj",
- "TWzujbEPzOriNryzt4fvThz419NJmgOVSS0qDK4K25V/mlXZ8gcDB8TXjTMKj5fZrSgZbH6dsx1aZC/X",
- "4Gp0BdJor5hIY20PjqKz0C7jgTY77a3OMWCXuMVBAGXtH2hsV9Y90HYJ0AvKcm808tAOBMXg4sZVpIly",
- "hXCAO7sWAg9Rcq/spne646ejoa4dPCmca0sVscIWylNE8K7/2IiQaItCUi0olgKxJoE+c+JVkZjjl6ic",
- "pXEDI18oQxzcOo5MY4KNB4RRM2LFBvyQvGLBWKaZGqHodoAM5ogi05eVGcLdQrgKxxVnv1VAWAZcm08S",
- "T2XnoGLtFWdq7l+nRnboz+UGtubpZvi7yBhhGZzujYdAbBcwQjdVD9yjWmX2C63NMeaHwB5/A293OGPv",
- "StziqXb04ajZxgCu2+6msCBxn/8ZwrDF63ZXQ/bKq6vHMzBHtLoxU8lSit8hruehehwJufeFfxiGePwO",
- "fBbJXOqymNq60xRpbmYf3O4h6Sa0QrU99ANUjzsf+KSwAok3z1Jut9oWG20FesUJJgzOnNvxG4JxMPcC",
- "WnN6uaCx8ixGyDAwHTbez5YhWQviO3vcO5s3c7WYZiRwpNZtmU1GK0E22TD9xOdbCgx22tGiQiMZINWG",
- "MsHUOr9yJSLDVPyScluz1vSzR8n1VmCNX6bXpZCYSqriNu8MUlbQPC45ZIj9duptxlbMVmytFAQlQd1A",
- "ttS1pSJXVtX6lxvUHC/J3jQoOux2I2MXTLFFDthi37ZYUIWcvDZE1V3M8oDrtcLmz0Y0X1c8k5DptbKI",
- "VYLUQh2qN7XnZgH6EoCTPWy3/5o8Rp+VYhfwxGDR3c+Tg/3XaHS1f+zFLgBXmnkbN8mQnfzTsZM4HaPT",
- "zo5hGLcbdRZNjLT19IcZ15bTZLuOOUvY0vG63WepoJyuIB4mUeyAyfbF3URDWgcvPLPFoJWWYkOYjs8P",
- "mhr+NBDEbdifBYOkoiiYLpxnQ4nC0FNT79NO6oezlaVdqSYPl/+IDsLS+0c6SuTDGk3t/RZbNbpx39MC",
- "2midEmrzh3PWuO59ATly7KsQYO2qumSVxY2ZyywdxRz05C9JKRnXqFhUepn8jaRrKmlq2N9sCNxk8epF",
- "pF5Xu0QPvxngD453CQrkRRz1coDsvQzh+pLHXPCkMBwle9IkTQSnctCTGY8W8xy9Gyy4feixQpkZJRkk",
- "t6pFbjTg1HciPL5lwDuSYr2eG9HjjVf24JRZyTh50Mrs0E8f3jkpoxAyVpOmOe5O4pCgJYMLDFyLb5IZ",
- "8457IfNRu3AX6P9Yz4MXOQOxzJ/lmCLwTcXy7OcmCaxT8lBSnq6jdv+F6fhLU3y7XrI9x9ESKGvKOeTR",
- "4eyd+Yu/WyO3/7/E2HkKxke27ZYytMvtLK4BvA2mB8pPaNDLdG4mCLHazoqpoy7zlcgIztPU22iorF+d",
- "MShX9lsFSscyDPCDjfxA+47RC2y1LAI8Q6l6Rr6zj+esgbTKAaA0y4oqt6nlkK1AOsNjVeaCZlNixjl7",
- "e/iO2FltH1tC1lbrWqEw115FR68PivOMiyH01WDj8c3jx9kecGlWrTRW51CaFmUsF820OPMNMOEttHWi",
- "mBdiZ0aOrIStvPxmJzH0sGSyMJJpPZrl8UgT5j9a03SNomuLmwyT/Pgyc54qVfDeQF03uK6vg+fOwO0q",
- "zdlCc1MijH5xyZR9MwUuoJ3+VueCOtXJp8O1lycrzi2lRHn0tlzl26DdA2cd2t4cGoWsg/gbCi62SuNN",
- "q+6dYq9owYpuCb/eQwM2BaquB+vfwkopF5ylWC4ieKWlBtm9vzLGVzCiskbXGOWPuDuhkcMVLRxYhxM5",
- "LA6WEvSM0CGub6wMvppNtdRh/9T40MeaarICrRxng2zq6186ewnjCly9JHyKJ+CTQrb8L8ghoy69pDb9",
- "3pCMMHZ+QAD+1nx779QjDCo9ZxwFIYc2F79qLRr4PIQ20hPTZCVAufW0EwjVR9Nnhrl0GVx9nvnnJHAM",
- "674wy7a+uv5Qh95z5zxlpu0b09ZWTmh+boUp2kkPy9JNOlwdNSoP6Cs+iOCIBybxJvAAufX44WhbyG2r",
- "yx3vU0NocIEOOyjxHu4RRl0ptFMa+YLmlaUobEFsqEs0YZrxCBjvGIfmsZPIBZFGrwTcGDyvA/1UKqm2",
- "IuAonnYGNEcvXYyhKe1MtHcdqrPBiBJco59jeBubIqcDjKNu0AhulG/qN1YMdQfCxBt83Mkhsl+yFKUq",
- "J0RlGHbcKWIaYxyGcfsyye0LoH8M+jKR7a4ltSfnJjfRUCbZospWoBOaZbFCc9/gV4JfSVah5ABXkFZ1",
- "oa6yJCmWYGjXpOhTm5soFVxVxZa5fIM7TpeKmBz9HidQPq66GXxGkP0a1nv09uTD2zeHZ2+P7H1h1HKb",
- "SmZkbgmFYYhGj1UajOhcKSC/hmj8Ffv92llwHMygeHGEaMMCyp4QMaB+scF/Y8W0hgnI+dRvHNXlHejY",
- "8cbifXuknnBujl6i2CoZjwm8+u6Ojmbq253Hpv+9HshcrNqAPHCa+zZmHO5RjA2/NfdbmAXeqxBnb8A6",
- "SRtjqIR/BwG12zq9sM088cbtlYxD231d0n679WS4OP0U7+iBSMoguZ9aMcA6g4biKdPB8F+qXRaOpmQr",
- "p8SK8rERbDCGrWRvn/uMGsKGAjBs/IX53Os9ToDtqQM49laE+siePkDf+7BBUlLmPJ0Ns+hj1gUY90O+",
- "x4QeNhvcXYQL28VBYiuJVwgfrrPR1NbAa6AUijVVLWOlw0eGlZxh9e+gTkh/LO/TvYBUG6E+8FVJgJtU",
- "DTGTBQ8dfK23MaB+1NE3rszGttoa/fqlO5hNLwMgyGKxtR9n4ytJHNYRCegnxacGVsDdWwPt2N7REYbL",
- "JaSaXezIuPin0VKbaP6p12PtQzZBAgarI9b8A8M3VK8bgLYlRGyFJ6g/dWdwhuKtz2HzSJEWNUSLUU49",
- "z7tNojJiALlDYkhEqJjHzxrenBOGqZoyEAvew267Q1PyZbAKeJA/dMu5PEkSGuYUbZnyQsQ091Fzma43",
- "yrTD4KuhpIx+Hd5hQegIyx6r+gWH+gXhQKshx/1yUJcuURrzY2pbs0+ZBuV/88lwdhb7MnVTpxwt+5dU",
- "Zr5FVFX1WnCy5T7qZVL4GrJdoJf1zKyJh+rHzkcKjGDUW5oLxfgqGQodbIcgha/aoaMVrwMscIxwLUG6",
- "9wm0f/g70cLHT22DYxsq3Atst0GCGizqZYEbTLX/0NQSwDKJ1D777pzI4QKN3koNdDLI+B+ecxuy39jv",
- "Pljcl8kboZE7ek12puz7SDimekgMqX5J3G25Owj9Nlov49y+V6Ni6f/coDK0HpdSZFVqL+jwYDQ2hrHF",
- "NbawkqjCmPZX2ZP9cyw18y5I6TmHzdzK3+ma8qbmT/tYWxHKriFIoe3s9r0aBOK6T76yC1jdC5x/pFI9",
- "nZRC5MmAufi4X8WgewbOWXoOGTF3h48hGagETh6jlbL2B16uNz5rvyyBQ/ZkRohRy4tSb7xrsF2QszM5",
- "f6S3zX+Fs2aVLSzi9P3ZJx4Pf8KSH/KO/M0Ps52rKTDM745T2UF2lAm4GqigIOllpC7+2LcaI866bq3y",
- "hqgsFDEp5ZY5o6POd1/nj5B+UKx7u/YTppT7rM9USGs6QmnJG3S6wssPjUVoXNlw32EHeKFSHBQO99zI",
- "gfMHxwj9UCMlWMogJbSWv0vP9k+M1nwp2CKFEchmmcoWDBN9oTIwoqg3tW0ijue+CQPzxwXHmhp904dC",
- "UyLWpQwJx5xLeUHzhzdfYGGBQ8SHe/0mvtBQ/w2RbFGpbhet8I6OmjvQde9van6C5pZ/gtmjqA3YDeXs",
- "qHXBdl9nDusn0Zzkonm4AYcklzimNRrvvyILF5FaSkiZYp1g/UtfMq9W97CCbPMo0nb9ctc6fxb6DmTs",
- "FARRkvdN+S0t8H5oIGyO6B/MVAZObpTKY9TXI4sI/mI8KkwN3XFdnLesybacYSeaQ0i4Z6ty4Ma+oVW5",
- "n/Q6dnm4Drx0KgX9dY6+rVu4jVzUzdrGukT6yB32ZOjFGE9GvPSa6Y6uFIsQrFtIEFTy6/6vRMISC5ML",
- "8vQpTvD06dQ1/fVZ+7M5zk+fRsW4B3OitN4PdvPGKObnoeg/G+E2EGja2Y+K5dkuwmiFDTePBGBg7C8u",
- "wPoPeabgF2tP7R9VV+D5Ju7b7iYgYiJrbU0eTBUEBI+IBXbdZtEXnhWklWR6g3nf3vzGfonW0/muttg7",
- "j0+dKejuPi3Ooa4c0Nj3K+Vv1++EfRG6MDI1Os81vhj19ooWZQ7uoPz90eKv8PxvL7K95/t/Xfxt7+Ve",
- "Ci9evt7bo69f0P3Xz/fh2d9evtiD/eWr14tn2bMXzxYvnr149fJ1+vzF/uLFq9d/fWT4kAHZAjrxWUaT",
- "/4lveSSHJ8fJmQG2wQktWf1QnCFjX0acpngSoaAsnxz4n/67P2GzVBTN8P7XiUtimKy1LtXBfH55eTkL",
- "u8xXaNBLtKjS9dzP03+g6+S4DrC2ibG4ozZ21pACbqojhUP89uHt6Rk5PDmeNQQzOZjszfZm+/j8Tgmc",
- "lmxyMHmOP+HpWeO+zx2xTQ6+XE8n8zXQHP1f5o8CtGSp/6Qu6WoFcubqqZufLp7NvSgx/+KMmdfbvs3D",
- "0oTzLy2bb7ajJ1Zvm3/xScnbW7eyfp2tO+gwEorhKe0TtPMvKMoO/j53CQjxj6hSWFqdewdGvGVrNV/0",
- "Fcuuuz3ci5DzL80Trdf2MOcQc1fYuH0avOg6JUwTuhASs3Z1ujbn16cLMtV+0bcmxuPMEKHp9aZ+rjao",
- "lHTwsSeN24GIHwlPrCHH5kC1Zmp4ppYVhMV76huh1b65Fz7uJa8/f9mf7u9d/8Xwfffny+fXI/2Ob5rX",
- "bk9rpj6y4WfMtUMLKp6zZ3t7d3jM6ZCHT+/iJgVvhkVf4K7KpBjSst1WdQYiNTJ25AR1hh947/PFDVe8",
- "1c7TivKJvM3wDc2IT2XBufcfbu5jjl5fw3+JvV+up5OXD7n6Y25InuYEWwZJ3v2t/4mfc3HJfUsjDFRF",
- "QeXGH2PVYgr+EWq8cuhKodVPsguqYfIZzcqxmMcB5qI0vQVzOTW9vjKXh2IuuEn3wVzaA90zc3l2wwP+",
- "51/xV3b6Z2Onp5bdjWenTpSz2ZJz+35kI+H13hJYQTRtExMo6baHobsc9jvQvXeuJ3dkMX/Yk9f/f5+T",
- "F3svHg6CdiHs72FD3gtNvkX765/0zI47PtskoY5mlGU9IrfsH5T+RmSbLRgq1Kp0GU4RuWTBuAG5f7v0",
- "X1bsvUN9Dhtiw4+8m5mLDHry0PUdecCf9snsrzzkKw+RdvrnDzf9KcgLlgI5g6IUkkqWb8hPvM5Pv71a",
- "l2XR0O720e/xNKONpCKDFfDEMaxkIbKNr03YGvAcrIW7J6jMv7QLjFsr2qBZ6gh/r9897AO92JDjo54E",
- "Y7t1Oe03G2za0RgjOmEXxK2aYZcXDShj28jcLGQlNLFYyNyivjKer4znTsLL6MMTk1+i2oQ35HTv5Kkv",
- "1BIrZUR1f+oxOscfelzvZaP7+kxMf7Eh8JCR4IPN1eqi+StL+MoS7sYSvoPIYcRT65hEhOhuY+ntMwiM",
- "9s26z/RgFIRvXuVUEgVjzRSHOKIzTjwEl3hoJS2KK6ujUU7gitlAx8iG3a/e9pXFfWVxfyKv1W5G0xZE",
- "bqzpnMOmoGWt36h1pTNxaQscRrki1v6nuSsUjEGYdUCHFsQP0CTVkh9dQYJ8g5GnLDNinGYFGJGq5nWm",
- "s0+VaGKizQjNe80rxnECZBU4i62ITYN0NQWp4PZ1046vzUH23uqEMSb7WwXI0RxuHIyTacvZ4rYxUn/6",
- "zvJX3zdyvcWWjlRhw8X7wRr1+6Wtv+eXlOlkKaRLZUX09TtroPnc1fnq/NrUrOh9wUIcwY9BYEf813n9",
- "XkP0YzeyJfbVRYz4Rk3oWhgKhhtcB4F9/Gz2Ccv9ur1vIpsO5nPM/1oLpeeT6+mXTtRT+PFzvTVf6mvZ",
- "bdH15+v/GwAA//9MwM7Ji70AAA==",
-}
-
-// GetSwagger returns the Swagger specification corresponding to the generated code
-// in this file.
-func GetSwagger() (*openapi3.Swagger, error) {
- zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, ""))
- if err != nil {
- return nil, fmt.Errorf("error base64 decoding spec: %s", err)
- }
- zr, err := gzip.NewReader(bytes.NewReader(zipped))
- if err != nil {
- return nil, fmt.Errorf("error decompressing spec: %s", err)
- }
- var buf bytes.Buffer
- _, err = buf.ReadFrom(zr)
- if err != nil {
- return nil, fmt.Errorf("error decompressing spec: %s", err)
- }
-
- swagger, err := openapi3.NewSwaggerLoader().LoadSwaggerFromData(buf.Bytes())
- if err != nil {
- return nil, fmt.Errorf("error loading Swagger: %s", err)
- }
- return swagger, nil
-}
diff --git a/daemon/algod/api/server/v2/generated/private/types.go b/daemon/algod/api/server/v2/generated/private/types.go
deleted file mode 100644
index 70a6da158..000000000
--- a/daemon/algod/api/server/v2/generated/private/types.go
+++ /dev/null
@@ -1,841 +0,0 @@
-// Package private provides primitives to interact the openapi HTTP API.
-//
-// Code generated by github.com/algorand/oapi-codegen DO NOT EDIT.
-package private
-
-import (
- "encoding/json"
- "time"
-)
-
-// Account defines model for Account.
-type Account struct {
-
- // the account public key
- Address string `json:"address"`
-
- // \[algo\] total number of MicroAlgos in the account
- Amount uint64 `json:"amount"`
-
- // specifies the amount of MicroAlgos in the account, without the pending rewards.
- AmountWithoutPendingRewards uint64 `json:"amount-without-pending-rewards"`
-
- // \[appl\] applications local data stored in this account.
- //
- // Note the raw object uses `map[int] -> AppLocalState` for this type.
- AppsLocalState *[]ApplicationLocalState `json:"apps-local-state,omitempty"`
-
- // \[teap\] the sum of all extra application program pages for this account.
- AppsTotalExtraPages *uint64 `json:"apps-total-extra-pages,omitempty"`
-
- // Specifies maximums on the number of each type that may be stored.
- AppsTotalSchema *ApplicationStateSchema `json:"apps-total-schema,omitempty"`
-
- // \[asset\] assets held by this account.
- //
- // Note the raw object uses `map[int] -> AssetHolding` for this type.
- Assets *[]AssetHolding `json:"assets,omitempty"`
-
- // \[spend\] the address against which signing should be checked. If empty, the address of the current account is used. This field can be updated in any transaction by setting the RekeyTo field.
- AuthAddr *string `json:"auth-addr,omitempty"`
-
- // \[appp\] parameters of applications created by this account including app global data.
- //
- // Note: the raw account uses `map[int] -> AppParams` for this type.
- CreatedApps *[]Application `json:"created-apps,omitempty"`
-
- // \[apar\] parameters of assets created by this account.
- //
- // Note: the raw account uses `map[int] -> Asset` for this type.
- CreatedAssets *[]Asset `json:"created-assets,omitempty"`
-
- // MicroAlgo balance required by the account.
- //
- // The requirement grows based on asset and application usage.
- MinBalance uint64 `json:"min-balance"`
-
- // AccountParticipation describes the parameters used by this account in consensus protocol.
- Participation *AccountParticipation `json:"participation,omitempty"`
-
- // amount of MicroAlgos of pending rewards in this account.
- PendingRewards uint64 `json:"pending-rewards"`
-
- // \[ebase\] used as part of the rewards computation. Only applicable to accounts which are participating.
- RewardBase *uint64 `json:"reward-base,omitempty"`
-
- // \[ern\] total rewards of MicroAlgos the account has received, including pending rewards.
- Rewards uint64 `json:"rewards"`
-
- // The round for which this information is relevant.
- Round uint64 `json:"round"`
-
- // Indicates what type of signature is used by this account, must be one of:
- // * sig
- // * msig
- // * lsig
- SigType *string `json:"sig-type,omitempty"`
-
- // \[onl\] delegation status of the account's MicroAlgos
- // * Offline - indicates that the associated account is delegated.
- // * Online - indicates that the associated account used as part of the delegation pool.
- // * NotParticipating - indicates that the associated account is neither a delegator nor a delegate.
- Status string `json:"status"`
-
- // The count of all applications that have been opted in, equivalent to the count of application local data (AppLocalState objects) stored in this account.
- TotalAppsOptedIn uint64 `json:"total-apps-opted-in"`
-
- // The count of all assets that have been opted in, equivalent to the count of AssetHolding objects held by this account.
- TotalAssetsOptedIn uint64 `json:"total-assets-opted-in"`
-
- // The count of all apps (AppParams objects) created by this account.
- TotalCreatedApps uint64 `json:"total-created-apps"`
-
- // The count of all assets (AssetParams objects) created by this account.
- TotalCreatedAssets uint64 `json:"total-created-assets"`
-}
-
-// AccountParticipation defines model for AccountParticipation.
-type AccountParticipation struct {
-
- // \[sel\] Selection public key (if any) currently registered for this round.
- SelectionParticipationKey []byte `json:"selection-participation-key"`
-
- // \[stprf\] Root of the state proof key (if any)
- StateProofKey *[]byte `json:"state-proof-key,omitempty"`
-
- // \[voteFst\] First round for which this participation is valid.
- VoteFirstValid uint64 `json:"vote-first-valid"`
-
- // \[voteKD\] Number of subkeys in each batch of participation keys.
- VoteKeyDilution uint64 `json:"vote-key-dilution"`
-
- // \[voteLst\] Last round for which this participation is valid.
- VoteLastValid uint64 `json:"vote-last-valid"`
-
- // \[vote\] root participation public key (if any) currently registered for this round.
- VoteParticipationKey []byte `json:"vote-participation-key"`
-}
-
-// AccountStateDelta defines model for AccountStateDelta.
-type AccountStateDelta struct {
- Address string `json:"address"`
-
- // Application state delta.
- Delta StateDelta `json:"delta"`
-}
-
-// Application defines model for Application.
-type Application struct {
-
- // \[appidx\] application index.
- Id uint64 `json:"id"`
-
- // Stores the global information associated with an application.
- Params ApplicationParams `json:"params"`
-}
-
-// ApplicationLocalState defines model for ApplicationLocalState.
-type ApplicationLocalState struct {
-
- // The application which this local state is for.
- Id uint64 `json:"id"`
-
- // Represents a key-value store for use in an application.
- KeyValue *TealKeyValueStore `json:"key-value,omitempty"`
-
- // Specifies maximums on the number of each type that may be stored.
- Schema ApplicationStateSchema `json:"schema"`
-}
-
-// ApplicationParams defines model for ApplicationParams.
-type ApplicationParams struct {
-
- // \[approv\] approval program.
- ApprovalProgram []byte `json:"approval-program"`
-
- // \[clearp\] approval program.
- ClearStateProgram []byte `json:"clear-state-program"`
-
- // The address that created this application. This is the address where the parameters and global state for this application can be found.
- Creator string `json:"creator"`
-
- // \[epp\] the amount of extra program pages available to this app.
- ExtraProgramPages *uint64 `json:"extra-program-pages,omitempty"`
-
- // Represents a key-value store for use in an application.
- GlobalState *TealKeyValueStore `json:"global-state,omitempty"`
-
- // Specifies maximums on the number of each type that may be stored.
- GlobalStateSchema *ApplicationStateSchema `json:"global-state-schema,omitempty"`
-
- // Specifies maximums on the number of each type that may be stored.
- LocalStateSchema *ApplicationStateSchema `json:"local-state-schema,omitempty"`
-}
-
-// ApplicationStateSchema defines model for ApplicationStateSchema.
-type ApplicationStateSchema struct {
-
- // \[nbs\] num of byte slices.
- NumByteSlice uint64 `json:"num-byte-slice"`
-
- // \[nui\] num of uints.
- NumUint uint64 `json:"num-uint"`
-}
-
-// Asset defines model for Asset.
-type Asset struct {
-
- // unique asset identifier
- Index uint64 `json:"index"`
-
- // AssetParams specifies the parameters for an asset.
- //
- // \[apar\] when part of an AssetConfig transaction.
- //
- // Definition:
- // data/transactions/asset.go : AssetParams
- Params AssetParams `json:"params"`
-}
-
-// AssetHolding defines model for AssetHolding.
-type AssetHolding struct {
-
- // \[a\] number of units held.
- Amount uint64 `json:"amount"`
-
- // Asset ID of the holding.
- AssetId uint64 `json:"asset-id"`
-
- // \[f\] whether or not the holding is frozen.
- IsFrozen bool `json:"is-frozen"`
-}
-
-// AssetParams defines model for AssetParams.
-type AssetParams struct {
-
- // \[c\] Address of account used to clawback holdings of this asset. If empty, clawback is not permitted.
- Clawback *string `json:"clawback,omitempty"`
-
- // The address that created this asset. This is the address where the parameters for this asset can be found, and also the address where unwanted asset units can be sent in the worst case.
- Creator string `json:"creator"`
-
- // \[dc\] The number of digits to use after the decimal point when displaying this asset. If 0, the asset is not divisible. If 1, the base unit of the asset is in tenths. If 2, the base unit of the asset is in hundredths, and so on. This value must be between 0 and 19 (inclusive).
- Decimals uint64 `json:"decimals"`
-
- // \[df\] Whether holdings of this asset are frozen by default.
- DefaultFrozen *bool `json:"default-frozen,omitempty"`
-
- // \[f\] Address of account used to freeze holdings of this asset. If empty, freezing is not permitted.
- Freeze *string `json:"freeze,omitempty"`
-
- // \[m\] Address of account used to manage the keys of this asset and to destroy it.
- Manager *string `json:"manager,omitempty"`
-
- // \[am\] A commitment to some unspecified asset metadata. The format of this metadata is up to the application.
- MetadataHash *[]byte `json:"metadata-hash,omitempty"`
-
- // \[an\] Name of this asset, as supplied by the creator. Included only when the asset name is composed of printable utf-8 characters.
- Name *string `json:"name,omitempty"`
-
- // Base64 encoded name of this asset, as supplied by the creator.
- NameB64 *[]byte `json:"name-b64,omitempty"`
-
- // \[r\] Address of account holding reserve (non-minted) units of this asset.
- Reserve *string `json:"reserve,omitempty"`
-
- // \[t\] The total number of units of this asset.
- Total uint64 `json:"total"`
-
- // \[un\] Name of a unit of this asset, as supplied by the creator. Included only when the name of a unit of this asset is composed of printable utf-8 characters.
- UnitName *string `json:"unit-name,omitempty"`
-
- // Base64 encoded name of a unit of this asset, as supplied by the creator.
- UnitNameB64 *[]byte `json:"unit-name-b64,omitempty"`
-
- // \[au\] URL where more information about the asset can be retrieved. Included only when the URL is composed of printable utf-8 characters.
- Url *string `json:"url,omitempty"`
-
- // Base64 encoded URL where more information about the asset can be retrieved.
- UrlB64 *[]byte `json:"url-b64,omitempty"`
-}
-
-// BuildVersion defines model for BuildVersion.
-type BuildVersion struct {
- Branch string `json:"branch"`
- BuildNumber uint64 `json:"build_number"`
- Channel string `json:"channel"`
- CommitHash string `json:"commit_hash"`
- Major uint64 `json:"major"`
- Minor uint64 `json:"minor"`
-}
-
-// DryrunRequest defines model for DryrunRequest.
-type DryrunRequest struct {
- Accounts []Account `json:"accounts"`
- Apps []Application `json:"apps"`
-
- // LatestTimestamp is available to some TEAL scripts. Defaults to the latest confirmed timestamp this algod is attached to.
- LatestTimestamp uint64 `json:"latest-timestamp"`
-
- // ProtocolVersion specifies a specific version string to operate under, otherwise whatever the current protocol of the network this algod is running in.
- ProtocolVersion string `json:"protocol-version"`
-
- // Round is available to some TEAL scripts. Defaults to the current round on the network this algod is attached to.
- Round uint64 `json:"round"`
- Sources []DryrunSource `json:"sources"`
- Txns []json.RawMessage `json:"txns"`
-}
-
-// DryrunSource defines model for DryrunSource.
-type DryrunSource struct {
- AppIndex uint64 `json:"app-index"`
-
- // FieldName is what kind of sources this is. If lsig then it goes into the transactions[this.TxnIndex].LogicSig. If approv or clearp it goes into the Approval Program or Clear State Program of application[this.AppIndex].
- FieldName string `json:"field-name"`
- Source string `json:"source"`
- TxnIndex uint64 `json:"txn-index"`
-}
-
-// DryrunState defines model for DryrunState.
-type DryrunState struct {
-
- // Evaluation error if any
- Error *string `json:"error,omitempty"`
-
- // Line number
- Line uint64 `json:"line"`
-
- // Program counter
- Pc uint64 `json:"pc"`
- Scratch *[]TealValue `json:"scratch,omitempty"`
- Stack []TealValue `json:"stack"`
-}
-
-// DryrunTxnResult defines model for DryrunTxnResult.
-type DryrunTxnResult struct {
- AppCallMessages *[]string `json:"app-call-messages,omitempty"`
- AppCallTrace *[]DryrunState `json:"app-call-trace,omitempty"`
-
- // Budget added during execution of app call transaction.
- BudgetAdded *uint64 `json:"budget-added,omitempty"`
-
- // Budget consumed during execution of app call transaction.
- BudgetConsumed *uint64 `json:"budget-consumed,omitempty"`
-
- // Net cost of app execution. Field is DEPRECATED and is subject for removal. Instead, use `budget-added` and `budget-consumed.
- Cost *uint64 `json:"cost,omitempty"`
-
- // Disassembled program line by line.
- Disassembly []string `json:"disassembly"`
-
- // Application state delta.
- GlobalDelta *StateDelta `json:"global-delta,omitempty"`
- LocalDeltas *[]AccountStateDelta `json:"local-deltas,omitempty"`
-
- // Disassembled lsig program line by line.
- LogicSigDisassembly *[]string `json:"logic-sig-disassembly,omitempty"`
- LogicSigMessages *[]string `json:"logic-sig-messages,omitempty"`
- LogicSigTrace *[]DryrunState `json:"logic-sig-trace,omitempty"`
- Logs *[][]byte `json:"logs,omitempty"`
-}
-
-// ErrorResponse defines model for ErrorResponse.
-type ErrorResponse struct {
- Data *map[string]interface{} `json:"data,omitempty"`
- Message string `json:"message"`
-}
-
-// EvalDelta defines model for EvalDelta.
-type EvalDelta struct {
-
- // \[at\] delta action.
- Action uint64 `json:"action"`
-
- // \[bs\] bytes value.
- Bytes *string `json:"bytes,omitempty"`
-
- // \[ui\] uint value.
- Uint *uint64 `json:"uint,omitempty"`
-}
-
-// EvalDeltaKeyValue defines model for EvalDeltaKeyValue.
-type EvalDeltaKeyValue struct {
- Key string `json:"key"`
-
- // Represents a TEAL value delta.
- Value EvalDelta `json:"value"`
-}
-
-// LightBlockHeaderProof defines model for LightBlockHeaderProof.
-type LightBlockHeaderProof struct {
-
- // The index of the light block header in the vector commitment tree
- Index uint64 `json:"index"`
-
- // The encoded proof.
- Proof []byte `json:"proof"`
-
- // Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.
- Treedepth uint64 `json:"treedepth"`
-}
-
-// ParticipationKey defines model for ParticipationKey.
-type ParticipationKey struct {
-
- // Address the key was generated for.
- Address string `json:"address"`
-
- // When registered, this is the first round it may be used.
- EffectiveFirstValid *uint64 `json:"effective-first-valid,omitempty"`
-
- // When registered, this is the last round it may be used.
- EffectiveLastValid *uint64 `json:"effective-last-valid,omitempty"`
-
- // The key's ParticipationID.
- Id string `json:"id"`
-
- // AccountParticipation describes the parameters used by this account in consensus protocol.
- Key AccountParticipation `json:"key"`
-
- // Round when this key was last used to propose a block.
- LastBlockProposal *uint64 `json:"last-block-proposal,omitempty"`
-
- // Round when this key was last used to generate a state proof.
- LastStateProof *uint64 `json:"last-state-proof,omitempty"`
-
- // Round when this key was last used to vote.
- LastVote *uint64 `json:"last-vote,omitempty"`
-}
-
-// PendingTransactionResponse defines model for PendingTransactionResponse.
-type PendingTransactionResponse struct {
-
- // The application index if the transaction was found and it created an application.
- ApplicationIndex *uint64 `json:"application-index,omitempty"`
-
- // The number of the asset's unit that were transferred to the close-to address.
- AssetClosingAmount *uint64 `json:"asset-closing-amount,omitempty"`
-
- // The asset index if the transaction was found and it created an asset.
- AssetIndex *uint64 `json:"asset-index,omitempty"`
-
- // Rewards in microalgos applied to the close remainder to account.
- CloseRewards *uint64 `json:"close-rewards,omitempty"`
-
- // Closing amount for the transaction.
- ClosingAmount *uint64 `json:"closing-amount,omitempty"`
-
- // The round where this transaction was confirmed, if present.
- ConfirmedRound *uint64 `json:"confirmed-round,omitempty"`
-
- // Application state delta.
- GlobalStateDelta *StateDelta `json:"global-state-delta,omitempty"`
-
- // Inner transactions produced by application execution.
- InnerTxns *[]PendingTransactionResponse `json:"inner-txns,omitempty"`
-
- // \[ld\] Local state key/value changes for the application being executed by this transaction.
- LocalStateDelta *[]AccountStateDelta `json:"local-state-delta,omitempty"`
-
- // \[lg\] Logs for the application being executed by this transaction.
- Logs *[][]byte `json:"logs,omitempty"`
-
- // Indicates that the transaction was kicked out of this node's transaction pool (and specifies why that happened). An empty string indicates the transaction wasn't kicked out of this node's txpool due to an error.
- PoolError string `json:"pool-error"`
-
- // Rewards in microalgos applied to the receiver account.
- ReceiverRewards *uint64 `json:"receiver-rewards,omitempty"`
-
- // Rewards in microalgos applied to the sender account.
- SenderRewards *uint64 `json:"sender-rewards,omitempty"`
-
- // The raw signed transaction.
- Txn map[string]interface{} `json:"txn"`
-}
-
-// StateDelta defines model for StateDelta.
-type StateDelta []EvalDeltaKeyValue
-
-// StateProof defines model for StateProof.
-type StateProof struct {
-
- // Represents the message that the state proofs are attesting to.
- Message StateProofMessage `json:"Message"`
-
- // The encoded StateProof for the message.
- StateProof []byte `json:"StateProof"`
-}
-
-// StateProofMessage defines model for StateProofMessage.
-type StateProofMessage struct {
-
- // The vector commitment root on all light block headers within a state proof interval.
- BlockHeadersCommitment []byte `json:"BlockHeadersCommitment"`
-
- // The first round the message attests to.
- FirstAttestedRound uint64 `json:"FirstAttestedRound"`
-
- // The last round the message attests to.
- LastAttestedRound uint64 `json:"LastAttestedRound"`
-
- // An integer value representing the natural log of the proven weight with 16 bits of precision. This value would be used to verify the next state proof.
- LnProvenWeight uint64 `json:"LnProvenWeight"`
-
- // The vector commitment root of the top N accounts to sign the next StateProof.
- VotersCommitment []byte `json:"VotersCommitment"`
-}
-
-// TealKeyValue defines model for TealKeyValue.
-type TealKeyValue struct {
- Key string `json:"key"`
-
- // Represents a TEAL value.
- Value TealValue `json:"value"`
-}
-
-// TealKeyValueStore defines model for TealKeyValueStore.
-type TealKeyValueStore []TealKeyValue
-
-// TealValue defines model for TealValue.
-type TealValue struct {
-
- // \[tb\] bytes value.
- Bytes string `json:"bytes"`
-
- // \[tt\] value type. Value `1` refers to **bytes**, value `2` refers to **uint**
- Type uint64 `json:"type"`
-
- // \[ui\] uint value.
- Uint uint64 `json:"uint"`
-}
-
-// Version defines model for Version.
-type Version struct {
- Build BuildVersion `json:"build"`
- GenesisHashB64 []byte `json:"genesis_hash_b64"`
- GenesisId string `json:"genesis_id"`
- Versions []string `json:"versions"`
-}
-
-// AccountId defines model for account-id.
-type AccountId string
-
-// Address defines model for address.
-type Address string
-
-// AddressRole defines model for address-role.
-type AddressRole string
-
-// AfterTime defines model for after-time.
-type AfterTime time.Time
-
-// AssetId defines model for asset-id.
-type AssetId uint64
-
-// BeforeTime defines model for before-time.
-type BeforeTime time.Time
-
-// Catchpoint defines model for catchpoint.
-type Catchpoint string
-
-// CurrencyGreaterThan defines model for currency-greater-than.
-type CurrencyGreaterThan uint64
-
-// CurrencyLessThan defines model for currency-less-than.
-type CurrencyLessThan uint64
-
-// ExcludeCloseTo defines model for exclude-close-to.
-type ExcludeCloseTo bool
-
-// Format defines model for format.
-type Format string
-
-// Limit defines model for limit.
-type Limit uint64
-
-// Max defines model for max.
-type Max uint64
-
-// MaxRound defines model for max-round.
-type MaxRound uint64
-
-// MinRound defines model for min-round.
-type MinRound uint64
-
-// Next defines model for next.
-type Next string
-
-// NotePrefix defines model for note-prefix.
-type NotePrefix string
-
-// Round defines model for round.
-type Round uint64
-
-// RoundNumber defines model for round-number.
-type RoundNumber uint64
-
-// SigType defines model for sig-type.
-type SigType string
-
-// TxId defines model for tx-id.
-type TxId string
-
-// TxType defines model for tx-type.
-type TxType string
-
-// AccountApplicationResponse defines model for AccountApplicationResponse.
-type AccountApplicationResponse struct {
-
- // Stores local state associated with an application.
- AppLocalState *ApplicationLocalState `json:"app-local-state,omitempty"`
-
- // Stores the global information associated with an application.
- CreatedApp *ApplicationParams `json:"created-app,omitempty"`
-
- // The round for which this information is relevant.
- Round uint64 `json:"round"`
-}
-
-// AccountAssetResponse defines model for AccountAssetResponse.
-type AccountAssetResponse struct {
-
- // Describes an asset held by an account.
- //
- // Definition:
- // data/basics/userBalance.go : AssetHolding
- AssetHolding *AssetHolding `json:"asset-holding,omitempty"`
-
- // AssetParams specifies the parameters for an asset.
- //
- // \[apar\] when part of an AssetConfig transaction.
- //
- // Definition:
- // data/transactions/asset.go : AssetParams
- CreatedAsset *AssetParams `json:"created-asset,omitempty"`
-
- // The round for which this information is relevant.
- Round uint64 `json:"round"`
-}
-
-// AccountResponse defines model for AccountResponse.
-type AccountResponse Account
-
-// ApplicationResponse defines model for ApplicationResponse.
-type ApplicationResponse Application
-
-// AssetResponse defines model for AssetResponse.
-type AssetResponse Asset
-
-// BlockHashResponse defines model for BlockHashResponse.
-type BlockHashResponse struct {
-
- // Block header hash.
- BlockHash string `json:"blockHash"`
-}
-
-// BlockResponse defines model for BlockResponse.
-type BlockResponse struct {
-
- // Block header data.
- Block map[string]interface{} `json:"block"`
-
- // Optional certificate object. This is only included when the format is set to message pack.
- Cert *map[string]interface{} `json:"cert,omitempty"`
-}
-
-// CatchpointAbortResponse defines model for CatchpointAbortResponse.
-type CatchpointAbortResponse struct {
-
- // Catchup abort response string
- CatchupMessage string `json:"catchup-message"`
-}
-
-// CatchpointStartResponse defines model for CatchpointStartResponse.
-type CatchpointStartResponse struct {
-
- // Catchup start response string
- CatchupMessage string `json:"catchup-message"`
-}
-
-// CompileResponse defines model for CompileResponse.
-type CompileResponse struct {
-
- // base32 SHA512_256 of program bytes (Address style)
- Hash string `json:"hash"`
-
- // base64 encoded program bytes
- Result string `json:"result"`
-
- // JSON of the source map
- Sourcemap *map[string]interface{} `json:"sourcemap,omitempty"`
-}
-
-// DisassembleResponse defines model for DisassembleResponse.
-type DisassembleResponse struct {
-
- // disassembled Teal code
- Result string `json:"result"`
-}
-
-// DryrunResponse defines model for DryrunResponse.
-type DryrunResponse struct {
- Error string `json:"error"`
-
- // Protocol version is the protocol version Dryrun was operated under.
- ProtocolVersion string `json:"protocol-version"`
- Txns []DryrunTxnResult `json:"txns"`
-}
-
-// LightBlockHeaderProofResponse defines model for LightBlockHeaderProofResponse.
-type LightBlockHeaderProofResponse LightBlockHeaderProof
-
-// NodeStatusResponse defines model for NodeStatusResponse.
-type NodeStatusResponse struct {
-
- // The current catchpoint that is being caught up to
- Catchpoint *string `json:"catchpoint,omitempty"`
-
- // The number of blocks that have already been obtained by the node as part of the catchup
- CatchpointAcquiredBlocks *uint64 `json:"catchpoint-acquired-blocks,omitempty"`
-
- // The number of accounts from the current catchpoint that have been processed so far as part of the catchup
- CatchpointProcessedAccounts *uint64 `json:"catchpoint-processed-accounts,omitempty"`
-
- // The total number of accounts included in the current catchpoint
- CatchpointTotalAccounts *uint64 `json:"catchpoint-total-accounts,omitempty"`
-
- // The total number of blocks that are required to complete the current catchpoint catchup
- CatchpointTotalBlocks *uint64 `json:"catchpoint-total-blocks,omitempty"`
-
- // The number of accounts from the current catchpoint that have been verified so far as part of the catchup
- CatchpointVerifiedAccounts *uint64 `json:"catchpoint-verified-accounts,omitempty"`
-
- // CatchupTime in nanoseconds
- CatchupTime uint64 `json:"catchup-time"`
-
- // The last catchpoint seen by the node
- LastCatchpoint *string `json:"last-catchpoint,omitempty"`
-
- // LastRound indicates the last round seen
- LastRound uint64 `json:"last-round"`
-
- // LastVersion indicates the last consensus version supported
- LastVersion string `json:"last-version"`
-
- // NextVersion of consensus protocol to use
- NextVersion string `json:"next-version"`
-
- // NextVersionRound is the round at which the next consensus version will apply
- NextVersionRound uint64 `json:"next-version-round"`
-
- // NextVersionSupported indicates whether the next consensus version is supported by this node
- NextVersionSupported bool `json:"next-version-supported"`
-
- // StoppedAtUnsupportedRound indicates that the node does not support the new rounds and has stopped making progress
- StoppedAtUnsupportedRound bool `json:"stopped-at-unsupported-round"`
-
- // TimeSinceLastRound in nanoseconds
- TimeSinceLastRound uint64 `json:"time-since-last-round"`
-}
-
-// ParticipationKeyResponse defines model for ParticipationKeyResponse.
-type ParticipationKeyResponse ParticipationKey
-
-// ParticipationKeysResponse defines model for ParticipationKeysResponse.
-type ParticipationKeysResponse []ParticipationKey
-
-// PendingTransactionsResponse defines model for PendingTransactionsResponse.
-type PendingTransactionsResponse struct {
-
- // An array of signed transaction objects.
- TopTransactions []map[string]interface{} `json:"top-transactions"`
-
- // Total number of transactions in the pool.
- TotalTransactions uint64 `json:"total-transactions"`
-}
-
-// PostParticipationResponse defines model for PostParticipationResponse.
-type PostParticipationResponse struct {
-
- // encoding of the participation ID.
- PartId string `json:"partId"`
-}
-
-// PostTransactionsResponse defines model for PostTransactionsResponse.
-type PostTransactionsResponse struct {
-
- // encoding of the transaction hash.
- TxId string `json:"txId"`
-}
-
-// StateProofResponse defines model for StateProofResponse.
-type StateProofResponse StateProof
-
-// SupplyResponse defines model for SupplyResponse.
-type SupplyResponse struct {
-
- // Round
- CurrentRound uint64 `json:"current_round"`
-
- // OnlineMoney
- OnlineMoney uint64 `json:"online-money"`
-
- // TotalMoney
- TotalMoney uint64 `json:"total-money"`
-}
-
-// TransactionParametersResponse defines model for TransactionParametersResponse.
-type TransactionParametersResponse struct {
-
- // ConsensusVersion indicates the consensus protocol version
- // as of LastRound.
- ConsensusVersion string `json:"consensus-version"`
-
- // Fee is the suggested transaction fee
- // Fee is in units of micro-Algos per byte.
- // Fee may fall to zero but transactions must still have a fee of
- // at least MinTxnFee for the current network protocol.
- Fee uint64 `json:"fee"`
-
- // GenesisHash is the hash of the genesis block.
- GenesisHash []byte `json:"genesis-hash"`
-
- // GenesisID is an ID listed in the genesis block.
- GenesisId string `json:"genesis-id"`
-
- // LastRound indicates the last round seen
- LastRound uint64 `json:"last-round"`
-
- // The minimum transaction fee (not per byte) required for the
- // txn to validate for the current network protocol.
- MinFee uint64 `json:"min-fee"`
-}
-
-// TransactionProofResponse defines model for TransactionProofResponse.
-type TransactionProofResponse struct {
-
- // The type of hash function used to create the proof, must be one of:
- // * sha512_256
- // * sha256
- Hashtype string `json:"hashtype"`
-
- // Index of the transaction in the block's payset.
- Idx uint64 `json:"idx"`
-
- // Proof of transaction membership.
- Proof []byte `json:"proof"`
-
- // Hash of SignedTxnInBlock for verifying proof.
- Stibhash []byte `json:"stibhash"`
-
- // Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.
- Treedepth uint64 `json:"treedepth"`
-}
-
-// VersionsResponse defines model for VersionsResponse.
-type VersionsResponse Version
-
-// ShutdownNodeParams defines parameters for ShutdownNode.
-type ShutdownNodeParams struct {
- Timeout *uint64 `json:"timeout,omitempty"`
-}
diff --git a/daemon/algod/api/server/v2/generated/routes.go b/daemon/algod/api/server/v2/generated/routes.go
deleted file mode 100644
index 7a5720857..000000000
--- a/daemon/algod/api/server/v2/generated/routes.go
+++ /dev/null
@@ -1,1128 +0,0 @@
-// Package generated provides primitives to interact the openapi HTTP API.
-//
-// Code generated by github.com/algorand/oapi-codegen DO NOT EDIT.
-package generated
-
-import (
- "bytes"
- "compress/gzip"
- "encoding/base64"
- "fmt"
- "github.com/algorand/oapi-codegen/pkg/runtime"
- "github.com/getkin/kin-openapi/openapi3"
- "github.com/labstack/echo/v4"
- "net/http"
- "strings"
-)
-
-// ServerInterface represents all server handlers.
-type ServerInterface interface {
- // Get account information.
- // (GET /v2/accounts/{address})
- AccountInformation(ctx echo.Context, address string, params AccountInformationParams) error
- // Get account information about a given app.
- // (GET /v2/accounts/{address}/applications/{application-id})
- AccountApplicationInformation(ctx echo.Context, address string, applicationId uint64, params AccountApplicationInformationParams) error
- // Get account information about a given asset.
- // (GET /v2/accounts/{address}/assets/{asset-id})
- AccountAssetInformation(ctx echo.Context, address string, assetId uint64, params AccountAssetInformationParams) error
- // Get a list of unconfirmed transactions currently in the transaction pool by address.
- // (GET /v2/accounts/{address}/transactions/pending)
- GetPendingTransactionsByAddress(ctx echo.Context, address string, params GetPendingTransactionsByAddressParams) error
- // Get application information.
- // (GET /v2/applications/{application-id})
- GetApplicationByID(ctx echo.Context, applicationId uint64) error
- // Get asset information.
- // (GET /v2/assets/{asset-id})
- GetAssetByID(ctx echo.Context, assetId uint64) error
- // Get the block for the given round.
- // (GET /v2/blocks/{round})
- GetBlock(ctx echo.Context, round uint64, params GetBlockParams) error
- // Get the block hash for the block on the given round.
- // (GET /v2/blocks/{round}/hash)
- GetBlockHash(ctx echo.Context, round uint64) error
- // Gets a proof for a given light block header inside a state proof commitment
- // (GET /v2/blocks/{round}/lightheader/proof)
- GetLightBlockHeaderProof(ctx echo.Context, round uint64) error
- // Get a proof for a transaction in a block.
- // (GET /v2/blocks/{round}/transactions/{txid}/proof)
- GetTransactionProof(ctx echo.Context, round uint64, txid string, params GetTransactionProofParams) error
- // Get the current supply reported by the ledger.
- // (GET /v2/ledger/supply)
- GetSupply(ctx echo.Context) error
- // Get a state proof that covers a given round
- // (GET /v2/stateproofs/{round})
- GetStateProof(ctx echo.Context, round uint64) error
- // Gets the current node status.
- // (GET /v2/status)
- GetStatus(ctx echo.Context) error
- // Gets the node status after waiting for the given round.
- // (GET /v2/status/wait-for-block-after/{round})
- WaitForBlock(ctx echo.Context, round uint64) error
- // Compile TEAL source code to binary, produce its hash
- // (POST /v2/teal/compile)
- TealCompile(ctx echo.Context, params TealCompileParams) error
- // Disassemble program bytes into the TEAL source code.
- // (POST /v2/teal/disassemble)
- TealDisassemble(ctx echo.Context) error
- // Provide debugging information for a transaction (or group).
- // (POST /v2/teal/dryrun)
- TealDryrun(ctx echo.Context) error
- // Broadcasts a raw transaction to the network.
- // (POST /v2/transactions)
- RawTransaction(ctx echo.Context) error
- // Get parameters for constructing a new transaction
- // (GET /v2/transactions/params)
- TransactionParams(ctx echo.Context) error
- // Get a list of unconfirmed transactions currently in the transaction pool.
- // (GET /v2/transactions/pending)
- GetPendingTransactions(ctx echo.Context, params GetPendingTransactionsParams) error
- // Get a specific pending transaction.
- // (GET /v2/transactions/pending/{txid})
- PendingTransactionInformation(ctx echo.Context, txid string, params PendingTransactionInformationParams) error
-}
-
-// ServerInterfaceWrapper converts echo contexts to parameters.
-type ServerInterfaceWrapper struct {
- Handler ServerInterface
-}
-
-// AccountInformation converts echo context to params.
-func (w *ServerInterfaceWrapper) AccountInformation(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- "format": true,
- "exclude": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "address" -------------
- var address string
-
- err = runtime.BindStyledParameter("simple", false, "address", ctx.Param("address"), &address)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Parameter object where we will unmarshal all parameters from the context
- var params AccountInformationParams
- // ------------- Optional query parameter "format" -------------
- if paramValue := ctx.QueryParam("format"); paramValue != "" {
-
- }
-
- err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
- }
-
- // ------------- Optional query parameter "exclude" -------------
- if paramValue := ctx.QueryParam("exclude"); paramValue != "" {
-
- }
-
- err = runtime.BindQueryParameter("form", true, false, "exclude", ctx.QueryParams(), &params.Exclude)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter exclude: %s", err))
- }
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.AccountInformation(ctx, address, params)
- return err
-}
-
-// AccountApplicationInformation converts echo context to params.
-func (w *ServerInterfaceWrapper) AccountApplicationInformation(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- "format": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "address" -------------
- var address string
-
- err = runtime.BindStyledParameter("simple", false, "address", ctx.Param("address"), &address)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err))
- }
-
- // ------------- Path parameter "application-id" -------------
- var applicationId uint64
-
- err = runtime.BindStyledParameter("simple", false, "application-id", ctx.Param("application-id"), &applicationId)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter application-id: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Parameter object where we will unmarshal all parameters from the context
- var params AccountApplicationInformationParams
- // ------------- Optional query parameter "format" -------------
- if paramValue := ctx.QueryParam("format"); paramValue != "" {
-
- }
-
- err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
- }
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.AccountApplicationInformation(ctx, address, applicationId, params)
- return err
-}
-
-// AccountAssetInformation converts echo context to params.
-func (w *ServerInterfaceWrapper) AccountAssetInformation(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- "format": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "address" -------------
- var address string
-
- err = runtime.BindStyledParameter("simple", false, "address", ctx.Param("address"), &address)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err))
- }
-
- // ------------- Path parameter "asset-id" -------------
- var assetId uint64
-
- err = runtime.BindStyledParameter("simple", false, "asset-id", ctx.Param("asset-id"), &assetId)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter asset-id: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Parameter object where we will unmarshal all parameters from the context
- var params AccountAssetInformationParams
- // ------------- Optional query parameter "format" -------------
- if paramValue := ctx.QueryParam("format"); paramValue != "" {
-
- }
-
- err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
- }
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.AccountAssetInformation(ctx, address, assetId, params)
- return err
-}
-
-// GetPendingTransactionsByAddress converts echo context to params.
-func (w *ServerInterfaceWrapper) GetPendingTransactionsByAddress(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- "max": true,
- "format": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "address" -------------
- var address string
-
- err = runtime.BindStyledParameter("simple", false, "address", ctx.Param("address"), &address)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Parameter object where we will unmarshal all parameters from the context
- var params GetPendingTransactionsByAddressParams
- // ------------- Optional query parameter "max" -------------
- if paramValue := ctx.QueryParam("max"); paramValue != "" {
-
- }
-
- err = runtime.BindQueryParameter("form", true, false, "max", ctx.QueryParams(), &params.Max)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter max: %s", err))
- }
-
- // ------------- Optional query parameter "format" -------------
- if paramValue := ctx.QueryParam("format"); paramValue != "" {
-
- }
-
- err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
- }
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.GetPendingTransactionsByAddress(ctx, address, params)
- return err
-}
-
-// GetApplicationByID converts echo context to params.
-func (w *ServerInterfaceWrapper) GetApplicationByID(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "application-id" -------------
- var applicationId uint64
-
- err = runtime.BindStyledParameter("simple", false, "application-id", ctx.Param("application-id"), &applicationId)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter application-id: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.GetApplicationByID(ctx, applicationId)
- return err
-}
-
-// GetAssetByID converts echo context to params.
-func (w *ServerInterfaceWrapper) GetAssetByID(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "asset-id" -------------
- var assetId uint64
-
- err = runtime.BindStyledParameter("simple", false, "asset-id", ctx.Param("asset-id"), &assetId)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter asset-id: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.GetAssetByID(ctx, assetId)
- return err
-}
-
-// GetBlock converts echo context to params.
-func (w *ServerInterfaceWrapper) GetBlock(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- "format": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "round" -------------
- var round uint64
-
- err = runtime.BindStyledParameter("simple", false, "round", ctx.Param("round"), &round)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Parameter object where we will unmarshal all parameters from the context
- var params GetBlockParams
- // ------------- Optional query parameter "format" -------------
- if paramValue := ctx.QueryParam("format"); paramValue != "" {
-
- }
-
- err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
- }
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.GetBlock(ctx, round, params)
- return err
-}
-
-// GetBlockHash converts echo context to params.
-func (w *ServerInterfaceWrapper) GetBlockHash(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "round" -------------
- var round uint64
-
- err = runtime.BindStyledParameter("simple", false, "round", ctx.Param("round"), &round)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.GetBlockHash(ctx, round)
- return err
-}
-
-// GetLightBlockHeaderProof converts echo context to params.
-func (w *ServerInterfaceWrapper) GetLightBlockHeaderProof(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "round" -------------
- var round uint64
-
- err = runtime.BindStyledParameter("simple", false, "round", ctx.Param("round"), &round)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.GetLightBlockHeaderProof(ctx, round)
- return err
-}
-
-// GetTransactionProof converts echo context to params.
-func (w *ServerInterfaceWrapper) GetTransactionProof(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- "hashtype": true,
- "format": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "round" -------------
- var round uint64
-
- err = runtime.BindStyledParameter("simple", false, "round", ctx.Param("round"), &round)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
- }
-
- // ------------- Path parameter "txid" -------------
- var txid string
-
- err = runtime.BindStyledParameter("simple", false, "txid", ctx.Param("txid"), &txid)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter txid: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Parameter object where we will unmarshal all parameters from the context
- var params GetTransactionProofParams
- // ------------- Optional query parameter "hashtype" -------------
- if paramValue := ctx.QueryParam("hashtype"); paramValue != "" {
-
- }
-
- err = runtime.BindQueryParameter("form", true, false, "hashtype", ctx.QueryParams(), &params.Hashtype)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter hashtype: %s", err))
- }
-
- // ------------- Optional query parameter "format" -------------
- if paramValue := ctx.QueryParam("format"); paramValue != "" {
-
- }
-
- err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
- }
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.GetTransactionProof(ctx, round, txid, params)
- return err
-}
-
-// GetSupply converts echo context to params.
-func (w *ServerInterfaceWrapper) GetSupply(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.GetSupply(ctx)
- return err
-}
-
-// GetStateProof converts echo context to params.
-func (w *ServerInterfaceWrapper) GetStateProof(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "round" -------------
- var round uint64
-
- err = runtime.BindStyledParameter("simple", false, "round", ctx.Param("round"), &round)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.GetStateProof(ctx, round)
- return err
-}
-
-// GetStatus converts echo context to params.
-func (w *ServerInterfaceWrapper) GetStatus(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.GetStatus(ctx)
- return err
-}
-
-// WaitForBlock converts echo context to params.
-func (w *ServerInterfaceWrapper) WaitForBlock(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "round" -------------
- var round uint64
-
- err = runtime.BindStyledParameter("simple", false, "round", ctx.Param("round"), &round)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.WaitForBlock(ctx, round)
- return err
-}
-
-// TealCompile converts echo context to params.
-func (w *ServerInterfaceWrapper) TealCompile(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- "sourcemap": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Parameter object where we will unmarshal all parameters from the context
- var params TealCompileParams
- // ------------- Optional query parameter "sourcemap" -------------
- if paramValue := ctx.QueryParam("sourcemap"); paramValue != "" {
-
- }
-
- err = runtime.BindQueryParameter("form", true, false, "sourcemap", ctx.QueryParams(), &params.Sourcemap)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter sourcemap: %s", err))
- }
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.TealCompile(ctx, params)
- return err
-}
-
-// TealDisassemble converts echo context to params.
-func (w *ServerInterfaceWrapper) TealDisassemble(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.TealDisassemble(ctx)
- return err
-}
-
-// TealDryrun converts echo context to params.
-func (w *ServerInterfaceWrapper) TealDryrun(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.TealDryrun(ctx)
- return err
-}
-
-// RawTransaction converts echo context to params.
-func (w *ServerInterfaceWrapper) RawTransaction(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.RawTransaction(ctx)
- return err
-}
-
-// TransactionParams converts echo context to params.
-func (w *ServerInterfaceWrapper) TransactionParams(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.TransactionParams(ctx)
- return err
-}
-
-// GetPendingTransactions converts echo context to params.
-func (w *ServerInterfaceWrapper) GetPendingTransactions(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- "max": true,
- "format": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Parameter object where we will unmarshal all parameters from the context
- var params GetPendingTransactionsParams
- // ------------- Optional query parameter "max" -------------
- if paramValue := ctx.QueryParam("max"); paramValue != "" {
-
- }
-
- err = runtime.BindQueryParameter("form", true, false, "max", ctx.QueryParams(), &params.Max)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter max: %s", err))
- }
-
- // ------------- Optional query parameter "format" -------------
- if paramValue := ctx.QueryParam("format"); paramValue != "" {
-
- }
-
- err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
- }
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.GetPendingTransactions(ctx, params)
- return err
-}
-
-// PendingTransactionInformation converts echo context to params.
-func (w *ServerInterfaceWrapper) PendingTransactionInformation(ctx echo.Context) error {
-
- validQueryParams := map[string]bool{
- "pretty": true,
- "format": true,
- }
-
- // Check for unknown query parameters.
- for name, _ := range ctx.QueryParams() {
- if _, ok := validQueryParams[name]; !ok {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
- }
- }
-
- var err error
- // ------------- Path parameter "txid" -------------
- var txid string
-
- err = runtime.BindStyledParameter("simple", false, "txid", ctx.Param("txid"), &txid)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter txid: %s", err))
- }
-
- ctx.Set("api_key.Scopes", []string{""})
-
- // Parameter object where we will unmarshal all parameters from the context
- var params PendingTransactionInformationParams
- // ------------- Optional query parameter "format" -------------
- if paramValue := ctx.QueryParam("format"); paramValue != "" {
-
- }
-
- err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
- }
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.PendingTransactionInformation(ctx, txid, params)
- return err
-}
-
-// RegisterHandlers adds each server route to the EchoRouter.
-func RegisterHandlers(router interface {
- CONNECT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
- DELETE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
- GET(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
- HEAD(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
- OPTIONS(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
- PATCH(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
- POST(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
- PUT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
- TRACE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
-}, si ServerInterface, m ...echo.MiddlewareFunc) {
-
- wrapper := ServerInterfaceWrapper{
- Handler: si,
- }
-
- router.GET("/v2/accounts/:address", wrapper.AccountInformation, m...)
- router.GET("/v2/accounts/:address/applications/:application-id", wrapper.AccountApplicationInformation, m...)
- router.GET("/v2/accounts/:address/assets/:asset-id", wrapper.AccountAssetInformation, m...)
- router.GET("/v2/accounts/:address/transactions/pending", wrapper.GetPendingTransactionsByAddress, m...)
- router.GET("/v2/applications/:application-id", wrapper.GetApplicationByID, m...)
- router.GET("/v2/assets/:asset-id", wrapper.GetAssetByID, m...)
- router.GET("/v2/blocks/:round", wrapper.GetBlock, m...)
- router.GET("/v2/blocks/:round/hash", wrapper.GetBlockHash, m...)
- router.GET("/v2/blocks/:round/lightheader/proof", wrapper.GetLightBlockHeaderProof, m...)
- router.GET("/v2/blocks/:round/transactions/:txid/proof", wrapper.GetTransactionProof, m...)
- router.GET("/v2/ledger/supply", wrapper.GetSupply, m...)
- router.GET("/v2/stateproofs/:round", wrapper.GetStateProof, m...)
- router.GET("/v2/status", wrapper.GetStatus, m...)
- router.GET("/v2/status/wait-for-block-after/:round", wrapper.WaitForBlock, m...)
- router.POST("/v2/teal/compile", wrapper.TealCompile, m...)
- router.POST("/v2/teal/disassemble", wrapper.TealDisassemble, m...)
- router.POST("/v2/teal/dryrun", wrapper.TealDryrun, m...)
- router.POST("/v2/transactions", wrapper.RawTransaction, m...)
- router.GET("/v2/transactions/params", wrapper.TransactionParams, m...)
- router.GET("/v2/transactions/pending", wrapper.GetPendingTransactions, m...)
- router.GET("/v2/transactions/pending/:txid", wrapper.PendingTransactionInformation, m...)
-
-}
-
-// Base64 encoded, gzipped, json marshaled Swagger object
-var swaggerSpec = []string{
-
- "H4sIAAAAAAAC/+x9aXPctrLoX8Gbe6u83OFI3nKPVZW6T7GdHL1jOy5bJ3eJ/GIM2TODIxLgAUBpJn7+",
- "76/QAEiQBGeoxbKd6JOtIZZGo9HoHR8nqShKwYFrNTn4OCmppAVokPgXTVNRcZ2wzPyVgUolKzUTfHLg",
- "vxGlJePLyXTCzK8l1avJdMJpAU0b0386kfDPiknIJgdaVjCdqHQFBTUD601pWtcjrZOlSNwQh3aIo+eT",
- "T1s+0CyToFQfyp95viGMp3mVAdGSckVT80mRc6ZXRK+YIq4zYZwIDkQsiF61GpMFgzxTM7/If1YgN8Eq",
- "3eTDS/rUgJhIkUMfzmeimDMOHiqogao3hGhBMlhgoxXVxMxgYPUNtSAKqExXZCHkDlAtECG8wKticvDr",
- "RAHPQOJupcDO8L8LCfA7JJrKJejJ+2lscQsNMtGsiCztyGFfgqpyrQi2xTUu2RlwYnrNyKtKaTIHQjl5",
- "++Mz8ujRo6dmIQXVGjJHZIOramYP12S7Tw4mGdXgP/dpjeZLISnPkrr92x+f4fzv3ALHtqJKQfywHJov",
- "5Oj50AJ8xwgJMa5hifvQon7TI3Iomp/nsBASRu6JbXytmxLO/0V3JaU6XZWCcR3ZF4Jfif0c5WFB9208",
- "rAag1b40mJJm0F/3k6fvPz6YPtj/9C+/Hib/4/588ujTyOU/q8fdgYFow7SSEni6SZYSKJ6WFeV9fLx1",
- "9KBWosozsqJnuPm0QFbv+hLT17LOM5pXhk5YKsVhvhSKUEdGGSxolWviJyYVzw2bMqM5aidMkVKKM5ZB",
- "NjXc93zF0hVJqbJDYDtyzvLc0GClIBuitfjqthymTyFKDFyXwgcu6OtFRrOuHZiANXKDJM2FgkSLHdeT",
- "v3Eoz0h4oTR3lbrYZUWOV0BwcvPBXraIO25oOs83ROO+ZoQqQom/mqaELchGVOQcNydnp9jfrcZgrSAG",
- "abg5rXvUHN4h9PWQEUHeXIgcKEfk+XPXRxlfsGUlQZHzFeiVu/MkqFJwBUTM/wGpNtv+f979/JoISV6B",
- "UnQJb2h6SoCnIhveYzdp7Ab/hxJmwwu1LGl6Gr+uc1awCMiv6JoVVUF4VcxBmv3y94MWRIKuJB8CyI64",
- "g84Kuu5PeiwrnuLmNtO2BDVDSkyVOd3MyNGCFHT9/f7UgaMIzXNSAs8YXxK95oNCmpl7N3iJFBXPRsgw",
- "2mxYcGuqElK2YJCRepQtkLhpdsHD+MXgaSSrABw/yCA49Sw7wOGwjtCMObrmCynpEgKSmZG/O86FX7U4",
- "BV4zODLf4KdSwhkTlao7DcCIU28Xr7nQkJQSFixCY+8cOgz3sG0cey2cgJMKrinjkBnOi0ALDZYTDcIU",
- "TLhdmelf0XOq4LvHQxd483Xk7i9Ed9e37vio3cZGiT2SkXvRfHUHNi42tfqPUP7CuRVbJvbn3kay5bG5",
- "ShYsx2vmH2b/PBoqhUyghQh/8Si25FRXEg5O+H3zF0nIO015RmVmfinsT6+qXLN3bGl+yu1PL8WSpe/Y",
- "cgCZNaxRbQq7FfYfM16cHet1VGl4KcRpVYYLSlta6XxDjp4PbbId86KEeVirsqFWcbz2msZFe+h1vZED",
- "QA7irqSm4SlsJBhoabrAf9YLpCe6kL+bf8oyN711uYih1tCxu2/RNuBsBodlmbOUGiS+dZ/NV8MEwGoJ",
- "tGmxhxfqwccAxFKKEqRmdlBalkkuUponSlONI/2rhMXkYPIve41xZc92V3vB5C9Nr3fYycijVsZJaFle",
- "YIw3Rq5RW5iFYdD4CdmEZXsoETFuN9GQEjMsOIczyvWs0Uda/KA+wL+6mRp8W1HG4rujXw0inNiGc1BW",
- "vLUN7ygSoJ4gWgmiFaXNZS7m9Q93D8uywSB+PyxLiw8UDYGh1AVrprS6h8unzUkK5zl6PiM/hWOjnC14",
- "vjGXgxU1zN2wcLeWu8Vqw5FbQzPiHUVwO4Wcma3xaDAy/HVQHOoMK5EbqWcnrZjGf3VtQzIzv4/q/G2Q",
- "WIjbYeJCLcphziow+EugudztUE6fcJwtZ0YOu30vRzZmlDjBXIpWtu6nHXcLHmsUnktaWgDdF3uXMo4a",
- "mG1kYb0iNx3J6KIwB2c4oDWE6tJnbed5iEKCpNCB4YdcpKd/pWp1DWd+7sfqHz+chqyAZiDJiqrVbBKT",
- "MsLj1Yw25oiZhqi9k3kw1axe4nUtb8fSMqppsDQHb1wssajHfsj0QEZ0l5/xPzQn5rM524b122Fn5BgZ",
- "mLLH2XkQMqPKWwXBzmQaoIlBkMJq78Ro3ReC8lkzeXyfRu3RC2swcDvkFmGW3pgDD+dCXu5IdGidk8bI",
- "SagZNeAI087OYtOqTBx+IoYS26AzUONX2k7J3eFjuGph4Z2mnwELyox6HVhoD3TdWBBFyXK4hvO6inIi",
- "o7k+ekje/fXwyYOHvz188p3hGqUUS0kLMt9oUOSuUxiI0psc7vVXhiJ7lev46N899qax9rixcZSoZAoF",
- "LftDWZObvZdtM2La9bHWRjOuugZwzLE8BsNeLNqJtSYb0J4zZa79Yn4tmzGEsKyZJSMOkgx2EtNFl9dM",
- "swmXKDeyug79CqQUMmL0wSOmRSry5AykYiJiv3/jWhDXwstcZfd3Cy05p4qYudEeWXG85SKUpdccQWMa",
- "CrVLZrBDH695gxs3IJWSbnrot+uNrM7NO2Zf2sj35i1FSpCJXnOSwbxatsTzhRQFoSTDjnhxvGTLlQ7u",
- "0TdSiMW1S1TRWWJLwg9WCslNn74s8lpkYHTBSl0De28Ga7BnKCfEGZ2LShNKuMgAFcdKxRn/gPcR3R7o",
- "rdHhXaJXVrCYg1FSUlqZ1VYlQV9EjxabjglNLRUliBo1YKytrey2lZ3OerZyCTQzygtwIubOIupstbhI",
- "io4U7Vmnu3Yi6lwLrlKKFJQySqdVJXaC5ttZstRb8ISAI8D1LEQJsqDyksBqoWm+A1BsEwO3lhOdGbkP",
- "9bjpt21gd/JwG6k0eqelAiOUmgOXg4YhFI7EyRlINKd+1v3zk1x2+6pyINjBiVbHrED1lVMuFKSCZyo6",
- "WE6VTnYdW9OoJf+ZFQQnJXZSceABE8pLqrQ1qjOeoS5g2Q3OY20rZophgAevQDPyL/7264+dGj7JVaXq",
- "q1BVZSmkhiy2Bg7rLXO9hnU9l1gEY9f3rRakUrBr5CEsBeM7ZNmVWARRXduenNepvzi00Jh7YBNFZQuI",
- "BhHbAHnnWwXYDR2+A4AYxbHuiYTDVIdyai/zdKK0KEtz/nRS8brfEJre2daH+u9N2z5xUd3w9UyAmV17",
- "mBzk5xaz1tW/okZox5FJQU/N3YQiuLX+92E2hzFRjKeQbKN8cyzfmVbhEdhxSAe0HxdMFMzWORwd+o0S",
- "3SAR7NiFoQUPqGJvqNQsZSVKEn+DzbULVt0JolYrkoGmzKgHwQcrZJVhf2LdOd0xLydojZKa++D3xObI",
- "cnKm8MJoA38KGzRfv7FxAsdBdME1SIqRUc3pppwgoN77aC7ksAmsaarzjbnm9Ao25BwkEFXNC6a1Dfxo",
- "C5JalEk4QNQisWVGZxOyPna/A2OMVO9wqGB5/a2YTqzYsh2+447g0kKHE5hKIfIR7oEeMqIQjHIfkFKY",
- "XWcuzsgHo3hKagHphBg0CNbM845qoRlXQP5bVCSlHAWwSkN9IwiJbBavXzODucDqOZ2joMEQ5FCAlSvx",
- "y/373YXfv+/2nCmygHMfnGcadtFx/z5qSW+E0q3DdQ0qujluRxHejqYac1E4Ga7LU3Ybqt3IY3byTWfw",
- "2r5jzpRSjnDN8q/MADoncz1m7SGNjDPS47ijrDDB0LF1476jl/Tz6PDN0DHo+hMHvqXm45B7ychX+eYa",
- "+LQdiEgoJSg8VaFeouxXsQjjN92xUxuloeir9rbrbwOCzVsvFvSkTMFzxiEpBIdNNGWBcXiFH2O97cke",
- "6Iw8dqhvV2xqwd8Bqz3PGCq8Kn5xtwNSflP7Va9h87vjdqw6YeQqaqWQl4SSNGeoswqutKxSfcIpSsXB",
- "WY6Y+r2sP6wnPfNN4opZRG9yQ51wqgwOa1k5ap5cQEQL/hHAq0uqWi5B6Y58sAA44a4V46TiTONchdmv",
- "xG5YCRLt7TPbsqAbsqA5qnW/gxRkXun2jYkBdkobrcuamMw0RCxOONUkB6OBvmL8eI3D+Tg2TzMc9LmQ",
- "pzUWZtHzsAQOiqkk7pL4yX5FF6Zb/sq5MzHbwX62RhQzfhOFt9HQiuD/v3f/4+DXw+R/aPL7fvL03/be",
- "f3z86d793o8PP33//f9r//To0/f3/uNfYzvlYY+FfznIj547afLoOYoMjXGpB/uNWRwKxpMokR2vgBSM",
- "YxRxh7bIXSP4eAK615ip3K6fcL3mhpDOaM4yqi9HDl0W1zuL9nR0qKa1ER0F0q/1fcyluxRJSdNT9OhN",
- "lkyvqvksFcWel6L3lqKWqPcyCoXg+C3boyXbUyWke2cPdlzpV+BXJMKuOkz20gJB3x8YD9lEk6WLwsST",
- "t6i4JYpKOSMlRiR5v4xYTOuwXJuOd0AwZnNFvVPR/fnwyXeTaRNrWX83mrr9+j5yJli2jkXUZrCOSWru",
- "qOERu6NISTcKdJwPIexRF5T1W4TDFmBEfLVi5c3zHKXZPM4rfZyH0/jW/IjbAAxzEtE8u3FWH7G4ebi1",
- "BMig1KtYmk5L5sBWzW4CdFwqpRRnwKeEzWDW1biyJSjvDMuBLjBdBE2MYkzcWn0OLKF5qgiwHi5klFoT",
- "ox8Ukx3f/zSdODFCXbtk7waOwdWds7bF+r+1IHd+enFM9hzrVXdscLcdOgjHjVgyXMRZy9lmuJlNTrTR",
- "7Sf8hD+HBePMfD844RnVdG9OFUvVXqVA/kBzylOYLQU58EFsz6mmJ7wnsw3mDwfhg6Ss5jlLyWkoWzfk",
- "aXPC+iOcnPxqOP7Jyfue56YvCbupovzFTpCcM70SlU5c0ksi4ZzKLAK6qpMecGSbsrZt1ilxY1tW7JJq",
- "3PhxnkfLUnWDn/vLL8vcLD8gQ+VCe82WEaWF9FKNEXUsNLi/r4W7GCQ99xlTlQJFPhS0/JVx/Z4kJ9X+",
- "/iMgrWjgD054MDS5KaFl87pUcHbX3oULtxoSrLWkSUmXoKLL10BL3H2UvAu0ruY5wW6tKGQf0IJDNQvw",
- "+BjeAAvHhSMqcXHvbC+fvRxfAn7CLcQ2RtxonBaX3a8gLvnS29WJbe7tUqVXiTnb0VUpQ+J+Z+qkxqUR",
- "srwnSbElN4fA5X/OgaQrSE8hw1Q0KEq9mba6e2elE1k962DKpmzaqELMK0Lz4BxIVWbUCfWUb7oJHgq0",
- "9lktb+EUNseiSUu6SEZHO8FADR1UpNRAujTEGh5bN0Z3853jG4Oqy9LH6WPApieLg5oufJ/hg2xF3ms4",
- "xDGiaAXADyGCyggiLPEPoOASCzXjXYn0Y8sz+src3nyRDE/P+4lr0qhhznkdrgbj+u33AjD/W5wrMqdG",
- "bhcuddkG0QdcrFJ0CQMScmihHRmq3rLq4iC77r3oTScW3Qutd99EQbaNE7PmKKWA+WJIBZWZTsiCn8k6",
- "AXAFM4IVSRzC5jmKSXW0hGU6VLYs5bbEwhBocQIGyRuBw4PRxkgo2ayo8lnVmHzuz/IoGeAzJoVsSwU8",
- "CrztQYZ5nejneW73nPa0S5cQ6LMAfepfqFqOSOMzEj4GgMW2Q3AUgDLIYWkXbht7QmkSVJoNMnD8vFjk",
- "jANJYo57qpRImU2Lb64ZNwcY+fg+IdaYTEaPECPjAGx0buHA5LUIzyZfXgRI7hJsqB8b3WLB3xAPu7Sh",
- "WUbkEaVh4YwPBNV5DkBdtEd9f3VijnAYwviUGDZ3RnPD5pzG1wzSy0hDsbWTf+bcq/eGxNkttnx7sVxo",
- "TfYqusxqQpnJAx0X6LZAvF2UiG2BQnw5W1aNq6G7dMzUA9f3EK7uBrlslwKgo+k3VZ+c5rdTQ2vfzf2b",
- "rGHp0yZH20eVxmh/iH6iuzSAv74Jos4+e9O9rqNKetvt2k68C+SnGCs2Z6Tv6+h7VBTkgBJx0pIgktOY",
- "B8wI9oDs9p3vFmjumN5H+eZe4MuXsGRKQ2OLNreSd67ctG2OYlUBIRbDq9OlXJj1vRWi5tE2bRU7tpZ5",
- "4ys4ExqSBZNKJ2jIjy7BNPpRoUb5o2kaFxTa0QK2wA7L4rwBpz2FTZKxvIrTq5v3b8/NtK9rI4yq5qew",
- "QXEQaLoicywIFY0h2jK1DTPbuuCXdsEv6bWtd9xpME3NxNKQS3uOb+RcdDjvNnYQIcAYcfR3bRClWxgk",
- "XvzPIdex9LBAaLCHMzMNZ9tMj73DlPmxd0ZfWCiG7yg7UnQtgba8dRUMfSRG3WM6qKfUT3kYOAO0LFm2",
- "7hgC7aiD6iK9kLbvE9U7WMDddYPtwEBg9ItF1UpQ7ZoEjXRrK2PxcG2zUZg5blcOCBlCOBVTvq5jH1GG",
- "tLH42C5cHQPN/wabX0xbXM7k03RyNbthDNduxB24flNvbxTP6OG2dqSWG+CCKKdlKcUZzRNnXR0iTSnO",
- "HGlic2+MvWFWF7fhHb84fPnGgf9pOklzoDKpRYXBVWG78ptZlS1/MHBAfN04o/B4md2KksHm1znboUX2",
- "fAWuRlcgjfaKiTTW9uAoOgvtIh5os9Pe6hwDdolbHARQ1v6BxnZl3QNtlwA9oyz3RiMP7UBQDC5uXEWa",
- "KFcIB7iyayHwECXXym56pzt+Ohrq2sGTwrm2VBErbKE8RQTv+o+NCIm2KCTVgmIpEGsS6DMnXhWJOX6J",
- "ylkaNzDyuTLEwa3jyDQm2HhAGDUjVmzAD8krFoxlmqkRim4HyGCOKDJ9WZkh3M2Fq3BccfbPCgjLgGvz",
- "SeKp7BxUrL3iTM3969TIDv253MDWPN0MfxUZIyyD073xEIjtAkbopuqB+7xWmf1Ca3OM+SGwx1/A2x3O",
- "2LsSt3iqHX04arYxgKu2uyksSNznf4YwbPG63dWQvfLq6vEMzBGtbsxUspDid4jreageR0LufeEfhiEe",
- "vwOfRTKXuiymtu40RZqb2Qe3e0i6Ca1QbQ/9ANXjzgc+KaxA4s2zlNuttsVGW4FecYIJgzP37PgNwTiY",
- "ewGtOT2f01h5FiNkGJgOG+9ny5CsBfGdPe6dzZu5WkwzEjhS67bMJqOVIJtsmH7i8yUFBjvtaFGhkQyQ",
- "akOZYGqdX7kSkWEqfk65rVlr+tmj5HorsMYv0+tcSEwlVXGbdwYpK2gelxwyxH479TZjS2YrtlYKgpKg",
- "biBb6tpSkSurav3LDWqOFmR/GhQddruRsTOm2DwHbPHAtphThZy8NkTVXczygOuVwuYPRzRfVTyTkOmV",
- "sohVgtRCHao3tedmDvocgJN9bPfgKbmLPivFzuCewaK7nycHD56i0dX+sR+7AFxp5m3cJEN28p+OncTp",
- "GJ12dgzDuN2os2hipK2nP8y4tpwm23XMWcKWjtftPksF5XQJ8TCJYgdMti/uJhrSOnjhmS0GrbQUG8J0",
- "fH7Q1PCngSBuw/4sGCQVRcF04TwbShSGnpp6n3ZSP5ytLO1KNXm4/Ed0EJbeP9JRIm/WaGrvt9iq0Y37",
- "mhbQRuuUUJs/nLPGde8LyJEjX4UAa1fVJassbsxcZuko5qAnf0FKybhGxaLSi+QvJF1RSVPD/mZD4Cbz",
- "7x5H6nW1S/TwiwF+43iXoECexVEvB8jeyxCuL7nLBU8Kw1Gye03SRHAqBz2Z8Wgxz9G7wYLbhx4rlJlR",
- "kkFyq1rkRgNOfSXC41sGvCIp1uu5ED1eeGU3TpmVjJMHrcwO/f3tSydlFELGatI0x91JHBK0ZHCGgWvx",
- "TTJjXnEvZD5qF64C/Zf1PHiRMxDL/FmOKQI/VCzPfmmSwDolDyXl6Spq95+bjr81xbfrJdtzHC2BsqKc",
- "Qx4dzt6Zv/m7NXL7/0OMnadgfGTbbilDu9zO4hrA22B6oPyEBr1M52aCEKvtrJg66jJfiozgPE29jYbK",
- "+tUZg3Jl/6xA6ViGAX6wkR9o3zF6ga2WRYBnKFXPyE/28ZwVkFY5AJRmWVHlNrUcsiVIZ3isylzQbErM",
- "OMcvDl8SO6vtY0vI2mpdSxTm2qvo6PVBcZ5xMYS+Gmw8vnn8ONsDLs2qlcbqHErToozlopkWx74BJryF",
- "tk4U80LszMhzK2ErL7/ZSQw9LJgsjGRaj2Z5PNKE+Y/WNF2h6NriJsMkP77MnKdKFbw3UNcNruvr4Lkz",
- "cLtKc7bQ3JQIo1+cM2XfTIEzaKe/1bmgTnXy6XDt5cmKc0spUR69LVf5Mmj3wFmHtjeHRiHrIP6Cgout",
- "0njRqnvvsFe0YEW3hF/voQGbAlXXg/VvYaWUC85SLBcRvNJSg+zeXxnjKxhRWaNrjPJH3J3QyOGKFg6s",
- "w4kcFgdLCXpG6BDXN1YGX82mWuqwf2p86GNFNVmCVo6zQTb19S+dvYRxBa5eEj7FE/BJIVv+F+SQUZde",
- "Upt+L0hGGDs/IAD/aL69duoRBpWeMo6CkEObi1+1Fg18HkIb6YlpshSg3HraCYTqV9Nnhrl0Gazfz/xz",
- "EjiGdV+YZVtfXX+oQ++5c54y0/aZaWsrJzQ/t8IU7aSHZekmHa6OGpUH9JoPIjjigUm8CTxAbj1+ONoW",
- "ctvqcsf71BAanKHDDkq8h3uEUVcK7ZRGPqN5ZSkKWxAb6hJNmGY8AsZLxqF57CRyQaTRKwE3Bs/rQD+V",
- "SqqtCDiKpx0DzdFLF2NoSjsT7VWH6mwwogTX6OcY3samyOkA46gbNIIb5Zv6jRVD3YEw8Qwfd3KI7Jcs",
- "RanKCVEZhh13ipjGGIdh3L5McvsC6B+Dvkxku2tJ7cm5yE00lEk2r7Il6IRmWazQ3A/4leBXklUoOcAa",
- "0qou1FWWJMUSDO2aFH1qcxOlgquq2DKXb3DF6VIRk6Nf4wTKx1U3g88Isl/Dep+/ePP2xbPD4xfP7X1h",
- "1HKbSmZkbgmFYYhGj1UajOhcKSAfQjR+wH4fOguOgxkUL44QbVhA2RMiBtTPN/hvrJjWMAE5n/qFo7q8",
- "Ax07Xli8b4/UE87N0UsUWybjMYFX39XR0Ux9ufPY9L/WA5mLZRuQG05z38aMwz2KseEX5n4Ls8B7FeLs",
- "DVgnaWMMlfDvIKB2W6cXtpkn3ri9knFou69L2m+3ngwXp5/iHT0QSRkk91MrBlhn0FA8ZToY/ku1y8LR",
- "lGzllFhRPjaCDcawleztc59RQ9hQAIaNvzCfe73HCbA9dQDH3opQH9nTB+hvPmyQlJQ5T2fDLPqYdQHG",
- "/ZDvMaGHzQZ3F+HCdnGQ2EriFcKH62w0tTXwGiiFYk1Vy1jp8JFhJcdY/TuoE9Ify/t0zyDVRqgPfFUS",
- "4CJVQ8xkwUMHt/U2BtSPOvrGldnYVlujX790B7PpZQAEWSy29uNsfCWJwzoiAf2k+NTAErh7a6Ad2zs6",
- "wnCxgFSzsx0ZF/9ptNQmmn/q9Vj7kE2QgMHqiDX/wPAF1esGoG0JEVvhCepPXRmcoXjrU9jcUaRFDdFi",
- "lFPP8y6TqIwYQO6QGBIRKubxs4Y354RhqqYMxIL3sNvu0JR8GawCHuQPXXIuT5KEhjlFW6Y8EzHNfdRc",
- "puuFMu0w+GooKaNfh3dYEHqOZY9V/YJD/YJwoNWQo345qHOXKI35MbWt2adMg/K/+WQ4O4t9mbqpU46W",
- "/XMqM98iqqp6LTjZch/1Mil8Ddku0It6ZtbEQ/Vj5yMFRjDqLc2FYnyZDIUOtkOQwlft0NGK1wEWOEa4",
- "FiDd+wTaP/ydaOHjp7bBsQ0V7gW2yyBBDRb1ssANptq/bWoJYJlEap99d07kcIFGb6UGOhlk/A/PuQ3Z",
- "z+x3Hyzuy+SN0MgdvSY7U/Z9JBxTPSSGVL8g7rbcHYR+Ga2XcW7fq1Gx9H9uUBlaj0spsiq1F3R4MBob",
- "w9jiGltYSVRhTPur7Mn+OZaaeRmk9JzCZs/K3+mK8qbmT/tYWxHKriFIoe3s9rUaBOK6T760C1heC5xf",
- "UqmeTkoh8mTAXHzUr2LQPQOnLD2FjJi7w8eQDFQCJ3fRSln7A89XG5+1X5bAIbs3I8So5UWpN9412C7I",
- "2Zmc39Hb5l/jrFllC4s4fX92wuPhT1jyQ16Rv/lhtnM1BYb5XXEqO8iOMgHrgQoKkp5H6uKPfasx4qzr",
- "1ipviMpCEZNSLpkzOup893X+COkHxbq3az9hSrnP+kyFtKYjlJa8QacrvLxqLELjyob7DjvAC5XioHC4",
- "50YOnC8cI/SqRkqwlEFKaC1/l57tnxit+VKwRQojkM0ylS0YJvpCZWBEUc9q20Qcz30TBuaPC441Nfqm",
- "D4WmRKxLGRKOOZfyjOY3b77AwgKHiA/3+k18oaH+GyLZolJdLlrhJR01d6DrXt/U/A2aW/4TzB5FbcBu",
- "KGdHrQu2+zpzWD+J5iQXzcMNOCQ5xzGt0fjBd2TuIlJLCSlTrBOsf+5L5tXqHlaQbR5F2q5f7lrnL0Jf",
- "gYydgiBK8ropv6UF3g8NhM0R/cJMZeDkRqk8Rn09sojgL8ajwtTQHdfFacuabMsZdqI5hIRrtioHbuwL",
- "WpX7Sa9jl4frwEunUtBf5+jbuoXbyEXdrG2sS6SP3GFPhp6P8WTES6+Z7uhKsQjBuoUEQSUfHnwgEhZY",
- "mFyQ+/dxgvv3p67ph4ftz+Y4378fFeNuzInSej/YzRujmF+Gov9shNtAoGlnPyqWZ7sIoxU23DwSgIGx",
- "v7kA6y/yTMFv1p7aP6quwPNF3LfdTUDERNbamjyYKggIHhEL7LrNoi88K0gryfQG8769+Y39Fq2n81Nt",
- "sXcenzpT0N19WpxCXTmgse9Xyt+uPwn7InRhZGp0nmt8MerFmhZlDu6gfH9n/u/w6C+Ps/1HD/59/pf9",
- "J/spPH7ydH+fPn1MHzx99AAe/uXJ4314sPju6fxh9vDxw/njh4+/e/I0ffT4wfzxd0///Y7hQwZkC+jE",
- "ZxlN/gvf8kgO3xwlxwbYBie0ZPVDcYaMfRlxmuJJhIKyfHLgf/rf/oTNUlE0w/tfJy6JYbLSulQHe3vn",
- "5+ezsMveEg16iRZVutrz8/Qf6HpzVAdY28RY3FEbO2tIATfVkcIhfnv74t0xOXxzNGsIZnIw2Z/tzx7g",
- "8zslcFqyycHkEf6Ep2eF+77niG1y8PHTdLK3Apqj/8v8UYCWLPWf1DldLkHOXD1189PZwz0vSux9dMbM",
- "T2bUZSwj3oaKB/HB/TLjzjGC8TY2FLxVtlO5KpLTupirszXwDCN4rX3QsLYaWUdZU7jtqGFUPn3d1vM5",
- "+DXyXs2CLSvZedqyjiJwlZ6ZIvZhdUmcSvOGpqdhlCwS5D8rkJuGYBwrCwvR+MKbLpa2UMuyHXjWiEmx",
- "R/Bi9dpxZrPPAaXWfoWGE2lZQQhJw1cNr9xPnr7/+OQvnyYjAEEnlwJMU/xA8/yDfXsU1ugp8In+LpFz",
- "GikyieLxtLFTY4dmm6YYOVd/DeuI123a8dofuODwYWgbHGDRfaB5bhoKDrE9eI+JdEgJeIge7u9f2wME",
- "dYqCjb+rR/EkcYmB+hzGfoq8TubfIRh4muzxNS60Hblz5eV2h+st+geaYW1nUNou5cE3u5Qjjn5mw/GJ",
- "vdE+TSdPvuG9OeKG59CcYMsgS71/i/ydn3Jxzn1LI81URUHlBmWVoAB9KJV+Gryt9sJiuXsfW17I7Ep3",
- "Wa9O+NHzHdfbHTXEFPvlmzq1eM33utos+qlcwWFYM6XVvRn5KeyNjBmzIW2uYSV58xpnKcUZywyLdQEc",
- "vmhEA9sdFSaKRi/bwLR7e+9+1nv3sG11aNX/iQHTIvGtMPXCFK568fWj0ztPqVzqqZKg6u8laid+1nru",
- "HaVv8IHsEQz2FndDj4sPiDcBvLWk067W/Pn5rtXfgmuidR98Rq78jQtrr2hu6CRYbidTzhbFuhXi/jRC",
- "XB25Zl9EwzqQ28Q6LPa+99HXMLsGUc7VcBshxIWabtA3qLF1t8Mp7s1sQbKwzeXYgYtC2ymeYWW5W8Hs",
- "cwtm/ZKMMTCaQntfThhDGFZNzcaLPEPWemLhQrUlv1Hp60+MrEFxy0C6W9C6BG/sCVGOE382nvmHFJ4c",
- "0m7Fpj+12GQDv7cITq16qS5LYFh2Au0Sw2yCcySrQGFwsh19SpSQLla2lExIpjdTwjjJwJw99BgKiaVh",
- "tKx4ag39dgrg+N9Xh/+FeQqvDv+LfE/2p7UIhpnzkeltJGhbBvoJdD/gWf2wOazFga2y0FcjYBzXSApS",
- "EULUa+FLniLSCrr+fghla+tXjIlnBV1Ptkoi029HWryq0NRJwexTERb+4QSd/v55vnb8rSKwpqnON4Ti",
- "/bOxiSKqmjf1StvihhZlEg4QjXHbMqN//SuWrX7REOBIYSF8ZWs7fMed2o4tdLgcUnxqb7dg0kNGFILL",
- "SXm3u/vN7m5fLCWlMGeaYeGq5j7xd1ULyOYNKAfuQHbDjPy3qDDYxT5xCrGi6zgDZoL4OZ0AGmQB5/jA",
- "bI2d+/e7C79/3+05U2QB58hBKceGXXTcv/8HEFnXda1rSrjgCccXOM+ABBFyt3LrVy23Ptl/9M2u5h3I",
- "M5YCOYaiFJJKlm/I33ldHPBqYnnNcyoelGvcyn96aVWNFB2I71fyXXd900w3kmEryzYwIdQPJTtdedq8",
- "tGR0eSzq5gvlqKl3nWDgn/Wq2P2Y9hwrs5iQHnhwftgcPR8jl38jjtDRxUUj91p8bz73DRCNp3l7M/E0",
- "45jp4/3HNwdBuAuvhSY/ornsM7P0z2o7iJNVwGwu7FFpPCYha3FZ61uZijmhU1cQHiuUb0idVWr4iWWE",
- "9kmoPtcwM4zlF1+xfX6nWThKl1303vKFW75wJb7QJaiGI2A2pNr7iK6CkB30jiRmc/2BXIyBv0WKwjtc",
- "BFmATlcuS7STFhNhK75G8TBP2faSzzX7/xDoSInMsNgXvjAzMns8SOBDpxfICPH97Ovwmc9sgTUA6vrT",
- "/sEqdOcw/4ZD/XyDe+SGKR9z7pNJzS5eCMpnzeT9NB1Ey3X4DG8RfDEE95jaC5cIb4+XW8QfISrdP7WQ",
- "kNeiyVV25Zf/iGaPz3kjf+4FvRYcrF/aSKyWFm9dkLW4gG/eIVJ8kQrreHTP6MdFhz3/UMtW+eGv9pmU",
- "rTLEmIvZTPZN3s5/jT4V17pAzNpmO5Prm9HG8F3T0NYSbVcR/YKKxRdhlV+htvElmNHNcA88pJ6FuBuf",
- "j+YnWK3F0uleXRtyiLnEy+2OZjRa1AFX0Qq5c8gFX6qvk8ts2/g4XiIEUBcijlcb/vMdy2dYCIYLX3PR",
- "lQZSjKdg3xjyz3EWTCkXFvh4/y83B6FmhS+nxsOsyC/MOD6nB+kmXT5YS7ku1eWDr6LlsxXLOqVng9o9",
- "Q/ytFaT1Ua9Z9mk3nwsc/RdkcYwHLC70TNGyBCovz9t2RxIdd2Y8eh6GuLaq99Z1lSKgGBRdMAjr3yYj",
- "DUGYKy0W7sqquAW0fg7ccgAXfyoW09rPbe5usTggJ/w+USv65MHD3x4++c7/+fDJdwOmLDOPK93SN2Y1",
- "A5nPdpgxFq0/bsRWW5CukXdw01t5sR2aTli2jpbqbMrxh+fCuY2RT9xRpKSbwQq/5Y7nBMJhm6cFbr6e",
- "ndJsHn8d22sk9Zt1R/yHWue0RddcFf7bZwQG4qwCJmIIrXlPoMb69qcFtkiBHbKsa7jftL7YhMHbW8wj",
- "T3YulC8qoOovpTcmqDYC9wJJGy1fThbEcrLTwClcvwJq5GlVlaWQuj7dajZKTIPByJxQShskXCeEpVSn",
- "q6rc+4j/wfpMn5pKSPaR2z3rrt4mh72zLa41ENmO2VS2bJcEcy50sSCvWCrFIRYadjeG2igNRS8o1XX9",
- "bdvzqdHbRfCccUgKwWPVxH7Gr6/wY7SUMQY3DnTGMNOhvt2Hq1vwd8BqzzOGuV0Vv1+JFnwlw0xntRLK",
- "OpkDTR9I/81pab0l0xyT1s97H1t/uqgS11KtKp2J86AvKkn23I/xOgfliMebjWvlolPWV5EMlKGub8+Q",
- "E+AhRtr110g9qKDo9GBJqD+paWfBeNYhEpTeUnEGUtVKv/QBILf2nT+OfWf8vgecq1K7mFWlrlcqeC0y",
- "sOO2S4XG8gO5yMCVV+wLA7W8E1eb/c3QtOsoMimtlitNqpJoEVOZmo4JTS3/tA9JqV0v79hW/oWJMyA0",
- "l0CzDZkDcCLmZtHtF8wIVfjOmde7nFQXf0CmgauUIgWlIEvqJ8l3gFYXrUQtTW/BEwKOANezECXIgspL",
- "AmvFm+2A6k7STg1uHQniJJg+1OOm37aB3cnDbaQSiL9F0ewiijIHZ3iJoHAkTtAuwD7z/vlJLrt9VYkP",
- "20eeQLJfj1mBlzCnXChIBc/U8ENlu44tFuMP1qLMCoKTEn0v3Aw8oBS8pEq/dQbj8D2XoOi/mWLLy2pD",
- "BafNyL/U5aZ7Y6eGX3JVqbomtdMTIYutgcN6y1yvYV3PhcZ4P3atiGpBKgW7Rh7CUjC+Q5YKn0rTgakd",
- "S/L3F4dFDqhTIvuobAHRIGIbIO98qwC7oRl4ABB8qLoM1QD3Lk8D11yIHCi39jxRlub86aTidb8hNL2z",
- "rQ/135u2feJyyeHI1zMBKjQSOMjPLWYVRpCvqCIODlLQU2dHWLoc7T7M5jAm6LdLtlG+OZbvTKvwCOw4",
- "pF2FNTz+rXPWORwd+o0S3SAR7NiFoQXHVOSvQva/qDzbdS58xmCGtokgEK9mHalw75wynSyEdG9n0oUG",
- "GdFvO9WWKdPKGa6sBU8L548jOIJjKG4c9wZjU67KJbhaEHyRBbP7/ZwKM9WPQo4K4m7HRVCmScU185Wy",
- "zHmrZcyvT4m+lZ5vpedb6flWer6Vnm+l51vp+VZ6/tzS85cKnk48n/bxK7GCG2TyTUr4txbrLdpIIKY6",
- "JcGI6OYcb83W0EBzXBDL8XIthRpM+8bH2ZSoZAokNdMxTsqcGmkI1toXHyNzquC7x/Xjpq5khHuezfAa",
- "0+DRQ/Lur4c+mmrlon7abe+6om1E6U0O91xWW/1+kk9vA24w6LLbqNd+Uhf1ZoX5BcuBKIOrF9j6OZxB",
- "biR5G6hBjC7S146OgebPHG52KEetF3LMaB+mLZ3Moa2gZfAMJa6VKkIx8q7zwM2C5mr4hRs7XkHLWPm2",
- "mk9btQlZww8i23TI3ezaHm5gm9CbmCrGqdxEgiV75N0jDS0M83GE1df7Pl175F+faPtktovC4u9Kq+ih",
- "3Ebl0ZC3esN6Q9mwy0WHTqLPu3XjvCY1gGNCHQw9+z0hb22/L5vqgxC5I9Zw5q/G8dxuWTMNbGsEKsd6",
- "vtW8HI/46OnFsz81hJ1VKeCL2Y7i1olptASeON6SzEW2SVqcqX3BZExRpaCY775kQtaIh6m+V8yX7VfQ",
- "l7khngeL28ZuQ3pYJ463DjBeG8w6ju3W2MIRHecNMP65ue8QhwxBII71xHTnbnXqC/KzZprNLU+75WnB",
- "aexc9oy7OOouE5ldjqfJjaz4MDt7sYa0MvOGh/SuumdYFmJ0rVuW+wzm1XJpBPa+FRqrJON4zcPkN83l",
- "7HLHMriLEYcdvC4tdNWs+O5wfcYRhAPfFZIspajKe7ZmPd+ggbMoKd94p4bR/Isqtzi0lTyul4faKObY",
- "S8PeuDZsl3vjzW+B9cndou3fLVrIOVXuxVnISMUxkTOW67DuPIK8G+PHa95w4K1PJNv1Rlbn5h3D/f0u",
- "u8jG2pFTgkz0mtsD1TpMLqfCntzZbfmsP8eN8Ma+DTHAYPv5AQ1D2H0xyIBl4c3QKabsr4Y2P31Lz8PS",
- "zNclNI7X1leAd2KtvUYqTxsxUgqapVShUYODPhfy9DPLknp9FLEiI5j4gkA/O25cWQwcd5RI2U5I9Vp5",
- "Ncd0bsG/dG2MJvPp0BVkamHj1rD7RzHs/uAPnyKUSHrePZzWh4NncgSboud6zaNcaq+0LxANxS+HqeW2",
- "5bVGYvSGbwdkBO//WIcy5CWhJM0ZupsFV1pWqT7hFB1awcL6xfhrN92wKPXMN4n7VCMuTzfUCTdC1YLU",
- "bq6oSLWAiAP7RwAvsalquQSlO5x4AXDCXSvGScWZxrkKlkqR2Iwkc10bjj6zLQu6IQuao0f2d5CCzI0S",
- "EValRveQ0izPXXSImYaIxQmnmuRgmP4rZgQ6M5z3INQRT5buaizEk5GXwEExlcStsz/Zr5jv65bvvQDo",
- "rLCffZ7eTSf6ethZNgj50XP3YsTRcywC3sSF9GC/sWCBgvEkSmTmxnfxVV3aIneNjOcJ6F4TYeJ2/YQb",
- "YVoLgoye6suRQ9ep2zuL9nR0qKa1ER3fr1/r+1i1wqVIjMpIl+b3JdOraj5LRbHnqxjuLUVd0XAvo1AI",
- "jt+yPVqyPVVCunf2YId8cAV+RSLs6vbm/gMlEQV0YE5LvfH4Ml537wfu5Wt4oOvrfpVrZ8Dp7RtYt29g",
- "3b6SdPsG1u3u3r6BdftC1O0LUX/WF6JmWyVEVyVw55stumfapERCameuGXjYrPW6S98ryfSMkOOV4f/U",
- "3AFwBpLmJKXKCkbcxj0XWBtRVWkKkB2c8KQFia2IaCa+2/zXqrkn1f7+IyD797p9rN0i4Lz9viiq4id0",
- "NZHvycnkZNIbSUIhzsCVlMbmWYXhL7bXzmH/Vz3uz7K3dQXdWOPKipYlmGtNVYsFS5lFeS6MMrAUnWht",
- "LvALSAOcrdFGmLbPaiE+McrdxcRQVwApJnT37/ej4O2YXQ/odCtp3WjtxT+ugL2NT/U37Pp44Naxewzx",
- "lmXcBMv44kzjD/TCxu1jGl/ZgkJHauu1rKsU5ikhZQuWxuxOXkay5mTDm3EESCvJ9AZvOFqy307B/P+9",
- "4eMK5Jm//CqZTw4mK63Lg709fM9yJZTem5irqfmmOh/N/UCXdgR3uZSSneFbOO8//f8AAAD//xjSW+CP",
- "MgEA",
-}
-
-// GetSwagger returns the Swagger specification corresponding to the generated code
-// in this file.
-func GetSwagger() (*openapi3.Swagger, error) {
- zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, ""))
- if err != nil {
- return nil, fmt.Errorf("error base64 decoding spec: %s", err)
- }
- zr, err := gzip.NewReader(bytes.NewReader(zipped))
- if err != nil {
- return nil, fmt.Errorf("error decompressing spec: %s", err)
- }
- var buf bytes.Buffer
- _, err = buf.ReadFrom(zr)
- if err != nil {
- return nil, fmt.Errorf("error decompressing spec: %s", err)
- }
-
- swagger, err := openapi3.NewSwaggerLoader().LoadSwaggerFromData(buf.Bytes())
- if err != nil {
- return nil, fmt.Errorf("error loading Swagger: %s", err)
- }
- return swagger, nil
-}
diff --git a/daemon/algod/api/server/v2/generated/types.go b/daemon/algod/api/server/v2/generated/types.go
deleted file mode 100644
index b52d1286f..000000000
--- a/daemon/algod/api/server/v2/generated/types.go
+++ /dev/null
@@ -1,919 +0,0 @@
-// Package generated provides primitives to interact the openapi HTTP API.
-//
-// Code generated by github.com/algorand/oapi-codegen DO NOT EDIT.
-package generated
-
-import (
- "encoding/json"
- "time"
-)
-
-// Account defines model for Account.
-type Account struct {
-
- // the account public key
- Address string `json:"address"`
-
- // \[algo\] total number of MicroAlgos in the account
- Amount uint64 `json:"amount"`
-
- // specifies the amount of MicroAlgos in the account, without the pending rewards.
- AmountWithoutPendingRewards uint64 `json:"amount-without-pending-rewards"`
-
- // \[appl\] applications local data stored in this account.
- //
- // Note the raw object uses `map[int] -> AppLocalState` for this type.
- AppsLocalState *[]ApplicationLocalState `json:"apps-local-state,omitempty"`
-
- // \[teap\] the sum of all extra application program pages for this account.
- AppsTotalExtraPages *uint64 `json:"apps-total-extra-pages,omitempty"`
-
- // Specifies maximums on the number of each type that may be stored.
- AppsTotalSchema *ApplicationStateSchema `json:"apps-total-schema,omitempty"`
-
- // \[asset\] assets held by this account.
- //
- // Note the raw object uses `map[int] -> AssetHolding` for this type.
- Assets *[]AssetHolding `json:"assets,omitempty"`
-
- // \[spend\] the address against which signing should be checked. If empty, the address of the current account is used. This field can be updated in any transaction by setting the RekeyTo field.
- AuthAddr *string `json:"auth-addr,omitempty"`
-
- // \[appp\] parameters of applications created by this account including app global data.
- //
- // Note: the raw account uses `map[int] -> AppParams` for this type.
- CreatedApps *[]Application `json:"created-apps,omitempty"`
-
- // \[apar\] parameters of assets created by this account.
- //
- // Note: the raw account uses `map[int] -> Asset` for this type.
- CreatedAssets *[]Asset `json:"created-assets,omitempty"`
-
- // MicroAlgo balance required by the account.
- //
- // The requirement grows based on asset and application usage.
- MinBalance uint64 `json:"min-balance"`
-
- // AccountParticipation describes the parameters used by this account in consensus protocol.
- Participation *AccountParticipation `json:"participation,omitempty"`
-
- // amount of MicroAlgos of pending rewards in this account.
- PendingRewards uint64 `json:"pending-rewards"`
-
- // \[ebase\] used as part of the rewards computation. Only applicable to accounts which are participating.
- RewardBase *uint64 `json:"reward-base,omitempty"`
-
- // \[ern\] total rewards of MicroAlgos the account has received, including pending rewards.
- Rewards uint64 `json:"rewards"`
-
- // The round for which this information is relevant.
- Round uint64 `json:"round"`
-
- // Indicates what type of signature is used by this account, must be one of:
- // * sig
- // * msig
- // * lsig
- SigType *string `json:"sig-type,omitempty"`
-
- // \[onl\] delegation status of the account's MicroAlgos
- // * Offline - indicates that the associated account is delegated.
- // * Online - indicates that the associated account used as part of the delegation pool.
- // * NotParticipating - indicates that the associated account is neither a delegator nor a delegate.
- Status string `json:"status"`
-
- // The count of all applications that have been opted in, equivalent to the count of application local data (AppLocalState objects) stored in this account.
- TotalAppsOptedIn uint64 `json:"total-apps-opted-in"`
-
- // The count of all assets that have been opted in, equivalent to the count of AssetHolding objects held by this account.
- TotalAssetsOptedIn uint64 `json:"total-assets-opted-in"`
-
- // The count of all apps (AppParams objects) created by this account.
- TotalCreatedApps uint64 `json:"total-created-apps"`
-
- // The count of all assets (AssetParams objects) created by this account.
- TotalCreatedAssets uint64 `json:"total-created-assets"`
-}
-
-// AccountParticipation defines model for AccountParticipation.
-type AccountParticipation struct {
-
- // \[sel\] Selection public key (if any) currently registered for this round.
- SelectionParticipationKey []byte `json:"selection-participation-key"`
-
- // \[stprf\] Root of the state proof key (if any)
- StateProofKey *[]byte `json:"state-proof-key,omitempty"`
-
- // \[voteFst\] First round for which this participation is valid.
- VoteFirstValid uint64 `json:"vote-first-valid"`
-
- // \[voteKD\] Number of subkeys in each batch of participation keys.
- VoteKeyDilution uint64 `json:"vote-key-dilution"`
-
- // \[voteLst\] Last round for which this participation is valid.
- VoteLastValid uint64 `json:"vote-last-valid"`
-
- // \[vote\] root participation public key (if any) currently registered for this round.
- VoteParticipationKey []byte `json:"vote-participation-key"`
-}
-
-// AccountStateDelta defines model for AccountStateDelta.
-type AccountStateDelta struct {
- Address string `json:"address"`
-
- // Application state delta.
- Delta StateDelta `json:"delta"`
-}
-
-// Application defines model for Application.
-type Application struct {
-
- // \[appidx\] application index.
- Id uint64 `json:"id"`
-
- // Stores the global information associated with an application.
- Params ApplicationParams `json:"params"`
-}
-
-// ApplicationLocalState defines model for ApplicationLocalState.
-type ApplicationLocalState struct {
-
- // The application which this local state is for.
- Id uint64 `json:"id"`
-
- // Represents a key-value store for use in an application.
- KeyValue *TealKeyValueStore `json:"key-value,omitempty"`
-
- // Specifies maximums on the number of each type that may be stored.
- Schema ApplicationStateSchema `json:"schema"`
-}
-
-// ApplicationParams defines model for ApplicationParams.
-type ApplicationParams struct {
-
- // \[approv\] approval program.
- ApprovalProgram []byte `json:"approval-program"`
-
- // \[clearp\] approval program.
- ClearStateProgram []byte `json:"clear-state-program"`
-
- // The address that created this application. This is the address where the parameters and global state for this application can be found.
- Creator string `json:"creator"`
-
- // \[epp\] the amount of extra program pages available to this app.
- ExtraProgramPages *uint64 `json:"extra-program-pages,omitempty"`
-
- // Represents a key-value store for use in an application.
- GlobalState *TealKeyValueStore `json:"global-state,omitempty"`
-
- // Specifies maximums on the number of each type that may be stored.
- GlobalStateSchema *ApplicationStateSchema `json:"global-state-schema,omitempty"`
-
- // Specifies maximums on the number of each type that may be stored.
- LocalStateSchema *ApplicationStateSchema `json:"local-state-schema,omitempty"`
-}
-
-// ApplicationStateSchema defines model for ApplicationStateSchema.
-type ApplicationStateSchema struct {
-
- // \[nbs\] num of byte slices.
- NumByteSlice uint64 `json:"num-byte-slice"`
-
- // \[nui\] num of uints.
- NumUint uint64 `json:"num-uint"`
-}
-
-// Asset defines model for Asset.
-type Asset struct {
-
- // unique asset identifier
- Index uint64 `json:"index"`
-
- // AssetParams specifies the parameters for an asset.
- //
- // \[apar\] when part of an AssetConfig transaction.
- //
- // Definition:
- // data/transactions/asset.go : AssetParams
- Params AssetParams `json:"params"`
-}
-
-// AssetHolding defines model for AssetHolding.
-type AssetHolding struct {
-
- // \[a\] number of units held.
- Amount uint64 `json:"amount"`
-
- // Asset ID of the holding.
- AssetId uint64 `json:"asset-id"`
-
- // \[f\] whether or not the holding is frozen.
- IsFrozen bool `json:"is-frozen"`
-}
-
-// AssetParams defines model for AssetParams.
-type AssetParams struct {
-
- // \[c\] Address of account used to clawback holdings of this asset. If empty, clawback is not permitted.
- Clawback *string `json:"clawback,omitempty"`
-
- // The address that created this asset. This is the address where the parameters for this asset can be found, and also the address where unwanted asset units can be sent in the worst case.
- Creator string `json:"creator"`
-
- // \[dc\] The number of digits to use after the decimal point when displaying this asset. If 0, the asset is not divisible. If 1, the base unit of the asset is in tenths. If 2, the base unit of the asset is in hundredths, and so on. This value must be between 0 and 19 (inclusive).
- Decimals uint64 `json:"decimals"`
-
- // \[df\] Whether holdings of this asset are frozen by default.
- DefaultFrozen *bool `json:"default-frozen,omitempty"`
-
- // \[f\] Address of account used to freeze holdings of this asset. If empty, freezing is not permitted.
- Freeze *string `json:"freeze,omitempty"`
-
- // \[m\] Address of account used to manage the keys of this asset and to destroy it.
- Manager *string `json:"manager,omitempty"`
-
- // \[am\] A commitment to some unspecified asset metadata. The format of this metadata is up to the application.
- MetadataHash *[]byte `json:"metadata-hash,omitempty"`
-
- // \[an\] Name of this asset, as supplied by the creator. Included only when the asset name is composed of printable utf-8 characters.
- Name *string `json:"name,omitempty"`
-
- // Base64 encoded name of this asset, as supplied by the creator.
- NameB64 *[]byte `json:"name-b64,omitempty"`
-
- // \[r\] Address of account holding reserve (non-minted) units of this asset.
- Reserve *string `json:"reserve,omitempty"`
-
- // \[t\] The total number of units of this asset.
- Total uint64 `json:"total"`
-
- // \[un\] Name of a unit of this asset, as supplied by the creator. Included only when the name of a unit of this asset is composed of printable utf-8 characters.
- UnitName *string `json:"unit-name,omitempty"`
-
- // Base64 encoded name of a unit of this asset, as supplied by the creator.
- UnitNameB64 *[]byte `json:"unit-name-b64,omitempty"`
-
- // \[au\] URL where more information about the asset can be retrieved. Included only when the URL is composed of printable utf-8 characters.
- Url *string `json:"url,omitempty"`
-
- // Base64 encoded URL where more information about the asset can be retrieved.
- UrlB64 *[]byte `json:"url-b64,omitempty"`
-}
-
-// BuildVersion defines model for BuildVersion.
-type BuildVersion struct {
- Branch string `json:"branch"`
- BuildNumber uint64 `json:"build_number"`
- Channel string `json:"channel"`
- CommitHash string `json:"commit_hash"`
- Major uint64 `json:"major"`
- Minor uint64 `json:"minor"`
-}
-
-// DryrunRequest defines model for DryrunRequest.
-type DryrunRequest struct {
- Accounts []Account `json:"accounts"`
- Apps []Application `json:"apps"`
-
- // LatestTimestamp is available to some TEAL scripts. Defaults to the latest confirmed timestamp this algod is attached to.
- LatestTimestamp uint64 `json:"latest-timestamp"`
-
- // ProtocolVersion specifies a specific version string to operate under, otherwise whatever the current protocol of the network this algod is running in.
- ProtocolVersion string `json:"protocol-version"`
-
- // Round is available to some TEAL scripts. Defaults to the current round on the network this algod is attached to.
- Round uint64 `json:"round"`
- Sources []DryrunSource `json:"sources"`
- Txns []json.RawMessage `json:"txns"`
-}
-
-// DryrunSource defines model for DryrunSource.
-type DryrunSource struct {
- AppIndex uint64 `json:"app-index"`
-
- // FieldName is what kind of sources this is. If lsig then it goes into the transactions[this.TxnIndex].LogicSig. If approv or clearp it goes into the Approval Program or Clear State Program of application[this.AppIndex].
- FieldName string `json:"field-name"`
- Source string `json:"source"`
- TxnIndex uint64 `json:"txn-index"`
-}
-
-// DryrunState defines model for DryrunState.
-type DryrunState struct {
-
- // Evaluation error if any
- Error *string `json:"error,omitempty"`
-
- // Line number
- Line uint64 `json:"line"`
-
- // Program counter
- Pc uint64 `json:"pc"`
- Scratch *[]TealValue `json:"scratch,omitempty"`
- Stack []TealValue `json:"stack"`
-}
-
-// DryrunTxnResult defines model for DryrunTxnResult.
-type DryrunTxnResult struct {
- AppCallMessages *[]string `json:"app-call-messages,omitempty"`
- AppCallTrace *[]DryrunState `json:"app-call-trace,omitempty"`
-
- // Budget added during execution of app call transaction.
- BudgetAdded *uint64 `json:"budget-added,omitempty"`
-
- // Budget consumed during execution of app call transaction.
- BudgetConsumed *uint64 `json:"budget-consumed,omitempty"`
-
- // Net cost of app execution. Field is DEPRECATED and is subject for removal. Instead, use `budget-added` and `budget-consumed.
- Cost *uint64 `json:"cost,omitempty"`
-
- // Disassembled program line by line.
- Disassembly []string `json:"disassembly"`
-
- // Application state delta.
- GlobalDelta *StateDelta `json:"global-delta,omitempty"`
- LocalDeltas *[]AccountStateDelta `json:"local-deltas,omitempty"`
-
- // Disassembled lsig program line by line.
- LogicSigDisassembly *[]string `json:"logic-sig-disassembly,omitempty"`
- LogicSigMessages *[]string `json:"logic-sig-messages,omitempty"`
- LogicSigTrace *[]DryrunState `json:"logic-sig-trace,omitempty"`
- Logs *[][]byte `json:"logs,omitempty"`
-}
-
-// ErrorResponse defines model for ErrorResponse.
-type ErrorResponse struct {
- Data *map[string]interface{} `json:"data,omitempty"`
- Message string `json:"message"`
-}
-
-// EvalDelta defines model for EvalDelta.
-type EvalDelta struct {
-
- // \[at\] delta action.
- Action uint64 `json:"action"`
-
- // \[bs\] bytes value.
- Bytes *string `json:"bytes,omitempty"`
-
- // \[ui\] uint value.
- Uint *uint64 `json:"uint,omitempty"`
-}
-
-// EvalDeltaKeyValue defines model for EvalDeltaKeyValue.
-type EvalDeltaKeyValue struct {
- Key string `json:"key"`
-
- // Represents a TEAL value delta.
- Value EvalDelta `json:"value"`
-}
-
-// LightBlockHeaderProof defines model for LightBlockHeaderProof.
-type LightBlockHeaderProof struct {
-
- // The index of the light block header in the vector commitment tree
- Index uint64 `json:"index"`
-
- // The encoded proof.
- Proof []byte `json:"proof"`
-
- // Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.
- Treedepth uint64 `json:"treedepth"`
-}
-
-// ParticipationKey defines model for ParticipationKey.
-type ParticipationKey struct {
-
- // Address the key was generated for.
- Address string `json:"address"`
-
- // When registered, this is the first round it may be used.
- EffectiveFirstValid *uint64 `json:"effective-first-valid,omitempty"`
-
- // When registered, this is the last round it may be used.
- EffectiveLastValid *uint64 `json:"effective-last-valid,omitempty"`
-
- // The key's ParticipationID.
- Id string `json:"id"`
-
- // AccountParticipation describes the parameters used by this account in consensus protocol.
- Key AccountParticipation `json:"key"`
-
- // Round when this key was last used to propose a block.
- LastBlockProposal *uint64 `json:"last-block-proposal,omitempty"`
-
- // Round when this key was last used to generate a state proof.
- LastStateProof *uint64 `json:"last-state-proof,omitempty"`
-
- // Round when this key was last used to vote.
- LastVote *uint64 `json:"last-vote,omitempty"`
-}
-
-// PendingTransactionResponse defines model for PendingTransactionResponse.
-type PendingTransactionResponse struct {
-
- // The application index if the transaction was found and it created an application.
- ApplicationIndex *uint64 `json:"application-index,omitempty"`
-
- // The number of the asset's unit that were transferred to the close-to address.
- AssetClosingAmount *uint64 `json:"asset-closing-amount,omitempty"`
-
- // The asset index if the transaction was found and it created an asset.
- AssetIndex *uint64 `json:"asset-index,omitempty"`
-
- // Rewards in microalgos applied to the close remainder to account.
- CloseRewards *uint64 `json:"close-rewards,omitempty"`
-
- // Closing amount for the transaction.
- ClosingAmount *uint64 `json:"closing-amount,omitempty"`
-
- // The round where this transaction was confirmed, if present.
- ConfirmedRound *uint64 `json:"confirmed-round,omitempty"`
-
- // Application state delta.
- GlobalStateDelta *StateDelta `json:"global-state-delta,omitempty"`
-
- // Inner transactions produced by application execution.
- InnerTxns *[]PendingTransactionResponse `json:"inner-txns,omitempty"`
-
- // \[ld\] Local state key/value changes for the application being executed by this transaction.
- LocalStateDelta *[]AccountStateDelta `json:"local-state-delta,omitempty"`
-
- // \[lg\] Logs for the application being executed by this transaction.
- Logs *[][]byte `json:"logs,omitempty"`
-
- // Indicates that the transaction was kicked out of this node's transaction pool (and specifies why that happened). An empty string indicates the transaction wasn't kicked out of this node's txpool due to an error.
- PoolError string `json:"pool-error"`
-
- // Rewards in microalgos applied to the receiver account.
- ReceiverRewards *uint64 `json:"receiver-rewards,omitempty"`
-
- // Rewards in microalgos applied to the sender account.
- SenderRewards *uint64 `json:"sender-rewards,omitempty"`
-
- // The raw signed transaction.
- Txn map[string]interface{} `json:"txn"`
-}
-
-// StateDelta defines model for StateDelta.
-type StateDelta []EvalDeltaKeyValue
-
-// StateProof defines model for StateProof.
-type StateProof struct {
-
- // Represents the message that the state proofs are attesting to.
- Message StateProofMessage `json:"Message"`
-
- // The encoded StateProof for the message.
- StateProof []byte `json:"StateProof"`
-}
-
-// StateProofMessage defines model for StateProofMessage.
-type StateProofMessage struct {
-
- // The vector commitment root on all light block headers within a state proof interval.
- BlockHeadersCommitment []byte `json:"BlockHeadersCommitment"`
-
- // The first round the message attests to.
- FirstAttestedRound uint64 `json:"FirstAttestedRound"`
-
- // The last round the message attests to.
- LastAttestedRound uint64 `json:"LastAttestedRound"`
-
- // An integer value representing the natural log of the proven weight with 16 bits of precision. This value would be used to verify the next state proof.
- LnProvenWeight uint64 `json:"LnProvenWeight"`
-
- // The vector commitment root of the top N accounts to sign the next StateProof.
- VotersCommitment []byte `json:"VotersCommitment"`
-}
-
-// TealKeyValue defines model for TealKeyValue.
-type TealKeyValue struct {
- Key string `json:"key"`
-
- // Represents a TEAL value.
- Value TealValue `json:"value"`
-}
-
-// TealKeyValueStore defines model for TealKeyValueStore.
-type TealKeyValueStore []TealKeyValue
-
-// TealValue defines model for TealValue.
-type TealValue struct {
-
- // \[tb\] bytes value.
- Bytes string `json:"bytes"`
-
- // \[tt\] value type. Value `1` refers to **bytes**, value `2` refers to **uint**
- Type uint64 `json:"type"`
-
- // \[ui\] uint value.
- Uint uint64 `json:"uint"`
-}
-
-// Version defines model for Version.
-type Version struct {
- Build BuildVersion `json:"build"`
- GenesisHashB64 []byte `json:"genesis_hash_b64"`
- GenesisId string `json:"genesis_id"`
- Versions []string `json:"versions"`
-}
-
-// AccountId defines model for account-id.
-type AccountId string
-
-// Address defines model for address.
-type Address string
-
-// AddressRole defines model for address-role.
-type AddressRole string
-
-// AfterTime defines model for after-time.
-type AfterTime time.Time
-
-// AssetId defines model for asset-id.
-type AssetId uint64
-
-// BeforeTime defines model for before-time.
-type BeforeTime time.Time
-
-// Catchpoint defines model for catchpoint.
-type Catchpoint string
-
-// CurrencyGreaterThan defines model for currency-greater-than.
-type CurrencyGreaterThan uint64
-
-// CurrencyLessThan defines model for currency-less-than.
-type CurrencyLessThan uint64
-
-// ExcludeCloseTo defines model for exclude-close-to.
-type ExcludeCloseTo bool
-
-// Format defines model for format.
-type Format string
-
-// Limit defines model for limit.
-type Limit uint64
-
-// Max defines model for max.
-type Max uint64
-
-// MaxRound defines model for max-round.
-type MaxRound uint64
-
-// MinRound defines model for min-round.
-type MinRound uint64
-
-// Next defines model for next.
-type Next string
-
-// NotePrefix defines model for note-prefix.
-type NotePrefix string
-
-// Round defines model for round.
-type Round uint64
-
-// RoundNumber defines model for round-number.
-type RoundNumber uint64
-
-// SigType defines model for sig-type.
-type SigType string
-
-// TxId defines model for tx-id.
-type TxId string
-
-// TxType defines model for tx-type.
-type TxType string
-
-// AccountApplicationResponse defines model for AccountApplicationResponse.
-type AccountApplicationResponse struct {
-
- // Stores local state associated with an application.
- AppLocalState *ApplicationLocalState `json:"app-local-state,omitempty"`
-
- // Stores the global information associated with an application.
- CreatedApp *ApplicationParams `json:"created-app,omitempty"`
-
- // The round for which this information is relevant.
- Round uint64 `json:"round"`
-}
-
-// AccountAssetResponse defines model for AccountAssetResponse.
-type AccountAssetResponse struct {
-
- // Describes an asset held by an account.
- //
- // Definition:
- // data/basics/userBalance.go : AssetHolding
- AssetHolding *AssetHolding `json:"asset-holding,omitempty"`
-
- // AssetParams specifies the parameters for an asset.
- //
- // \[apar\] when part of an AssetConfig transaction.
- //
- // Definition:
- // data/transactions/asset.go : AssetParams
- CreatedAsset *AssetParams `json:"created-asset,omitempty"`
-
- // The round for which this information is relevant.
- Round uint64 `json:"round"`
-}
-
-// AccountResponse defines model for AccountResponse.
-type AccountResponse Account
-
-// ApplicationResponse defines model for ApplicationResponse.
-type ApplicationResponse Application
-
-// AssetResponse defines model for AssetResponse.
-type AssetResponse Asset
-
-// BlockHashResponse defines model for BlockHashResponse.
-type BlockHashResponse struct {
-
- // Block header hash.
- BlockHash string `json:"blockHash"`
-}
-
-// BlockResponse defines model for BlockResponse.
-type BlockResponse struct {
-
- // Block header data.
- Block map[string]interface{} `json:"block"`
-
- // Optional certificate object. This is only included when the format is set to message pack.
- Cert *map[string]interface{} `json:"cert,omitempty"`
-}
-
-// CatchpointAbortResponse defines model for CatchpointAbortResponse.
-type CatchpointAbortResponse struct {
-
- // Catchup abort response string
- CatchupMessage string `json:"catchup-message"`
-}
-
-// CatchpointStartResponse defines model for CatchpointStartResponse.
-type CatchpointStartResponse struct {
-
- // Catchup start response string
- CatchupMessage string `json:"catchup-message"`
-}
-
-// CompileResponse defines model for CompileResponse.
-type CompileResponse struct {
-
- // base32 SHA512_256 of program bytes (Address style)
- Hash string `json:"hash"`
-
- // base64 encoded program bytes
- Result string `json:"result"`
-
- // JSON of the source map
- Sourcemap *map[string]interface{} `json:"sourcemap,omitempty"`
-}
-
-// DisassembleResponse defines model for DisassembleResponse.
-type DisassembleResponse struct {
-
- // disassembled Teal code
- Result string `json:"result"`
-}
-
-// DryrunResponse defines model for DryrunResponse.
-type DryrunResponse struct {
- Error string `json:"error"`
-
- // Protocol version is the protocol version Dryrun was operated under.
- ProtocolVersion string `json:"protocol-version"`
- Txns []DryrunTxnResult `json:"txns"`
-}
-
-// LightBlockHeaderProofResponse defines model for LightBlockHeaderProofResponse.
-type LightBlockHeaderProofResponse LightBlockHeaderProof
-
-// NodeStatusResponse defines model for NodeStatusResponse.
-type NodeStatusResponse struct {
-
- // The current catchpoint that is being caught up to
- Catchpoint *string `json:"catchpoint,omitempty"`
-
- // The number of blocks that have already been obtained by the node as part of the catchup
- CatchpointAcquiredBlocks *uint64 `json:"catchpoint-acquired-blocks,omitempty"`
-
- // The number of accounts from the current catchpoint that have been processed so far as part of the catchup
- CatchpointProcessedAccounts *uint64 `json:"catchpoint-processed-accounts,omitempty"`
-
- // The total number of accounts included in the current catchpoint
- CatchpointTotalAccounts *uint64 `json:"catchpoint-total-accounts,omitempty"`
-
- // The total number of blocks that are required to complete the current catchpoint catchup
- CatchpointTotalBlocks *uint64 `json:"catchpoint-total-blocks,omitempty"`
-
- // The number of accounts from the current catchpoint that have been verified so far as part of the catchup
- CatchpointVerifiedAccounts *uint64 `json:"catchpoint-verified-accounts,omitempty"`
-
- // CatchupTime in nanoseconds
- CatchupTime uint64 `json:"catchup-time"`
-
- // The last catchpoint seen by the node
- LastCatchpoint *string `json:"last-catchpoint,omitempty"`
-
- // LastRound indicates the last round seen
- LastRound uint64 `json:"last-round"`
-
- // LastVersion indicates the last consensus version supported
- LastVersion string `json:"last-version"`
-
- // NextVersion of consensus protocol to use
- NextVersion string `json:"next-version"`
-
- // NextVersionRound is the round at which the next consensus version will apply
- NextVersionRound uint64 `json:"next-version-round"`
-
- // NextVersionSupported indicates whether the next consensus version is supported by this node
- NextVersionSupported bool `json:"next-version-supported"`
-
- // StoppedAtUnsupportedRound indicates that the node does not support the new rounds and has stopped making progress
- StoppedAtUnsupportedRound bool `json:"stopped-at-unsupported-round"`
-
- // TimeSinceLastRound in nanoseconds
- TimeSinceLastRound uint64 `json:"time-since-last-round"`
-}
-
-// ParticipationKeyResponse defines model for ParticipationKeyResponse.
-type ParticipationKeyResponse ParticipationKey
-
-// ParticipationKeysResponse defines model for ParticipationKeysResponse.
-type ParticipationKeysResponse []ParticipationKey
-
-// PendingTransactionsResponse defines model for PendingTransactionsResponse.
-type PendingTransactionsResponse struct {
-
- // An array of signed transaction objects.
- TopTransactions []map[string]interface{} `json:"top-transactions"`
-
- // Total number of transactions in the pool.
- TotalTransactions uint64 `json:"total-transactions"`
-}
-
-// PostParticipationResponse defines model for PostParticipationResponse.
-type PostParticipationResponse struct {
-
- // encoding of the participation ID.
- PartId string `json:"partId"`
-}
-
-// PostTransactionsResponse defines model for PostTransactionsResponse.
-type PostTransactionsResponse struct {
-
- // encoding of the transaction hash.
- TxId string `json:"txId"`
-}
-
-// StateProofResponse defines model for StateProofResponse.
-type StateProofResponse StateProof
-
-// SupplyResponse defines model for SupplyResponse.
-type SupplyResponse struct {
-
- // Round
- CurrentRound uint64 `json:"current_round"`
-
- // OnlineMoney
- OnlineMoney uint64 `json:"online-money"`
-
- // TotalMoney
- TotalMoney uint64 `json:"total-money"`
-}
-
-// TransactionParametersResponse defines model for TransactionParametersResponse.
-type TransactionParametersResponse struct {
-
- // ConsensusVersion indicates the consensus protocol version
- // as of LastRound.
- ConsensusVersion string `json:"consensus-version"`
-
- // Fee is the suggested transaction fee
- // Fee is in units of micro-Algos per byte.
- // Fee may fall to zero but transactions must still have a fee of
- // at least MinTxnFee for the current network protocol.
- Fee uint64 `json:"fee"`
-
- // GenesisHash is the hash of the genesis block.
- GenesisHash []byte `json:"genesis-hash"`
-
- // GenesisID is an ID listed in the genesis block.
- GenesisId string `json:"genesis-id"`
-
- // LastRound indicates the last round seen
- LastRound uint64 `json:"last-round"`
-
- // The minimum transaction fee (not per byte) required for the
- // txn to validate for the current network protocol.
- MinFee uint64 `json:"min-fee"`
-}
-
-// TransactionProofResponse defines model for TransactionProofResponse.
-type TransactionProofResponse struct {
-
- // The type of hash function used to create the proof, must be one of:
- // * sha512_256
- // * sha256
- Hashtype string `json:"hashtype"`
-
- // Index of the transaction in the block's payset.
- Idx uint64 `json:"idx"`
-
- // Proof of transaction membership.
- Proof []byte `json:"proof"`
-
- // Hash of SignedTxnInBlock for verifying proof.
- Stibhash []byte `json:"stibhash"`
-
- // Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.
- Treedepth uint64 `json:"treedepth"`
-}
-
-// VersionsResponse defines model for VersionsResponse.
-type VersionsResponse Version
-
-// AccountInformationParams defines parameters for AccountInformation.
-type AccountInformationParams struct {
-
- // Configures whether the response object is JSON or MessagePack encoded.
- Format *string `json:"format,omitempty"`
-
- // When set to `all` will exclude asset holdings, application local state, created asset parameters, any created application parameters. Defaults to `none`.
- Exclude *string `json:"exclude,omitempty"`
-}
-
-// AccountApplicationInformationParams defines parameters for AccountApplicationInformation.
-type AccountApplicationInformationParams struct {
-
- // Configures whether the response object is JSON or MessagePack encoded.
- Format *string `json:"format,omitempty"`
-}
-
-// AccountAssetInformationParams defines parameters for AccountAssetInformation.
-type AccountAssetInformationParams struct {
-
- // Configures whether the response object is JSON or MessagePack encoded.
- Format *string `json:"format,omitempty"`
-}
-
-// GetPendingTransactionsByAddressParams defines parameters for GetPendingTransactionsByAddress.
-type GetPendingTransactionsByAddressParams struct {
-
- // Truncated number of transactions to display. If max=0, returns all pending txns.
- Max *uint64 `json:"max,omitempty"`
-
- // Configures whether the response object is JSON or MessagePack encoded.
- Format *string `json:"format,omitempty"`
-}
-
-// GetBlockParams defines parameters for GetBlock.
-type GetBlockParams struct {
-
- // Configures whether the response object is JSON or MessagePack encoded.
- Format *string `json:"format,omitempty"`
-}
-
-// GetTransactionProofParams defines parameters for GetTransactionProof.
-type GetTransactionProofParams struct {
-
- // The type of hash function used to create the proof, must be one of:
- // * sha512_256
- // * sha256
- Hashtype *string `json:"hashtype,omitempty"`
-
- // Configures whether the response object is JSON or MessagePack encoded.
- Format *string `json:"format,omitempty"`
-}
-
-// TealCompileParams defines parameters for TealCompile.
-type TealCompileParams struct {
-
- // When set to `true`, returns the source map of the program as a JSON. Defaults to `false`.
- Sourcemap *bool `json:"sourcemap,omitempty"`
-}
-
-// TealDryrunJSONBody defines parameters for TealDryrun.
-type TealDryrunJSONBody DryrunRequest
-
-// GetPendingTransactionsParams defines parameters for GetPendingTransactions.
-type GetPendingTransactionsParams struct {
-
- // Truncated number of transactions to display. If max=0, returns all pending txns.
- Max *uint64 `json:"max,omitempty"`
-
- // Configures whether the response object is JSON or MessagePack encoded.
- Format *string `json:"format,omitempty"`
-}
-
-// PendingTransactionInformationParams defines parameters for PendingTransactionInformation.
-type PendingTransactionInformationParams struct {
-
- // Configures whether the response object is JSON or MessagePack encoded.
- Format *string `json:"format,omitempty"`
-}
-
-// TealDryrunRequestBody defines body for TealDryrun for application/json ContentType.
-type TealDryrunJSONRequestBody TealDryrunJSONBody
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go
index dadaf5263..07816a504 100644
--- a/daemon/algod/api/server/v2/handlers.go
+++ b/daemon/algod/api/server/v2/handlers.go
@@ -34,9 +34,8 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklearray"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/private"
- model "github.com/algorand/go-algorand/daemon/algod/api/spec/v2"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
+ specv2 "github.com/algorand/go-algorand/daemon/algod/api/spec/v2"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
@@ -51,8 +50,14 @@ import (
"github.com/algorand/go-codec/codec"
)
-const maxTealSourceBytes = 1e5
-const maxTealDryrunBytes = 1e5
+// max compiled teal program is currently 8k
+// but we allow for comments, spacing, and repeated consts
+// in the source teal, allow up to 200kb
+const maxTealSourceBytes = 200_000
+
+// With the ability to hold unlimited assets DryrunRequests can
+// become quite large, allow up to 1mb
+const maxTealDryrunBytes = 1_000_000
// Handlers is an implementation to the V2 route handler interface defined by the generated code.
type Handlers struct {
@@ -65,6 +70,8 @@ type Handlers struct {
type LedgerForAPI interface {
LookupAccount(round basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, basics.MicroAlgos, error)
LookupLatest(addr basics.Address) (basics.AccountData, basics.Round, basics.MicroAlgos, error)
+ LookupKv(round basics.Round, key string) ([]byte, error)
+ LookupKeysByPrefix(round basics.Round, keyPrefix string, maxKeyNum uint64) ([]string, error)
ConsensusParams(r basics.Round) (config.ConsensusParams, error)
Latest() basics.Round
LookupAsset(rnd basics.Round, addr basics.Address, aidx basics.AssetIndex) (ledgercore.AssetResource, error)
@@ -107,11 +114,11 @@ func roundToPtrOrNil(value basics.Round) *uint64 {
return &result
}
-func convertParticipationRecord(record account.ParticipationRecord) generated.ParticipationKey {
- participationKey := generated.ParticipationKey{
+func convertParticipationRecord(record account.ParticipationRecord) model.ParticipationKey {
+ participationKey := model.ParticipationKey{
Id: record.ParticipationID.String(),
Address: record.Account.String(),
- Key: generated.AccountParticipation{
+ Key: model.AccountParticipation{
VoteFirstValid: uint64(record.FirstValid),
VoteLastValid: uint64(record.LastValid),
VoteKeyDilution: record.KeyDilution,
@@ -205,7 +212,7 @@ func (v2 *Handlers) GetParticipationKeys(ctx echo.Context) error {
return badRequest(ctx, err, err.Error(), v2.Log)
}
- var response []generated.ParticipationKey
+ var response []model.ParticipationKey
for _, participationRecord := range partKeys {
response = append(response, convertParticipationRecord(participationRecord))
@@ -235,7 +242,7 @@ func (v2 *Handlers) AddParticipationKey(ctx echo.Context) error {
return badRequest(ctx, err, err.Error(), v2.Log)
}
- response := generated.PostParticipationResponse{PartId: partID.String()}
+ response := model.PostParticipationResponse{PartId: partID.String()}
return ctx.JSON(http.StatusOK, response)
}
@@ -317,15 +324,15 @@ func (v2 *Handlers) AppendKeys(ctx echo.Context, participationID string) error {
// ShutdownNode shuts down the node.
// (POST /v2/shutdown)
-func (v2 *Handlers) ShutdownNode(ctx echo.Context, params private.ShutdownNodeParams) error {
+func (v2 *Handlers) ShutdownNode(ctx echo.Context, params model.ShutdownNodeParams) error {
// TODO: shutdown endpoint
return ctx.String(http.StatusNotImplemented, "Endpoint not implemented.")
}
// AccountInformation gets account information for a given account.
// (GET /v2/accounts/{address})
-func (v2 *Handlers) AccountInformation(ctx echo.Context, address string, params generated.AccountInformationParams) error {
- handle, contentType, err := getCodecHandle(params.Format)
+func (v2 *Handlers) AccountInformation(ctx echo.Context, address string, params model.AccountInformationParams) error {
+ handle, contentType, err := getCodecHandle((*model.Format)(params.Format))
if err != nil {
return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log)
}
@@ -356,7 +363,7 @@ func (v2 *Handlers) AccountInformation(ctx echo.Context, address string, params
}
totalResults := record.TotalAssets + record.TotalAssetParams + record.TotalAppLocalStates + record.TotalAppParams
if totalResults > maxResults {
- v2.Log.Info("MaxAccountAPIResults limit %d exceeded, total results %d", maxResults, totalResults)
+ v2.Log.Infof("MaxAccountAPIResults limit %d exceeded, total results %d", maxResults, totalResults)
extraData := map[string]interface{}{
"max-results": maxResults,
"total-assets-opted-in": record.TotalAssets,
@@ -364,7 +371,7 @@ func (v2 *Handlers) AccountInformation(ctx echo.Context, address string, params
"total-apps-opted-in": record.TotalAppLocalStates,
"total-created-apps": record.TotalAppParams,
}
- return ctx.JSON(http.StatusBadRequest, generated.ErrorResponse{
+ return ctx.JSON(http.StatusBadRequest, model.ErrorResponse{
Message: "Result limit exceeded",
Data: &extraData,
})
@@ -395,7 +402,7 @@ func (v2 *Handlers) AccountInformation(ctx echo.Context, address string, params
return internalError(ctx, err, errInternalFailure, v2.Log)
}
- response := generated.AccountResponse(account)
+ response := model.AccountResponse(account)
return ctx.JSON(http.StatusOK, response)
}
@@ -420,9 +427,9 @@ func (v2 *Handlers) basicAccountInformation(ctx echo.Context, addr basics.Addres
return internalError(ctx, err, fmt.Sprintf("could not retrieve consensus information for last round (%d)", lastRound), v2.Log)
}
- var apiParticipation *generated.AccountParticipation
+ var apiParticipation *model.AccountParticipation
if record.VoteID != (crypto.OneTimeSignatureVerifier{}) {
- apiParticipation = &generated.AccountParticipation{
+ apiParticipation = &model.AccountParticipation{
VoteParticipationKey: record.VoteID[:],
SelectionParticipationKey: record.SelectionID[:],
VoteFirstValid: uint64(record.VoteFirstValid),
@@ -440,7 +447,7 @@ func (v2 *Handlers) basicAccountInformation(ctx echo.Context, addr basics.Addres
return internalError(ctx, errors.New("overflow on pending reward calculation"), errInternalFailure, v2.Log)
}
- account := generated.Account{
+ account := model.Account{
SigType: nil,
Round: uint64(lastRound),
Address: addr.String(),
@@ -456,21 +463,23 @@ func (v2 *Handlers) basicAccountInformation(ctx echo.Context, addr basics.Addres
TotalAssetsOptedIn: record.TotalAssets,
AuthAddr: addrOrNil(record.AuthAddr),
TotalAppsOptedIn: record.TotalAppLocalStates,
- AppsTotalSchema: &generated.ApplicationStateSchema{
+ AppsTotalSchema: &model.ApplicationStateSchema{
NumByteSlice: record.TotalAppSchema.NumByteSlice,
NumUint: record.TotalAppSchema.NumUint,
},
AppsTotalExtraPages: numOrNil(uint64(record.TotalExtraAppPages)),
+ TotalBoxes: numOrNil(record.TotalBoxes),
+ TotalBoxBytes: numOrNil(record.TotalBoxBytes),
MinBalance: record.MinBalance(&consensus).Raw,
}
- response := generated.AccountResponse(account)
+ response := model.AccountResponse(account)
return ctx.JSON(http.StatusOK, response)
}
// AccountAssetInformation gets account information about a given asset.
// (GET /v2/accounts/{address}/assets/{asset-id})
-func (v2 *Handlers) AccountAssetInformation(ctx echo.Context, address string, assetID uint64, params generated.AccountAssetInformationParams) error {
- handle, contentType, err := getCodecHandle(params.Format)
+func (v2 *Handlers) AccountAssetInformation(ctx echo.Context, address string, assetID uint64, params model.AccountAssetInformationParams) error {
+ handle, contentType, err := getCodecHandle((*model.Format)(params.Format))
if err != nil {
return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log)
}
@@ -494,7 +503,7 @@ func (v2 *Handlers) AccountAssetInformation(ctx echo.Context, address string, as
// return msgpack response
if handle == protocol.CodecHandle {
- data, err := encode(handle, model.AssetResourceToAccountAssetModel(record))
+ data, err := encode(handle, specv2.AssetResourceToAccountAssetModel(record))
if err != nil {
return internalError(ctx, err, errFailedToEncodeResponse, v2.Log)
}
@@ -502,7 +511,7 @@ func (v2 *Handlers) AccountAssetInformation(ctx echo.Context, address string, as
}
// prepare JSON response
- response := generated.AccountAssetResponse{Round: uint64(lastRound)}
+ response := model.AccountAssetResponse{Round: uint64(lastRound)}
if record.AssetParams != nil {
asset := AssetParamsToAsset(addr.String(), basics.AssetIndex(assetID), record.AssetParams)
@@ -510,9 +519,9 @@ func (v2 *Handlers) AccountAssetInformation(ctx echo.Context, address string, as
}
if record.AssetHolding != nil {
- response.AssetHolding = &generated.AssetHolding{
+ response.AssetHolding = &model.AssetHolding{
Amount: record.AssetHolding.Amount,
- AssetId: uint64(assetID),
+ AssetID: uint64(assetID),
IsFrozen: record.AssetHolding.Frozen,
}
}
@@ -522,8 +531,8 @@ func (v2 *Handlers) AccountAssetInformation(ctx echo.Context, address string, as
// AccountApplicationInformation gets account information about a given app.
// (GET /v2/accounts/{address}/applications/{application-id})
-func (v2 *Handlers) AccountApplicationInformation(ctx echo.Context, address string, applicationID uint64, params generated.AccountApplicationInformationParams) error {
- handle, contentType, err := getCodecHandle(params.Format)
+func (v2 *Handlers) AccountApplicationInformation(ctx echo.Context, address string, applicationID uint64, params model.AccountApplicationInformationParams) error {
+ handle, contentType, err := getCodecHandle((*model.Format)(params.Format))
if err != nil {
return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log)
}
@@ -547,7 +556,7 @@ func (v2 *Handlers) AccountApplicationInformation(ctx echo.Context, address stri
// return msgpack response
if handle == protocol.CodecHandle {
- data, err := encode(handle, model.AppResourceToAccountApplicationModel(record))
+ data, err := encode(handle, specv2.AppResourceToAccountApplicationModel(record))
if err != nil {
return internalError(ctx, err, errFailedToEncodeResponse, v2.Log)
}
@@ -555,7 +564,7 @@ func (v2 *Handlers) AccountApplicationInformation(ctx echo.Context, address stri
}
// prepare JSON response
- response := generated.AccountApplicationResponse{Round: uint64(lastRound)}
+ response := model.AccountApplicationResponse{Round: uint64(lastRound)}
if record.AppParams != nil {
app := AppParamsToApplication(addr.String(), basics.AppIndex(applicationID), record.AppParams)
@@ -564,10 +573,10 @@ func (v2 *Handlers) AccountApplicationInformation(ctx echo.Context, address stri
if record.AppLocalState != nil {
localState := convertTKVToGenerated(&record.AppLocalState.KeyValue)
- response.AppLocalState = &generated.ApplicationLocalState{
+ response.AppLocalState = &model.ApplicationLocalState{
Id: uint64(applicationID),
KeyValue: localState,
- Schema: generated.ApplicationStateSchema{
+ Schema: model.ApplicationStateSchema{
NumByteSlice: record.AppLocalState.Schema.NumByteSlice,
NumUint: record.AppLocalState.Schema.NumUint,
},
@@ -579,8 +588,8 @@ func (v2 *Handlers) AccountApplicationInformation(ctx echo.Context, address stri
// GetBlock gets the block for the given round.
// (GET /v2/blocks/{round})
-func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params generated.GetBlockParams) error {
- handle, contentType, err := getCodecHandle(params.Format)
+func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params model.GetBlockParams) error {
+ handle, contentType, err := getCodecHandle((*model.Format)(params.Format))
if err != nil {
return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log)
}
@@ -641,14 +650,14 @@ func (v2 *Handlers) GetBlockHash(ctx echo.Context, round uint64) error {
}
}
- response := generated.BlockHashResponse{BlockHash: crypto.Digest(block.Hash()).String()}
+ response := model.BlockHashResponse{BlockHash: crypto.Digest(block.Hash()).String()}
return ctx.JSON(http.StatusOK, response)
}
// GetTransactionProof generates a Merkle proof for a transaction in a block.
// (GET /v2/blocks/{round}/transactions/{txid}/proof)
-func (v2 *Handlers) GetTransactionProof(ctx echo.Context, round uint64, txid string, params generated.GetTransactionProofParams) error {
+func (v2 *Handlers) GetTransactionProof(ctx echo.Context, round uint64, txid string, params model.GetTransactionProofParams) error {
var txID transactions.Txid
err := txID.UnmarshalText([]byte(txid))
if err != nil {
@@ -672,7 +681,7 @@ func (v2 *Handlers) GetTransactionProof(ctx echo.Context, round uint64, txid str
hashtype := "sha512_256" // default hash type for proof
if params.Hashtype != nil {
- hashtype = *params.Hashtype
+ hashtype = string(*params.Hashtype)
}
if hashtype == "sha256" && !proto.EnableSHA256TxnCommitmentHeader {
return badRequest(ctx, err, "protocol does not support sha256 vector commitment proofs", v2.Log)
@@ -713,12 +722,12 @@ func (v2 *Handlers) GetTransactionProof(ctx echo.Context, round uint64, txid str
return internalError(ctx, err, "generating proof", v2.Log)
}
- response := generated.TransactionProofResponse{
+ response := model.TransactionProofResponse{
Proof: proof.GetConcatenatedProof(),
Stibhash: stibhash[:],
Idx: uint64(idx),
Treedepth: uint64(proof.TreeDepth),
- Hashtype: hashtype,
+ Hashtype: model.TransactionProofResponseHashtype(hashtype),
}
return ctx.JSON(http.StatusOK, response)
@@ -737,7 +746,7 @@ func (v2 *Handlers) GetSupply(ctx echo.Context) error {
return internalError(ctx, err, errInternalFailure, v2.Log)
}
- supply := generated.SupplyResponse{
+ supply := model.SupplyResponse{
CurrentRound: uint64(latest),
TotalMoney: totals.Participating().Raw,
OnlineMoney: totals.Online.Money.Raw,
@@ -754,7 +763,7 @@ func (v2 *Handlers) GetStatus(ctx echo.Context) error {
return internalError(ctx, err, errFailedRetrievingNodeStatus, v2.Log)
}
- response := generated.NodeStatusResponse{
+ response := model.NodeStatusResponse{
LastRound: uint64(stat.LastRound),
LastVersion: string(stat.LastVersion),
NextVersion: string(stat.NextVersion),
@@ -768,6 +777,9 @@ func (v2 *Handlers) GetStatus(ctx echo.Context) error {
CatchpointTotalAccounts: &stat.CatchpointCatchupTotalAccounts,
CatchpointProcessedAccounts: &stat.CatchpointCatchupProcessedAccounts,
CatchpointVerifiedAccounts: &stat.CatchpointCatchupVerifiedAccounts,
+ CatchpointTotalKvs: &stat.CatchpointCatchupTotalKVs,
+ CatchpointProcessedKvs: &stat.CatchpointCatchupProcessedKVs,
+ CatchpointVerifiedKvs: &stat.CatchpointCatchupVerifiedKVs,
CatchpointTotalBlocks: &stat.CatchpointCatchupTotalBlocks,
CatchpointAcquiredBlocks: &stat.CatchpointCatchupAcquiredBlocks,
}
@@ -862,7 +874,7 @@ func (v2 *Handlers) RawTransaction(ctx echo.Context) error {
// For backwards compatibility, return txid of first tx in group
txid := txgroup[0].ID()
- return ctx.JSON(http.StatusOK, generated.PostTransactionsResponse{TxId: txid.String()})
+ return ctx.JSON(http.StatusOK, model.PostTransactionsResponse{TxId: txid.String()})
}
// TealDryrun takes transactions and additional simulated ledger state and returns debugging information.
@@ -881,7 +893,7 @@ func (v2 *Handlers) TealDryrun(ctx echo.Context) error {
data := buf.Bytes()
var dr DryrunRequest
- var gdr generated.DryrunRequest
+ var gdr model.DryrunRequest
err = decode(protocol.JSONStrictHandle, data, &gdr)
if err == nil {
dr, err = DryrunRequestFromGenerated(&gdr)
@@ -905,7 +917,7 @@ func (v2 *Handlers) TealDryrun(ctx echo.Context) error {
}
}
- var response generated.DryrunResponse
+ var response model.DryrunResponse
var protocolVersion protocol.ConsensusVersion
if dr.ProtocolVersion != "" {
@@ -948,7 +960,7 @@ func (v2 *Handlers) TransactionParams(ctx echo.Context) error {
gh := v2.Node.GenesisHash()
proto := config.Consensus[stat.LastVersion]
- response := generated.TransactionParametersResponse{
+ response := model.TransactionParametersResponse{
ConsensusVersion: string(stat.LastVersion),
Fee: v2.Node.SuggestedFee().Raw,
GenesisHash: gh[:],
@@ -960,28 +972,30 @@ func (v2 *Handlers) TransactionParams(ctx echo.Context) error {
return ctx.JSON(http.StatusOK, response)
}
-type preEncodedTxInfo struct {
- AssetIndex *uint64 `codec:"asset-index,omitempty"`
- AssetClosingAmount *uint64 `codec:"asset-closing-amount,omitempty"`
- ApplicationIndex *uint64 `codec:"application-index,omitempty"`
- CloseRewards *uint64 `codec:"close-rewards,omitempty"`
- ClosingAmount *uint64 `codec:"closing-amount,omitempty"`
- ConfirmedRound *uint64 `codec:"confirmed-round,omitempty"`
- GlobalStateDelta *generated.StateDelta `codec:"global-state-delta,omitempty"`
- LocalStateDelta *[]generated.AccountStateDelta `codec:"local-state-delta,omitempty"`
- PoolError string `codec:"pool-error"`
- ReceiverRewards *uint64 `codec:"receiver-rewards,omitempty"`
- SenderRewards *uint64 `codec:"sender-rewards,omitempty"`
- Txn transactions.SignedTxn `codec:"txn"`
- Logs *[][]byte `codec:"logs,omitempty"`
- Inners *[]preEncodedTxInfo `codec:"inner-txns,omitempty"`
+// PreEncodedTxInfo represents the PendingTransaction response before it is
+// encoded to a format.
+type PreEncodedTxInfo struct {
+ AssetIndex *uint64 `codec:"asset-index,omitempty"`
+ AssetClosingAmount *uint64 `codec:"asset-closing-amount,omitempty"`
+ ApplicationIndex *uint64 `codec:"application-index,omitempty"`
+ CloseRewards *uint64 `codec:"close-rewards,omitempty"`
+ ClosingAmount *uint64 `codec:"closing-amount,omitempty"`
+ ConfirmedRound *uint64 `codec:"confirmed-round,omitempty"`
+ GlobalStateDelta *model.StateDelta `codec:"global-state-delta,omitempty"`
+ LocalStateDelta *[]model.AccountStateDelta `codec:"local-state-delta,omitempty"`
+ PoolError string `codec:"pool-error"`
+ ReceiverRewards *uint64 `codec:"receiver-rewards,omitempty"`
+ SenderRewards *uint64 `codec:"sender-rewards,omitempty"`
+ Txn transactions.SignedTxn `codec:"txn"`
+ Logs *[][]byte `codec:"logs,omitempty"`
+ Inners *[]PreEncodedTxInfo `codec:"inner-txns,omitempty"`
}
// PendingTransactionInformation returns a transaction with the specified txID
// from the transaction pool. If not found looks for the transaction in the
// last proto.MaxTxnLife rounds
// (GET /v2/transactions/pending/{txid})
-func (v2 *Handlers) PendingTransactionInformation(ctx echo.Context, txid string, params generated.PendingTransactionInformationParams) error {
+func (v2 *Handlers) PendingTransactionInformation(ctx echo.Context, txid string, params model.PendingTransactionInformationParams) error {
stat, err := v2.Node.Status()
if err != nil {
@@ -1006,8 +1020,9 @@ func (v2 *Handlers) PendingTransactionInformation(ctx echo.Context, txid string,
}
// Encoding wasn't working well without embedding "real" objects.
- response := preEncodedTxInfo{
- Txn: txn.Txn,
+ response := PreEncodedTxInfo{
+ Txn: txn.Txn,
+ PoolError: txn.PoolError,
}
if txn.ConfirmedRound != 0 {
@@ -1026,7 +1041,7 @@ func (v2 *Handlers) PendingTransactionInformation(ctx echo.Context, txid string,
response.Inners = convertInners(&txn)
}
- handle, contentType, err := getCodecHandle(params.Format)
+ handle, contentType, err := getCodecHandle((*model.Format)(params.Format))
if err != nil {
return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log)
}
@@ -1060,7 +1075,7 @@ func (v2 *Handlers) getPendingTransactions(ctx echo.Context, max *uint64, format
addrPtr = &addr
}
- handle, contentType, err := getCodecHandle(format)
+ handle, contentType, err := getCodecHandle((*model.Format)(format))
if err != nil {
return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log)
}
@@ -1135,7 +1150,7 @@ func (v2 *Handlers) startCatchup(ctx echo.Context, catchpoint string) error {
return internalError(ctx, err, fmt.Sprintf(errFailedToStartCatchup, err), v2.Log)
}
- return ctx.JSON(code, private.CatchpointStartResponse{
+ return ctx.JSON(code, model.CatchpointStartResponse{
CatchupMessage: catchpoint,
})
}
@@ -1152,15 +1167,15 @@ func (v2 *Handlers) abortCatchup(ctx echo.Context, catchpoint string) error {
return internalError(ctx, err, fmt.Sprintf(errFailedToAbortCatchup, err), v2.Log)
}
- return ctx.JSON(http.StatusOK, private.CatchpointAbortResponse{
+ return ctx.JSON(http.StatusOK, model.CatchpointAbortResponse{
CatchupMessage: catchpoint,
})
}
// GetPendingTransactions returns the list of unconfirmed transactions currently in the transaction pool.
// (GET /v2/transactions/pending)
-func (v2 *Handlers) GetPendingTransactions(ctx echo.Context, params generated.GetPendingTransactionsParams) error {
- return v2.getPendingTransactions(ctx, params.Max, params.Format, nil)
+func (v2 *Handlers) GetPendingTransactions(ctx echo.Context, params model.GetPendingTransactionsParams) error {
+ return v2.getPendingTransactions(ctx, params.Max, (*string)(params.Format), nil)
}
// GetApplicationByID returns application information by app idx.
@@ -1188,7 +1203,98 @@ func (v2 *Handlers) GetApplicationByID(ctx echo.Context, applicationID uint64) e
}
appParams := *record.AppParams
app := AppParamsToApplication(creator.String(), appIdx, &appParams)
- response := generated.ApplicationResponse(app)
+ response := model.ApplicationResponse(app)
+ return ctx.JSON(http.StatusOK, response)
+}
+
+func applicationBoxesMaxKeys(requestedMax uint64, algodMax uint64) uint64 {
+ if requestedMax == 0 {
+ if algodMax == 0 {
+ return math.MaxUint64 // unlimited results when both requested and algod max are 0
+ }
+ return algodMax + 1 // API limit dominates. Increments by 1 to test if more than max supported results exist.
+ }
+
+ if requestedMax <= algodMax || algodMax == 0 {
+ return requestedMax // requested limit dominates
+ }
+
+ return algodMax + 1 // API limit dominates. Increments by 1 to test if more than max supported results exist.
+}
+
+// GetApplicationBoxes returns the box names of an application
+// (GET /v2/applications/{application-id}/boxes)
+func (v2 *Handlers) GetApplicationBoxes(ctx echo.Context, applicationID uint64, params model.GetApplicationBoxesParams) error {
+ appIdx := basics.AppIndex(applicationID)
+ ledger := v2.Node.LedgerForAPI()
+ lastRound := ledger.Latest()
+ keyPrefix := logic.MakeBoxKey(appIdx, "")
+
+ requestedMax, algodMax := nilToZero(params.Max), v2.Node.Config().MaxAPIBoxPerApplication
+ max := applicationBoxesMaxKeys(requestedMax, algodMax)
+
+ if max != math.MaxUint64 {
+ record, _, _, err := ledger.LookupAccount(ledger.Latest(), appIdx.Address())
+ if err != nil {
+ return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
+ }
+ if record.TotalBoxes > max {
+ return ctx.JSON(http.StatusBadRequest, model.ErrorResponse{
+ Message: "Result limit exceeded",
+ Data: &map[string]interface{}{
+ "max-api-box-per-application": algodMax,
+ "max": requestedMax,
+ "total-boxes": record.TotalBoxes,
+ },
+ })
+ }
+ }
+
+ boxKeys, err := ledger.LookupKeysByPrefix(lastRound, keyPrefix, math.MaxUint64)
+ if err != nil {
+ return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
+ }
+
+ prefixLen := len(keyPrefix)
+ responseBoxes := make([]model.BoxDescriptor, len(boxKeys))
+ for i, boxKey := range boxKeys {
+ responseBoxes[i] = model.BoxDescriptor{
+ Name: []byte(boxKey[prefixLen:]),
+ }
+ }
+ response := model.BoxesResponse{Boxes: responseBoxes}
+ return ctx.JSON(http.StatusOK, response)
+}
+
+// GetApplicationBoxByName returns the value of an application's box
+// (GET /v2/applications/{application-id}/box)
+func (v2 *Handlers) GetApplicationBoxByName(ctx echo.Context, applicationID uint64, params model.GetApplicationBoxByNameParams) error {
+ appIdx := basics.AppIndex(applicationID)
+ ledger := v2.Node.LedgerForAPI()
+ lastRound := ledger.Latest()
+
+ encodedBoxName := params.Name
+ boxNameBytes, err := logic.NewAppCallBytes(encodedBoxName)
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+ boxName, err := boxNameBytes.Raw()
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+
+ value, err := ledger.LookupKv(lastRound, logic.MakeBoxKey(appIdx, string(boxName)))
+ if err != nil {
+ return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
+ }
+ if value == nil {
+ return notFound(ctx, errors.New(errBoxDoesNotExist), errBoxDoesNotExist, v2.Log)
+ }
+
+ response := model.BoxResponse{
+ Name: boxName,
+ Value: value,
+ }
return ctx.JSON(http.StatusOK, response)
}
@@ -1216,14 +1322,14 @@ func (v2 *Handlers) GetAssetByID(ctx echo.Context, assetID uint64) error {
}
assetParams := *record.AssetParams
asset := AssetParamsToAsset(creator.String(), assetIdx, &assetParams)
- response := generated.AssetResponse(asset)
+ response := model.AssetResponse(asset)
return ctx.JSON(http.StatusOK, response)
}
// GetPendingTransactionsByAddress takes an Algorand address and returns its associated list of unconfirmed transactions currently in the transaction pool.
// (GET /v2/accounts/{address}/transactions/pending)
-func (v2 *Handlers) GetPendingTransactionsByAddress(ctx echo.Context, addr string, params generated.GetPendingTransactionsByAddressParams) error {
- return v2.getPendingTransactions(ctx, params.Max, params.Format, &addr)
+func (v2 *Handlers) GetPendingTransactionsByAddress(ctx echo.Context, addr string, params model.GetPendingTransactionsByAddressParams) error {
+ return v2.getPendingTransactions(ctx, params.Max, (*string)(params.Format), &addr)
}
// StartCatchup Given a catchpoint, it starts catching up to this catchpoint
@@ -1241,13 +1347,13 @@ func (v2 *Handlers) AbortCatchup(ctx echo.Context, catchpoint string) error {
// CompileResponseWithSourceMap overrides the sourcemap field in
// the CompileResponse for JSON marshalling.
type CompileResponseWithSourceMap struct {
- generated.CompileResponse
+ model.CompileResponse
Sourcemap *logic.SourceMap `json:"sourcemap,omitempty"`
}
// TealCompile compiles TEAL code to binary, return both binary and hash
// (POST /v2/teal/compile)
-func (v2 *Handlers) TealCompile(ctx echo.Context, params generated.TealCompileParams) (err error) {
+func (v2 *Handlers) TealCompile(ctx echo.Context, params model.TealCompileParams) (err error) {
// Return early if teal compile is not allowed in node config.
if !v2.Node.Config().EnableDeveloperAPI {
return ctx.String(http.StatusNotFound, "/teal/compile was not enabled in the configuration file by setting the EnableDeveloperAPI to true")
@@ -1282,7 +1388,7 @@ func (v2 *Handlers) TealCompile(ctx echo.Context, params generated.TealCompilePa
}
response := CompileResponseWithSourceMap{
- generated.CompileResponse{
+ model.CompileResponse{
Hash: addr.String(),
Result: base64.StdEncoding.EncodeToString(ops.Program),
},
@@ -1307,7 +1413,7 @@ func (v2 *Handlers) GetStateProof(ctx echo.Context, round uint64) error {
return v2.wrapStateproofError(ctx, err)
}
- response := generated.StateProofResponse{
+ response := model.StateProofResponse{
StateProof: protocol.Encode(&tx.StateProof),
}
@@ -1360,7 +1466,7 @@ func (v2 *Handlers) GetLightBlockHeaderProof(ctx echo.Context, round uint64) err
return internalError(ctx, err, err.Error(), v2.Log)
}
- response := generated.LightBlockHeaderProofResponse{
+ response := model.LightBlockHeaderProofResponse{
Index: blockIndex,
Proof: leafproof.GetConcatenatedProof(),
Treedepth: uint64(leafproof.TreeDepth),
@@ -1386,7 +1492,7 @@ func (v2 *Handlers) TealDisassemble(ctx echo.Context) error {
if err != nil {
return badRequest(ctx, err, err.Error(), v2.Log)
}
- response := generated.DisassembleResponse{
+ response := model.DisassembleResponse{
Result: program,
}
return ctx.JSON(http.StatusOK, response)
diff --git a/daemon/algod/api/server/v2/handlers_test.go b/daemon/algod/api/server/v2/handlers_test.go
new file mode 100644
index 000000000..98635897d
--- /dev/null
+++ b/daemon/algod/api/server/v2/handlers_test.go
@@ -0,0 +1,41 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package v2
+
+import (
+ "math"
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestApplicationBoxesMaxKeys(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // Response size limited by request supplied value.
+ require.Equal(t, uint64(5), applicationBoxesMaxKeys(5, 7))
+ require.Equal(t, uint64(5), applicationBoxesMaxKeys(5, 0))
+
+ // Response size limited by algod max.
+ require.Equal(t, uint64(2), applicationBoxesMaxKeys(5, 1))
+ require.Equal(t, uint64(2), applicationBoxesMaxKeys(0, 1))
+
+ // Response size _not_ limited
+ require.Equal(t, uint64(math.MaxUint64), applicationBoxesMaxKeys(0, 0))
+}
diff --git a/daemon/algod/api/server/v2/test/handlers_resources_test.go b/daemon/algod/api/server/v2/test/handlers_resources_test.go
index b7b52eeda..2937baa2c 100644
--- a/daemon/algod/api/server/v2/test/handlers_resources_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_resources_test.go
@@ -26,7 +26,7 @@ import (
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
@@ -42,6 +42,7 @@ import (
type mockLedger struct {
accounts map[basics.Address]basics.AccountData
+ kvstore map[string][]byte
latest basics.Round
blocks []bookkeeping.Block
}
@@ -61,6 +62,17 @@ func (l *mockLedger) LookupLatest(addr basics.Address) (basics.AccountData, basi
return ad, l.latest, basics.MicroAlgos{Raw: 0}, nil
}
+func (l *mockLedger) LookupKv(round basics.Round, key string) ([]byte, error) {
+ if value, ok := l.kvstore[key]; ok {
+ return value, nil
+ }
+ return nil, fmt.Errorf("Key %v does not exist", key)
+}
+
+func (l *mockLedger) LookupKeysByPrefix(round basics.Round, keyPrefix string, maxKeyNum uint64) ([]string, error) {
+ panic("not implemented")
+}
+
func (l *mockLedger) ConsensusParams(r basics.Round) (config.ConsensusParams, error) {
return config.Consensus[protocol.ConsensusFuture], nil
}
@@ -230,9 +242,9 @@ func newReq(t *testing.T) (ctx echo.Context, rec *httptest.ResponseRecorder) {
func accountInformationResourceLimitsTest(t *testing.T, accountMaker func(int) basics.AccountData, acctSize, maxResults int, exclude string, expectedCode int) {
handlers, addr, acctData := setupTestForLargeResources(t, acctSize, maxResults, accountMaker)
- params := generatedV2.AccountInformationParams{}
+ params := model.AccountInformationParams{}
if exclude != "" {
- params.Exclude = &exclude
+ params.Exclude = (*model.AccountInformationParamsExclude)(&exclude)
}
ctx, rec := newReq(t)
err := handlers.AccountInformation(ctx, addr.String(), params)
@@ -289,26 +301,26 @@ func accountInformationResourceLimitsTest(t *testing.T, accountMaker func(int) b
for i := 0; i < ret.TotalAssets; i++ {
ctx, rec = newReq(t)
aidx := basics.AssetIndex(i * 4)
- err = handlers.AccountAssetInformation(ctx, addr.String(), uint64(aidx), generatedV2.AccountAssetInformationParams{})
+ err = handlers.AccountAssetInformation(ctx, addr.String(), uint64(aidx), model.AccountAssetInformationParams{})
require.NoError(t, err)
require.Equal(t, 200, rec.Code)
- var ret generatedV2.AccountAssetResponse
+ var ret model.AccountAssetResponse
err = json.Unmarshal(rec.Body.Bytes(), &ret)
require.NoError(t, err)
assert.Nil(t, ret.CreatedAsset)
- assert.Equal(t, ret.AssetHolding, &generatedV2.AssetHolding{
+ assert.Equal(t, ret.AssetHolding, &model.AssetHolding{
Amount: acctData.Assets[aidx].Amount,
- AssetId: uint64(aidx),
+ AssetID: uint64(aidx),
IsFrozen: acctData.Assets[aidx].Frozen,
})
}
for i := 0; i < ret.TotalCreatedAssets; i++ {
ctx, rec = newReq(t)
aidx := basics.AssetIndex(i*4 + 1)
- err = handlers.AccountAssetInformation(ctx, addr.String(), uint64(aidx), generatedV2.AccountAssetInformationParams{})
+ err = handlers.AccountAssetInformation(ctx, addr.String(), uint64(aidx), model.AccountAssetInformationParams{})
require.NoError(t, err)
require.Equal(t, 200, rec.Code)
- var ret generatedV2.AccountAssetResponse
+ var ret model.AccountAssetResponse
err = json.Unmarshal(rec.Body.Bytes(), &ret)
require.NoError(t, err)
assert.Nil(t, ret.AssetHolding)
@@ -319,10 +331,10 @@ func accountInformationResourceLimitsTest(t *testing.T, accountMaker func(int) b
for i := 0; i < ret.TotalApps; i++ {
ctx, rec = newReq(t)
aidx := basics.AppIndex(i*4 + 2)
- err = handlers.AccountApplicationInformation(ctx, addr.String(), uint64(aidx), generatedV2.AccountApplicationInformationParams{})
+ err = handlers.AccountApplicationInformation(ctx, addr.String(), uint64(aidx), model.AccountApplicationInformationParams{})
require.NoError(t, err)
require.Equal(t, 200, rec.Code)
- var ret generatedV2.AccountApplicationResponse
+ var ret model.AccountApplicationResponse
err = json.Unmarshal(rec.Body.Bytes(), &ret)
require.NoError(t, err)
assert.Nil(t, ret.CreatedApp)
@@ -335,10 +347,10 @@ func accountInformationResourceLimitsTest(t *testing.T, accountMaker func(int) b
for i := 0; i < ret.TotalCreatedApps; i++ {
ctx, rec = newReq(t)
aidx := basics.AppIndex(i*4 + 3)
- err = handlers.AccountApplicationInformation(ctx, addr.String(), uint64(aidx), generatedV2.AccountApplicationInformationParams{})
+ err = handlers.AccountApplicationInformation(ctx, addr.String(), uint64(aidx), model.AccountApplicationInformationParams{})
require.NoError(t, err)
require.Equal(t, 200, rec.Code)
- var ret generatedV2.AccountApplicationResponse
+ var ret model.AccountApplicationResponse
err = json.Unmarshal(rec.Body.Bytes(), &ret)
require.NoError(t, err)
assert.Nil(t, ret.AppLocalState)
diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go
index 588743a59..30ee0f799 100644
--- a/daemon/algod/api/server/v2/test/handlers_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_test.go
@@ -37,8 +37,7 @@ import (
"github.com/algorand/go-algorand/crypto/merklearray"
"github.com/algorand/go-algorand/crypto/merklesignature"
v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
@@ -88,12 +87,12 @@ func TestSimpleMockBuilding(t *testing.T) {
func accountInformationTest(t *testing.T, address string, expectedCode int) {
handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t)
defer releasefunc()
- err := handler.AccountInformation(c, address, generatedV2.AccountInformationParams{})
+ err := handler.AccountInformation(c, address, model.AccountInformationParams{})
require.NoError(t, err)
require.Equal(t, expectedCode, rec.Code)
if address == poolAddr.String() {
expectedResponse := poolAddrResponseGolden
- actualResponse := generatedV2.AccountResponse{}
+ actualResponse := model.AccountResponse{}
err = protocol.DecodeJSON(rec.Body.Bytes(), &actualResponse)
require.NoError(t, err)
require.Equal(t, expectedResponse, actualResponse)
@@ -111,7 +110,7 @@ func TestAccountInformation(t *testing.T) {
func getBlockTest(t *testing.T, blockNum uint64, format string, expectedCode int) {
handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t)
defer releasefunc()
- err := handler.GetBlock(c, blockNum, generatedV2.GetBlockParams{Format: &format})
+ err := handler.GetBlock(c, blockNum, model.GetBlockParams{Format: (*model.GetBlockParamsFormat)(&format)})
require.NoError(t, err)
require.Equal(t, expectedCode, rec.Code)
}
@@ -239,11 +238,11 @@ func TestGetBlockGetBlockHash(t *testing.T) {
}
var block1, block2 blockResponse
- var block1Hash generatedV2.BlockHashResponse
+ var block1Hash model.BlockHashResponse
format := "json"
// Get block 1
- err := handler.GetBlock(c, 1, generatedV2.GetBlockParams{Format: &format})
+ err := handler.GetBlock(c, 1, model.GetBlockParams{Format: (*model.GetBlockParamsFormat)(&format)})
a.NoError(err)
a.Equal(200, rec.Code)
err = protocol.DecodeJSON(rec.Body.Bytes(), &block1)
@@ -251,7 +250,7 @@ func TestGetBlockGetBlockHash(t *testing.T) {
// Get block 2
c, rec = newReq(t)
- err = handler.GetBlock(c, 2, generatedV2.GetBlockParams{Format: &format})
+ err = handler.GetBlock(c, 2, model.GetBlockParams{Format: (*model.GetBlockParamsFormat)(&format)})
a.NoError(err)
a.Equal(200, rec.Code)
err = protocol.DecodeJSON(rec.Body.Bytes(), &block2)
@@ -285,7 +284,7 @@ func TestGetBlockJsonEncoding(t *testing.T) {
// fetch the block and ensure it can be properly decoded with the standard JSON decoder
format := "json"
- err := handler.GetBlock(c, 1, generatedV2.GetBlockParams{Format: &format})
+ err := handler.GetBlock(c, 1, model.GetBlockParams{Format: (*model.GetBlockParamsFormat)(&format)})
require.NoError(t, err)
require.Equal(t, 200, rec.Code)
body := rec.Body.Bytes()
@@ -317,7 +316,7 @@ func TestGetStatus(t *testing.T) {
err := handler.GetStatus(c)
require.NoError(t, err)
stat := cannedStatusReportGolden
- expectedResult := generatedV2.NodeStatusResponse{
+ expectedResult := model.NodeStatusResponse{
LastRound: uint64(stat.LastRound),
LastVersion: string(stat.LastVersion),
NextVersion: string(stat.NextVersion),
@@ -333,8 +332,11 @@ func TestGetStatus(t *testing.T) {
CatchpointVerifiedAccounts: &stat.CatchpointCatchupVerifiedAccounts,
CatchpointTotalBlocks: &stat.CatchpointCatchupTotalBlocks,
CatchpointAcquiredBlocks: &stat.CatchpointCatchupAcquiredBlocks,
+ CatchpointTotalKvs: &stat.CatchpointCatchupTotalKVs,
+ CatchpointProcessedKvs: &stat.CatchpointCatchupProcessedKVs,
+ CatchpointVerifiedKvs: &stat.CatchpointCatchupVerifiedKVs,
}
- actualResult := generatedV2.NodeStatusResponse{}
+ actualResult := model.NodeStatusResponse{}
err = protocol.DecodeJSON(rec.Body.Bytes(), &actualResult)
require.NoError(t, err)
require.Equal(t, expectedResult, actualResult)
@@ -371,7 +373,7 @@ func pendingTransactionInformationTest(t *testing.T, txidToUse int, format strin
if txidToUse >= 0 {
txid = stxns[txidToUse].ID().String()
}
- params := generatedV2.PendingTransactionInformationParams{Format: &format}
+ params := model.PendingTransactionInformationParams{Format: (*model.PendingTransactionInformationParamsFormat)(&format)}
err := handler.PendingTransactionInformation(c, txid, params)
require.NoError(t, err)
require.Equal(t, expectedCode, rec.Code)
@@ -390,12 +392,12 @@ func TestPendingTransactionInformation(t *testing.T) {
func getPendingTransactionsTest(t *testing.T, format string, max uint64, expectedCode int) {
handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t)
defer releasefunc()
- params := generatedV2.GetPendingTransactionsParams{Format: &format, Max: &max}
+ params := model.GetPendingTransactionsParams{Format: (*model.GetPendingTransactionsParamsFormat)(&format), Max: &max}
err := handler.GetPendingTransactions(c, params)
require.NoError(t, err)
require.Equal(t, expectedCode, rec.Code)
if format == "json" && rec.Code == 200 {
- var response generatedV2.PendingTransactionsResponse
+ var response model.PendingTransactionsResponse
data := rec.Body.Bytes()
err = protocol.DecodeJSON(data, &response)
@@ -417,7 +419,7 @@ func getPendingTransactionsTest(t *testing.T, format string, max uint64, expecte
func TestPendingTransactionLogsEncoding(t *testing.T) {
partitiontest.PartitionTest(t)
- response := generated.PendingTransactionResponse{
+ response := model.PendingTransactionResponse{
Logs: &[][]byte{
{},
[]byte(string("a")),
@@ -473,7 +475,7 @@ func pendingTransactionsByAddressTest(t *testing.T, rootkeyToUse int, format str
if rootkeyToUse >= 0 {
address = rootkeys[rootkeyToUse].Address().String()
}
- params := generatedV2.GetPendingTransactionsByAddressParams{Format: &format}
+ params := model.GetPendingTransactionsByAddressParams{Format: (*model.GetPendingTransactionsByAddressParamsFormat)(&format)}
err := handler.GetPendingTransactionsByAddress(c, address, params)
require.NoError(t, err)
require.Equal(t, expectedCode, rec.Code)
@@ -599,7 +601,7 @@ func TestAbortCatchup(t *testing.T) {
}
func tealCompileTest(t *testing.T, bytesToUse []byte, expectedCode int,
- enableDeveloperAPI bool, params generated.TealCompileParams,
+ enableDeveloperAPI bool, params model.TealCompileParams,
expectedSourcemap *logic.SourceMap,
) (response v2.CompileResponseWithSourceMap) {
numAccounts := 1
@@ -641,7 +643,7 @@ func TestTealCompile(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- params := generated.TealCompileParams{}
+ params := model.TealCompileParams{}
tealCompileTest(t, nil, 200, true, params, nil) // nil program should work
goodProgram := fmt.Sprintf(`#pragma version %d
@@ -655,10 +657,10 @@ int 1`, logic.AssemblerMaxVersion)
// Test good program with params
tealCompileTest(t, goodProgramBytes, 200, true, params, nil)
paramValue := true
- params = generated.TealCompileParams{Sourcemap: &paramValue}
+ params = model.TealCompileParams{Sourcemap: &paramValue}
tealCompileTest(t, goodProgramBytes, 200, true, params, &expectedSourcemap)
paramValue = false
- params = generated.TealCompileParams{Sourcemap: &paramValue}
+ params = model.TealCompileParams{Sourcemap: &paramValue}
tealCompileTest(t, goodProgramBytes, 200, true, params, nil)
// Test a program without the developer API flag.
@@ -672,7 +674,7 @@ int 1`, logic.AssemblerMaxVersion)
func tealDisassembleTest(t *testing.T, program []byte, expectedCode int,
expectedString string, enableDeveloperAPI bool,
-) (response generatedV2.DisassembleResponse) {
+) (response model.DisassembleResponse) {
numAccounts := 1
numTransactions := 1
offlineAccounts := true
@@ -700,7 +702,7 @@ func tealDisassembleTest(t *testing.T, program []byte, expectedCode int,
require.NoError(t, err, string(data))
require.Equal(t, expectedString, response.Result)
} else if rec.Code == 400 {
- var response generatedV2.ErrorResponse
+ var response model.ErrorResponse
data := rec.Body.Bytes()
err = protocol.DecodeJSON(data, &response)
require.NoError(t, err, string(data))
@@ -733,9 +735,9 @@ func TestTealDisassemble(t *testing.T) {
}
func tealDryrunTest(
- t *testing.T, obj *generatedV2.DryrunRequest, format string,
+ t *testing.T, obj *model.DryrunRequest, format string,
expCode int, expResult string, enableDeveloperAPI bool,
-) (response generatedV2.DryrunResponse) {
+) (response model.DryrunResponse) {
numAccounts := 1
numTransactions := 1
offlineAccounts := true
@@ -784,7 +786,7 @@ func TestTealDryrun(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- var gdr generated.DryrunRequest
+ var gdr model.DryrunRequest
txns := []transactions.SignedTxn{
{
Txn: transactions.Transaction{
@@ -811,24 +813,24 @@ func TestTealDryrun(t *testing.T) {
failOps, err := logic.AssembleStringWithVersion("int 0", 2)
require.NoError(t, err)
- gdr.Apps = []generated.Application{
+ gdr.Apps = []model.Application{
{
Id: 1,
- Params: generated.ApplicationParams{
+ Params: model.ApplicationParams{
ApprovalProgram: sucOps.Program,
},
},
}
- localv := make(generated.TealKeyValueStore, 1)
- localv[0] = generated.TealKeyValue{
+ localv := make(model.TealKeyValueStore, 1)
+ localv[0] = model.TealKeyValue{
Key: "foo",
- Value: generated.TealValue{Type: uint64(basics.TealBytesType), Bytes: "bar"},
+ Value: model.TealValue{Type: uint64(basics.TealBytesType), Bytes: "bar"},
}
- gdr.Accounts = []generated.Account{
+ gdr.Accounts = []model.Account{
{
Address: basics.Address{}.String(),
- AppsLocalState: &[]generated.ApplicationLocalState{{
+ AppsLocalState: &[]model.ApplicationLocalState{{
Id: 1,
KeyValue: &localv,
}},
@@ -1005,19 +1007,19 @@ func TestGetProofDefault(t *testing.T) {
defer releasefunc()
txid := stx.ID()
- err := handler.GetTransactionProof(c, 1, txid.String(), generated.GetTransactionProofParams{})
+ err := handler.GetTransactionProof(c, 1, txid.String(), model.GetTransactionProofParams{})
a.NoError(err)
- var resp generatedV2.TransactionProofResponse
+ var resp model.TransactionProofResponse
err = json.Unmarshal(rec.Body.Bytes(), &resp)
a.NoError(err)
- a.Equal("sha512_256", resp.Hashtype)
+ a.Equal(model.TransactionProofResponseHashtypeSha512256, resp.Hashtype)
l := handler.Node.LedgerForAPI()
blkHdr, err := l.BlockHdr(1)
a.NoError(err)
- singleLeafProof, err := merklearray.ProofDataToSingleLeafProof(resp.Hashtype, resp.Treedepth, resp.Proof)
+ singleLeafProof, err := merklearray.ProofDataToSingleLeafProof(string(resp.Hashtype), resp.Treedepth, resp.Proof)
a.NoError(err)
element := TxnMerkleElemRaw{Txn: crypto.Digest(txid)}
@@ -1143,7 +1145,7 @@ func TestStateProof200(t *testing.T) {
a.NoError(handler.GetStateProof(ctx, stateProofIntervalForHandlerTests+1))
a.Equal(200, responseRecorder.Code)
- stprfResp := generated.StateProofResponse{}
+ stprfResp := model.StateProofResponse{}
a.NoError(json.Unmarshal(responseRecorder.Body.Bytes(), &stprfResp))
a.Equal([]byte{0x0, 0x1, 0x2}, stprfResp.Message.BlockHeadersCommitment)
@@ -1191,7 +1193,7 @@ func TestGetBlockProof200(t *testing.T) {
leafproof, err := stateproof.GenerateProofOfLightBlockHeaders(stateProofIntervalForHandlerTests, blkHdrArr, 1)
a.NoError(err)
- proofResp := generated.LightBlockHeaderProofResponse{}
+ proofResp := model.LightBlockHeaderProofResponse{}
a.NoError(json.Unmarshal(responseRecorder.Body.Bytes(), &proofResp))
a.Equal(proofResp.Proof, leafproof.GetConcatenatedProof())
a.Equal(proofResp.Treedepth, uint64(leafproof.TreeDepth))
diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go
index ba2a29549..65c84ec71 100644
--- a/daemon/algod/api/server/v2/test/helpers.go
+++ b/daemon/algod/api/server/v2/test/helpers.go
@@ -26,7 +26,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
@@ -53,17 +53,20 @@ var cannedStatusReportGolden = node.StatusReport{
CatchpointCatchupProcessedAccounts: 0,
CatchpointCatchupVerifiedAccounts: 0,
CatchpointCatchupTotalAccounts: 0,
+ CatchpointCatchupTotalKVs: 0,
+ CatchpointCatchupProcessedKVs: 0,
+ CatchpointCatchupVerifiedKVs: 0,
CatchpointCatchupTotalBlocks: 0,
LastCatchpoint: "",
}
var poolAddrRewardBaseGolden = uint64(0)
-var poolAddrAssetsGolden = make([]generatedV2.AssetHolding, 0)
-var poolAddrCreatedAssetsGolden = make([]generatedV2.Asset, 0)
-var appLocalStates = make([]generatedV2.ApplicationLocalState, 0)
-var appsTotalSchema = generatedV2.ApplicationStateSchema{}
-var appCreatedApps = make([]generatedV2.Application, 0)
-var poolAddrResponseGolden = generatedV2.AccountResponse{
+var poolAddrAssetsGolden = make([]model.AssetHolding, 0)
+var poolAddrCreatedAssetsGolden = make([]model.Asset, 0)
+var appLocalStates = make([]model.ApplicationLocalState, 0)
+var appsTotalSchema = model.ApplicationStateSchema{}
+var appCreatedApps = make([]model.Application, 0)
+var poolAddrResponseGolden = model.AccountResponse{
Address: poolAddr.String(),
Amount: 50000000000,
AmountWithoutPendingRewards: 50000000000,
diff --git a/daemon/algod/api/server/v2/utils.go b/daemon/algod/api/server/v2/utils.go
index fc9dbe2f4..78db965b5 100644
--- a/daemon/algod/api/server/v2/utils.go
+++ b/daemon/algod/api/server/v2/utils.go
@@ -27,7 +27,7 @@ import (
"github.com/algorand/go-codec/codec"
"github.com/labstack/echo/v4"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging"
@@ -38,7 +38,7 @@ import (
// returnError logs an internal message while returning the encoded response.
func returnError(ctx echo.Context, code int, internal error, external string, logger logging.Logger) error {
logger.Info(internal)
- return ctx.JSON(code, generated.ErrorResponse{Message: external})
+ return ctx.JSON(code, model.ErrorResponse{Message: external})
}
func badRequest(ctx echo.Context, internal error, external string, log logging.Logger) error {
@@ -90,6 +90,13 @@ func byteOrNil(data []byte) *[]byte {
return &data
}
+func nilToZero(numPtr *uint64) uint64 {
+ if numPtr == nil {
+ return 0
+ }
+ return *numPtr
+}
+
func computeCreatableIndexInPayset(tx node.TxnWithStatus, txnCounter uint64, payset []transactions.SignedTxnWithAD) (cidx *uint64) {
// Compute transaction index in block
offset := -1
@@ -199,16 +206,16 @@ func computeAppIndexFromTxn(tx node.TxnWithStatus, l LedgerForAPI) *uint64 {
}
// getCodecHandle converts a format string into the encoder + content type
-func getCodecHandle(formatPtr *string) (codec.Handle, string, error) {
- format := "json"
+func getCodecHandle(formatPtr *model.Format) (codec.Handle, string, error) {
+ format := model.Json
if formatPtr != nil {
- format = strings.ToLower(*formatPtr)
+ format = model.PendingTransactionInformationParamsFormat(strings.ToLower(string(*formatPtr)))
}
switch format {
- case "json":
+ case model.Json:
return protocol.JSONStrictHandle, "application/json", nil
- case "msgpack":
+ case model.Msgpack:
fallthrough
case "msgp":
return protocol.CodecHandle, "application/msgpack", nil
@@ -238,16 +245,16 @@ func decode(handle codec.Handle, data []byte, v interface{}) error {
return nil
}
-// Helper to convert basics.StateDelta -> *generated.StateDelta
-func stateDeltaToStateDelta(d basics.StateDelta) *generated.StateDelta {
+// Helper to convert basics.StateDelta -> *model.StateDelta
+func stateDeltaToStateDelta(d basics.StateDelta) *model.StateDelta {
if len(d) == 0 {
return nil
}
- var delta generated.StateDelta
+ var delta model.StateDelta
for k, v := range d {
- delta = append(delta, generated.EvalDeltaKeyValue{
+ delta = append(delta, model.EvalDeltaKeyValue{
Key: base64.StdEncoding.EncodeToString([]byte(k)),
- Value: generated.EvalDelta{
+ Value: model.EvalDelta{
Action: uint64(v.Action),
Bytes: strOrNil(base64.StdEncoding.EncodeToString([]byte(v.Bytes))),
Uint: numOrNil(v.Uint),
@@ -257,10 +264,10 @@ func stateDeltaToStateDelta(d basics.StateDelta) *generated.StateDelta {
return &delta
}
-func convertToDeltas(txn node.TxnWithStatus) (*[]generated.AccountStateDelta, *generated.StateDelta) {
- var localStateDelta *[]generated.AccountStateDelta
+func convertToDeltas(txn node.TxnWithStatus) (*[]model.AccountStateDelta, *model.StateDelta) {
+ var localStateDelta *[]model.AccountStateDelta
if len(txn.ApplyData.EvalDelta.LocalDeltas) > 0 {
- d := make([]generated.AccountStateDelta, 0)
+ d := make([]model.AccountStateDelta, 0)
accounts := txn.Txn.Txn.Accounts
for k, v := range txn.ApplyData.EvalDelta.LocalDeltas {
@@ -275,7 +282,7 @@ func convertToDeltas(txn node.TxnWithStatus) (*[]generated.AccountStateDelta, *g
addr = fmt.Sprintf("Invalid Address Index: %d", k-1)
}
}
- d = append(d, generated.AccountStateDelta{
+ d = append(d, model.AccountStateDelta{
Address: addr,
Delta: *(stateDeltaToStateDelta(v)),
})
@@ -301,20 +308,20 @@ func convertLogs(txn node.TxnWithStatus) *[][]byte {
return logItems
}
-func convertInners(txn *node.TxnWithStatus) *[]preEncodedTxInfo {
- inner := make([]preEncodedTxInfo, len(txn.ApplyData.EvalDelta.InnerTxns))
+func convertInners(txn *node.TxnWithStatus) *[]PreEncodedTxInfo {
+ inner := make([]PreEncodedTxInfo, len(txn.ApplyData.EvalDelta.InnerTxns))
for i, itxn := range txn.ApplyData.EvalDelta.InnerTxns {
inner[i] = convertInnerTxn(&itxn)
}
return &inner
}
-func convertInnerTxn(txn *transactions.SignedTxnWithAD) preEncodedTxInfo {
+func convertInnerTxn(txn *transactions.SignedTxnWithAD) PreEncodedTxInfo {
// This copies from handlers.PendingTransactionInformation, with
// simplifications because we have a SignedTxnWithAD rather than
// TxnWithStatus, and we know this txn has committed.
- response := preEncodedTxInfo{Txn: txn.SignedTxn}
+ response := PreEncodedTxInfo{Txn: txn.SignedTxn}
response.ClosingAmount = &txn.ApplyData.ClosingAmount.Raw
response.AssetClosingAmount = &txn.ApplyData.AssetClosingAmount
diff --git a/daemon/algod/api/swagger.go b/daemon/algod/api/swagger.go
new file mode 100644
index 000000000..e20506e2c
--- /dev/null
+++ b/daemon/algod/api/swagger.go
@@ -0,0 +1,25 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package api
+
+import _ "embed" // for embedding purposes
+
+// SwaggerSpecJSONEmbed is a string that is pulled from algod.oas2.json via go-embed
+// for use with the GET /swagger.json endpoint
+//
+//go:embed algod.oas2.json
+var SwaggerSpecJSONEmbed string
diff --git a/data/account/participationRegistry.go b/data/account/participationRegistry.go
index e1c82a892..c8892513e 100644
--- a/data/account/participationRegistry.go
+++ b/data/account/participationRegistry.go
@@ -1015,7 +1015,7 @@ func (db *participationDB) Flush(timeout time.Duration) error {
// Close attempts to flush with db.flushTimeout, then waits for the write queue for another db.flushTimeout.
func (db *participationDB) Close() {
if err := db.Flush(db.flushTimeout); err != nil {
- db.log.Warnf("participationDB unhandled error during Close/Flush: %w", err)
+ db.log.Warnf("participationDB unhandled error during Close/Flush: %v", err)
}
db.store.Close()
diff --git a/data/account/registeryDbOps.go b/data/account/registeryDbOps.go
index 282008eb7..f83055d3d 100644
--- a/data/account/registeryDbOps.go
+++ b/data/account/registeryDbOps.go
@@ -85,7 +85,7 @@ func (d deleteStateProofKeysOp) apply(db *participationDB) error {
})
if err != nil {
- db.log.Warnf("participationDB unable to delete stateProof key: %w", err)
+ db.log.Warnf("participationDB unable to delete stateProof key: %v", err)
}
return err
}
diff --git a/data/accountManager.go b/data/accountManager.go
index aa5064e09..39998a09d 100644
--- a/data/accountManager.go
+++ b/data/accountManager.go
@@ -67,7 +67,7 @@ func (manager *AccountManager) Keys(rnd basics.Round) (out []account.Participati
if part.OverlapsInterval(rnd, rnd) {
partRndSecrets, err := manager.registry.GetForRound(part.ParticipationID, rnd)
if err != nil {
- manager.log.Warnf("error while loading round secrets from participation registry: %w", err)
+ manager.log.Warnf("error while loading round secrets from participation registry: %v", err)
continue
}
out = append(out, partRndSecrets)
@@ -198,7 +198,7 @@ func (manager *AccountManager) DeleteOldKeys(latestHdr bookkeeping.BlockHeader,
// Delete expired records from participation registry.
if err := manager.registry.DeleteExpired(latestHdr.Round, agreementProto); err != nil {
- manager.log.Warnf("error while deleting expired records from participation registry: %w", err)
+ manager.log.Warnf("error while deleting expired records from participation registry: %v", err)
}
}
@@ -212,6 +212,6 @@ func (manager *AccountManager) Record(account basics.Address, round basics.Round
// This function updates a cache in the ParticipationRegistry, we must call Flush to persist the changes.
err := manager.registry.Record(account, round, participationType)
if err != nil {
- manager.log.Warnf("node.Record: Account %v not able to record participation (%d) on round %d: %w", account, participationType, round, err)
+ manager.log.Warnf("node.Record: Account %v not able to record participation (%d) on round %d: %v", account, participationType, round, err)
}
}
diff --git a/data/basics/msgp_gen.go b/data/basics/msgp_gen.go
index 53bcc659f..c39ca5833 100644
--- a/data/basics/msgp_gen.go
+++ b/data/basics/msgp_gen.go
@@ -201,8 +201,8 @@ import (
func (z *AccountData) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0009Len := uint32(17)
- var zb0009Mask uint32 /* 18 bits */
+ zb0009Len := uint32(19)
+ var zb0009Mask uint32 /* 20 bits */
if (*z).MicroAlgos.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x2
@@ -247,30 +247,38 @@ func (z *AccountData) MarshalMsg(b []byte) (o []byte) {
zb0009Len--
zb0009Mask |= 0x800
}
- if (*z).TotalExtraAppPages == 0 {
+ if (*z).TotalBoxes == 0 {
zb0009Len--
zb0009Mask |= 0x1000
}
- if ((*z).TotalAppSchema.NumUint == 0) && ((*z).TotalAppSchema.NumByteSlice == 0) {
+ if (*z).TotalBoxBytes == 0 {
zb0009Len--
zb0009Mask |= 0x2000
}
- if (*z).VoteID.MsgIsZero() {
+ if (*z).TotalExtraAppPages == 0 {
zb0009Len--
zb0009Mask |= 0x4000
}
- if (*z).VoteFirstValid == 0 {
+ if ((*z).TotalAppSchema.NumUint == 0) && ((*z).TotalAppSchema.NumByteSlice == 0) {
zb0009Len--
zb0009Mask |= 0x8000
}
- if (*z).VoteKeyDilution == 0 {
+ if (*z).VoteID.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x10000
}
- if (*z).VoteLastValid == 0 {
+ if (*z).VoteFirstValid == 0 {
zb0009Len--
zb0009Mask |= 0x20000
}
+ if (*z).VoteKeyDilution == 0 {
+ zb0009Len--
+ zb0009Mask |= 0x40000
+ }
+ if (*z).VoteLastValid == 0 {
+ zb0009Len--
+ zb0009Mask |= 0x80000
+ }
// variable map header, size zb0009Len
o = msgp.AppendMapHeader(o, zb0009Len)
if zb0009Len != 0 {
@@ -414,11 +422,21 @@ func (z *AccountData) MarshalMsg(b []byte) (o []byte) {
o = (*z).StateProofID.MarshalMsg(o)
}
if (zb0009Mask & 0x1000) == 0 { // if not empty
+ // string "tbx"
+ o = append(o, 0xa3, 0x74, 0x62, 0x78)
+ o = msgp.AppendUint64(o, (*z).TotalBoxes)
+ }
+ if (zb0009Mask & 0x2000) == 0 { // if not empty
+ // string "tbxb"
+ o = append(o, 0xa4, 0x74, 0x62, 0x78, 0x62)
+ o = msgp.AppendUint64(o, (*z).TotalBoxBytes)
+ }
+ if (zb0009Mask & 0x4000) == 0 { // if not empty
// string "teap"
o = append(o, 0xa4, 0x74, 0x65, 0x61, 0x70)
o = msgp.AppendUint32(o, (*z).TotalExtraAppPages)
}
- if (zb0009Mask & 0x2000) == 0 { // if not empty
+ if (zb0009Mask & 0x8000) == 0 { // if not empty
// string "tsch"
o = append(o, 0xa4, 0x74, 0x73, 0x63, 0x68)
// omitempty: check for empty values
@@ -445,22 +463,22 @@ func (z *AccountData) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendUint64(o, (*z).TotalAppSchema.NumUint)
}
}
- if (zb0009Mask & 0x4000) == 0 { // if not empty
+ if (zb0009Mask & 0x10000) == 0 { // if not empty
// string "vote"
o = append(o, 0xa4, 0x76, 0x6f, 0x74, 0x65)
o = (*z).VoteID.MarshalMsg(o)
}
- if (zb0009Mask & 0x8000) == 0 { // if not empty
+ if (zb0009Mask & 0x20000) == 0 { // if not empty
// string "voteFst"
o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x46, 0x73, 0x74)
o = msgp.AppendUint64(o, uint64((*z).VoteFirstValid))
}
- if (zb0009Mask & 0x10000) == 0 { // if not empty
+ if (zb0009Mask & 0x40000) == 0 { // if not empty
// string "voteKD"
o = append(o, 0xa6, 0x76, 0x6f, 0x74, 0x65, 0x4b, 0x44)
o = msgp.AppendUint64(o, (*z).VoteKeyDilution)
}
- if (zb0009Mask & 0x20000) == 0 { // if not empty
+ if (zb0009Mask & 0x80000) == 0 { // if not empty
// string "voteLst"
o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x4c, 0x73, 0x74)
o = msgp.AppendUint64(o, uint64((*z).VoteLastValid))
@@ -876,6 +894,22 @@ func (z *AccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
if zb0009 > 0 {
+ zb0009--
+ (*z).TotalBoxes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TotalBoxes")
+ return
+ }
+ }
+ if zb0009 > 0 {
+ zb0009--
+ (*z).TotalBoxBytes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TotalBoxBytes")
+ return
+ }
+ }
+ if zb0009 > 0 {
err = msgp.ErrTooManyArrayFields(zb0009)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
@@ -1252,6 +1286,18 @@ func (z *AccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "TotalExtraAppPages")
return
}
+ case "tbx":
+ (*z).TotalBoxes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TotalBoxes")
+ return
+ }
+ case "tbxb":
+ (*z).TotalBoxBytes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TotalBoxBytes")
+ return
+ }
default:
err = msgp.ErrNoField(string(field))
if err != nil {
@@ -1304,13 +1350,13 @@ func (z *AccountData) Msgsize() (s int) {
s += 0 + zb0007.Msgsize() + zb0008.Msgsize()
}
}
- s += 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 + msgp.Uint32Size
+ s += 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 + msgp.Uint32Size + 4 + msgp.Uint64Size + 5 + msgp.Uint64Size
return
}
// MsgIsZero returns whether this is a zero value
func (z *AccountData) MsgIsZero() bool {
- return ((*z).Status == 0) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0) && ((*z).RewardedMicroAlgos.MsgIsZero()) && ((*z).VoteID.MsgIsZero()) && ((*z).SelectionID.MsgIsZero()) && ((*z).StateProofID.MsgIsZero()) && ((*z).VoteFirstValid == 0) && ((*z).VoteLastValid == 0) && ((*z).VoteKeyDilution == 0) && (len((*z).AssetParams) == 0) && (len((*z).Assets) == 0) && ((*z).AuthAddr.MsgIsZero()) && (len((*z).AppLocalStates) == 0) && (len((*z).AppParams) == 0) && (((*z).TotalAppSchema.NumUint == 0) && ((*z).TotalAppSchema.NumByteSlice == 0)) && ((*z).TotalExtraAppPages == 0)
+ return ((*z).Status == 0) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0) && ((*z).RewardedMicroAlgos.MsgIsZero()) && ((*z).VoteID.MsgIsZero()) && ((*z).SelectionID.MsgIsZero()) && ((*z).StateProofID.MsgIsZero()) && ((*z).VoteFirstValid == 0) && ((*z).VoteLastValid == 0) && ((*z).VoteKeyDilution == 0) && (len((*z).AssetParams) == 0) && (len((*z).Assets) == 0) && ((*z).AuthAddr.MsgIsZero()) && (len((*z).AppLocalStates) == 0) && (len((*z).AppParams) == 0) && (((*z).TotalAppSchema.NumUint == 0) && ((*z).TotalAppSchema.NumByteSlice == 0)) && ((*z).TotalExtraAppPages == 0) && ((*z).TotalBoxes == 0) && ((*z).TotalBoxBytes == 0)
}
// MarshalMsg implements msgp.Marshaler
@@ -2899,8 +2945,8 @@ func (z *AssetParams) MsgIsZero() bool {
func (z *BalanceRecord) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0009Len := uint32(18)
- var zb0009Mask uint32 /* 20 bits */
+ zb0009Len := uint32(20)
+ var zb0009Mask uint32 /* 22 bits */
if (*z).Addr.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x4
@@ -2949,30 +2995,38 @@ func (z *BalanceRecord) MarshalMsg(b []byte) (o []byte) {
zb0009Len--
zb0009Mask |= 0x2000
}
- if (*z).AccountData.TotalExtraAppPages == 0 {
+ if (*z).AccountData.TotalBoxes == 0 {
zb0009Len--
zb0009Mask |= 0x4000
}
- if ((*z).AccountData.TotalAppSchema.NumUint == 0) && ((*z).AccountData.TotalAppSchema.NumByteSlice == 0) {
+ if (*z).AccountData.TotalBoxBytes == 0 {
zb0009Len--
zb0009Mask |= 0x8000
}
- if (*z).AccountData.VoteID.MsgIsZero() {
+ if (*z).AccountData.TotalExtraAppPages == 0 {
zb0009Len--
zb0009Mask |= 0x10000
}
- if (*z).AccountData.VoteFirstValid == 0 {
+ if ((*z).AccountData.TotalAppSchema.NumUint == 0) && ((*z).AccountData.TotalAppSchema.NumByteSlice == 0) {
zb0009Len--
zb0009Mask |= 0x20000
}
- if (*z).AccountData.VoteKeyDilution == 0 {
+ if (*z).AccountData.VoteID.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x40000
}
- if (*z).AccountData.VoteLastValid == 0 {
+ if (*z).AccountData.VoteFirstValid == 0 {
zb0009Len--
zb0009Mask |= 0x80000
}
+ if (*z).AccountData.VoteKeyDilution == 0 {
+ zb0009Len--
+ zb0009Mask |= 0x100000
+ }
+ if (*z).AccountData.VoteLastValid == 0 {
+ zb0009Len--
+ zb0009Mask |= 0x200000
+ }
// variable map header, size zb0009Len
o = msgp.AppendMapHeader(o, zb0009Len)
if zb0009Len != 0 {
@@ -3121,11 +3175,21 @@ func (z *BalanceRecord) MarshalMsg(b []byte) (o []byte) {
o = (*z).AccountData.StateProofID.MarshalMsg(o)
}
if (zb0009Mask & 0x4000) == 0 { // if not empty
+ // string "tbx"
+ o = append(o, 0xa3, 0x74, 0x62, 0x78)
+ o = msgp.AppendUint64(o, (*z).AccountData.TotalBoxes)
+ }
+ if (zb0009Mask & 0x8000) == 0 { // if not empty
+ // string "tbxb"
+ o = append(o, 0xa4, 0x74, 0x62, 0x78, 0x62)
+ o = msgp.AppendUint64(o, (*z).AccountData.TotalBoxBytes)
+ }
+ if (zb0009Mask & 0x10000) == 0 { // if not empty
// string "teap"
o = append(o, 0xa4, 0x74, 0x65, 0x61, 0x70)
o = msgp.AppendUint32(o, (*z).AccountData.TotalExtraAppPages)
}
- if (zb0009Mask & 0x8000) == 0 { // if not empty
+ if (zb0009Mask & 0x20000) == 0 { // if not empty
// string "tsch"
o = append(o, 0xa4, 0x74, 0x73, 0x63, 0x68)
// omitempty: check for empty values
@@ -3152,22 +3216,22 @@ func (z *BalanceRecord) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendUint64(o, (*z).AccountData.TotalAppSchema.NumUint)
}
}
- if (zb0009Mask & 0x10000) == 0 { // if not empty
+ if (zb0009Mask & 0x40000) == 0 { // if not empty
// string "vote"
o = append(o, 0xa4, 0x76, 0x6f, 0x74, 0x65)
o = (*z).AccountData.VoteID.MarshalMsg(o)
}
- if (zb0009Mask & 0x20000) == 0 { // if not empty
+ if (zb0009Mask & 0x80000) == 0 { // if not empty
// string "voteFst"
o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x46, 0x73, 0x74)
o = msgp.AppendUint64(o, uint64((*z).AccountData.VoteFirstValid))
}
- if (zb0009Mask & 0x40000) == 0 { // if not empty
+ if (zb0009Mask & 0x100000) == 0 { // if not empty
// string "voteKD"
o = append(o, 0xa6, 0x76, 0x6f, 0x74, 0x65, 0x4b, 0x44)
o = msgp.AppendUint64(o, (*z).AccountData.VoteKeyDilution)
}
- if (zb0009Mask & 0x80000) == 0 { // if not empty
+ if (zb0009Mask & 0x200000) == 0 { // if not empty
// string "voteLst"
o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x4c, 0x73, 0x74)
o = msgp.AppendUint64(o, uint64((*z).AccountData.VoteLastValid))
@@ -3591,6 +3655,22 @@ func (z *BalanceRecord) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
if zb0009 > 0 {
+ zb0009--
+ (*z).AccountData.TotalBoxes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TotalBoxes")
+ return
+ }
+ }
+ if zb0009 > 0 {
+ zb0009--
+ (*z).AccountData.TotalBoxBytes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TotalBoxBytes")
+ return
+ }
+ }
+ if zb0009 > 0 {
err = msgp.ErrTooManyArrayFields(zb0009)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
@@ -3973,6 +4053,18 @@ func (z *BalanceRecord) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "TotalExtraAppPages")
return
}
+ case "tbx":
+ (*z).AccountData.TotalBoxes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TotalBoxes")
+ return
+ }
+ case "tbxb":
+ (*z).AccountData.TotalBoxBytes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TotalBoxBytes")
+ return
+ }
default:
err = msgp.ErrNoField(string(field))
if err != nil {
@@ -4025,13 +4117,13 @@ func (z *BalanceRecord) Msgsize() (s int) {
s += 0 + zb0007.Msgsize() + zb0008.Msgsize()
}
}
- s += 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 + msgp.Uint32Size
+ s += 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 + msgp.Uint32Size + 4 + msgp.Uint64Size + 5 + msgp.Uint64Size
return
}
// MsgIsZero returns whether this is a zero value
func (z *BalanceRecord) MsgIsZero() bool {
- return ((*z).Addr.MsgIsZero()) && ((*z).AccountData.Status == 0) && ((*z).AccountData.MicroAlgos.MsgIsZero()) && ((*z).AccountData.RewardsBase == 0) && ((*z).AccountData.RewardedMicroAlgos.MsgIsZero()) && ((*z).AccountData.VoteID.MsgIsZero()) && ((*z).AccountData.SelectionID.MsgIsZero()) && ((*z).AccountData.StateProofID.MsgIsZero()) && ((*z).AccountData.VoteFirstValid == 0) && ((*z).AccountData.VoteLastValid == 0) && ((*z).AccountData.VoteKeyDilution == 0) && (len((*z).AccountData.AssetParams) == 0) && (len((*z).AccountData.Assets) == 0) && ((*z).AccountData.AuthAddr.MsgIsZero()) && (len((*z).AccountData.AppLocalStates) == 0) && (len((*z).AccountData.AppParams) == 0) && (((*z).AccountData.TotalAppSchema.NumUint == 0) && ((*z).AccountData.TotalAppSchema.NumByteSlice == 0)) && ((*z).AccountData.TotalExtraAppPages == 0)
+ return ((*z).Addr.MsgIsZero()) && ((*z).AccountData.Status == 0) && ((*z).AccountData.MicroAlgos.MsgIsZero()) && ((*z).AccountData.RewardsBase == 0) && ((*z).AccountData.RewardedMicroAlgos.MsgIsZero()) && ((*z).AccountData.VoteID.MsgIsZero()) && ((*z).AccountData.SelectionID.MsgIsZero()) && ((*z).AccountData.StateProofID.MsgIsZero()) && ((*z).AccountData.VoteFirstValid == 0) && ((*z).AccountData.VoteLastValid == 0) && ((*z).AccountData.VoteKeyDilution == 0) && (len((*z).AccountData.AssetParams) == 0) && (len((*z).AccountData.Assets) == 0) && ((*z).AccountData.AuthAddr.MsgIsZero()) && (len((*z).AccountData.AppLocalStates) == 0) && (len((*z).AccountData.AppParams) == 0) && (((*z).AccountData.TotalAppSchema.NumUint == 0) && ((*z).AccountData.TotalAppSchema.NumByteSlice == 0)) && ((*z).AccountData.TotalExtraAppPages == 0) && ((*z).AccountData.TotalBoxes == 0) && ((*z).AccountData.TotalBoxBytes == 0)
}
// MarshalMsg implements msgp.Marshaler
diff --git a/data/basics/userBalance.go b/data/basics/userBalance.go
index 2b0a2699e..06d7c4b6b 100644
--- a/data/basics/userBalance.go
+++ b/data/basics/userBalance.go
@@ -42,20 +42,16 @@ const (
// These two accounts also have additional Algo transfer restrictions.
NotParticipating
- // MaxEncodedAccountDataSize is a rough estimate for the worst-case scenario we're going to have of the account data and address serialized.
- // this number is verified by the TestEncodedAccountDataSize function.
- MaxEncodedAccountDataSize = 850000
-
// encodedMaxAssetsPerAccount is the decoder limit of number of assets stored per account.
// it's being verified by the unit test TestEncodedAccountAllocationBounds to align
// with config.Consensus[protocol.ConsensusCurrentVersion].MaxAssetsPerAccount; note that the decoded
// parameter is used only for protecting the decoder against malicious encoded account data stream.
- // protocol-specific constains would be tested once the decoding is complete.
+ // protocol-specific constraints would be tested once the decoding is complete.
encodedMaxAssetsPerAccount = 1024
// EncodedMaxAppLocalStates is the decoder limit for number of opted-in apps in a single account.
// It is verified in TestEncodedAccountAllocationBounds to align with
- // config.Consensus[protocol.ConsensusCurrentVersion].MaxppsOptedIn
+ // config.Consensus[protocol.ConsensusCurrentVersion].MaxAppsOptedIn
EncodedMaxAppLocalStates = 64
// EncodedMaxAppParams is the decoder limit for number of created apps in a single account.
@@ -228,6 +224,12 @@ type AccountData struct {
// TotalExtraAppPages stores the extra length in pages (MaxAppProgramLen bytes per page)
// requested for app program by this account
TotalExtraAppPages uint32 `codec:"teap"`
+
+ // TotalBoxes is the total number of boxes associated with this account, which implies it is an app account.
+ TotalBoxes uint64 `codec:"tbx"`
+
+ // TotalBoxBytes stores the sum of all len(keys) and len(values) of Boxes
+ TotalBoxBytes uint64 `codec:"tbxb"`
}
// AppLocalState stores the LocalState associated with an application. It also
@@ -475,6 +477,7 @@ func (u AccountData) MinBalance(proto *config.ConsensusParams) (res MicroAlgos)
u.TotalAppSchema,
uint64(len(u.AppParams)), uint64(len(u.AppLocalStates)),
uint64(u.TotalExtraAppPages),
+ u.TotalBoxes, u.TotalBoxBytes,
)
}
@@ -487,6 +490,7 @@ func MinBalance(
totalAppSchema StateSchema,
totalAppParams uint64, totalAppLocalStates uint64,
totalExtraAppPages uint64,
+ totalBoxes uint64, totalBoxBytes uint64,
) (res MicroAlgos) {
var min uint64
@@ -514,6 +518,14 @@ func MinBalance(
extraAppProgramLenCost := MulSaturate(proto.AppFlatParamsMinBalance, totalExtraAppPages)
min = AddSaturate(min, extraAppProgramLenCost)
+ // Base MinBalance for each created box
+ boxBaseCost := MulSaturate(proto.BoxFlatMinBalance, totalBoxes)
+ min = AddSaturate(min, boxBaseCost)
+
+ // Per byte MinBalance for boxes
+ boxByteCost := MulSaturate(proto.BoxByteMinBalance, totalBoxBytes)
+ min = AddSaturate(min, boxByteCost)
+
res.Raw = min
return res
}
diff --git a/data/basics/userBalance_test.go b/data/basics/userBalance_test.go
index 050ea2882..347dadfe6 100644
--- a/data/basics/userBalance_test.go
+++ b/data/basics/userBalance_test.go
@@ -131,102 +131,6 @@ func getSampleAccountData() AccountData {
}
}
-func TestEncodedAccountDataSize(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- maxStateSchema := StateSchema{
- NumUint: 0x1234123412341234,
- NumByteSlice: 0x1234123412341234,
- }
- ad := getSampleAccountData()
- ad.TotalAppSchema = maxStateSchema
-
- // TODO after applications enabled: change back to protocol.ConsensusCurrentVersion
- currentConsensusParams := config.Consensus[protocol.ConsensusFuture]
-
- for assetCreatorAssets := 0; assetCreatorAssets < currentConsensusParams.MaxAssetsPerAccount; assetCreatorAssets++ {
- ap := AssetParams{
- Total: 0x1234123412341234,
- Decimals: 0x12341234,
- DefaultFrozen: true,
- UnitName: makeString(currentConsensusParams.MaxAssetUnitNameBytes),
- AssetName: makeString(currentConsensusParams.MaxAssetNameBytes),
- URL: makeString(currentConsensusParams.MaxAssetURLBytes),
- Manager: Address(crypto.Hash([]byte{1, byte(assetCreatorAssets)})),
- Reserve: Address(crypto.Hash([]byte{2, byte(assetCreatorAssets)})),
- Freeze: Address(crypto.Hash([]byte{3, byte(assetCreatorAssets)})),
- Clawback: Address(crypto.Hash([]byte{4, byte(assetCreatorAssets)})),
- }
- copy(ap.MetadataHash[:], makeString(32))
- ad.AssetParams[AssetIndex(0x1234123412341234-assetCreatorAssets)] = ap
- }
-
- for assetHolderAssets := 0; assetHolderAssets < currentConsensusParams.MaxAssetsPerAccount; assetHolderAssets++ {
- ah := AssetHolding{
- Amount: 0x1234123412341234,
- Frozen: true,
- }
- ad.Assets[AssetIndex(0x1234123412341234-assetHolderAssets)] = ah
- }
-
- maxProg := []byte(makeString(config.MaxAvailableAppProgramLen))
- maxGlobalState := make(TealKeyValue, currentConsensusParams.MaxGlobalSchemaEntries)
- maxLocalState := make(TealKeyValue, currentConsensusParams.MaxLocalSchemaEntries)
-
- for globalKey := uint64(0); globalKey < currentConsensusParams.MaxGlobalSchemaEntries; globalKey++ {
- prefix := fmt.Sprintf("%d|", globalKey)
- padding := makeString(currentConsensusParams.MaxAppKeyLen - len(prefix))
- maxKey := prefix + padding
- maxValue := TealValue{
- Type: TealBytesType,
- Bytes: makeString(currentConsensusParams.MaxAppSumKeyValueLens - len(maxKey)),
- }
- maxGlobalState[maxKey] = maxValue
- }
-
- for localKey := uint64(0); localKey < currentConsensusParams.MaxLocalSchemaEntries; localKey++ {
- prefix := fmt.Sprintf("%d|", localKey)
- padding := makeString(currentConsensusParams.MaxAppKeyLen - len(prefix))
- maxKey := prefix + padding
- maxValue := TealValue{
- Type: TealBytesType,
- Bytes: makeString(currentConsensusParams.MaxAppSumKeyValueLens - len(maxKey)),
- }
- maxLocalState[maxKey] = maxValue
- }
- maxAppsCreate := currentConsensusParams.MaxAppsCreated
- if maxAppsCreate == 0 {
- maxAppsCreate = config.Consensus[protocol.ConsensusV30].MaxAppsCreated
- }
- for appCreatorApps := 0; appCreatorApps < maxAppsCreate; appCreatorApps++ {
- ap := AppParams{
- ApprovalProgram: maxProg,
- ClearStateProgram: maxProg,
- GlobalState: maxGlobalState,
- StateSchemas: StateSchemas{
- LocalStateSchema: maxStateSchema,
- GlobalStateSchema: maxStateSchema,
- },
- }
- ad.AppParams[AppIndex(0x1234123412341234-appCreatorApps)] = ap
- }
-
- maxAppsOptedIn := currentConsensusParams.MaxAppsOptedIn
- if maxAppsOptedIn == 0 {
- maxAppsOptedIn = config.Consensus[protocol.ConsensusV30].MaxAppsOptedIn
- }
- for appHolderApps := 0; appHolderApps < maxAppsOptedIn; appHolderApps++ {
- ls := AppLocalState{
- KeyValue: maxLocalState,
- Schema: maxStateSchema,
- }
- ad.AppLocalStates[AppIndex(0x1234123412341234-appHolderApps)] = ls
- }
-
- encoded := ad.MarshalMsg(nil)
- require.GreaterOrEqual(t, MaxEncodedAccountDataSize, len(encoded))
-}
-
func TestEncodedAccountAllocationBounds(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -248,6 +152,7 @@ func TestEncodedAccountAllocationBounds(t *testing.T) {
if proto.MaxGlobalSchemaEntries > EncodedMaxKeyValueEntries {
require.Failf(t, "proto.MaxGlobalSchemaEntries > encodedMaxKeyValueEntries", "protocol version = %s", protoVer)
}
+ // There is no protocol limit to the number of Boxes per account, so that allocbound is not checked.
}
}
diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go
index d5df86811..4302d14a2 100644
--- a/data/pools/transactionPool.go
+++ b/data/pools/transactionPool.go
@@ -737,16 +737,24 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact
for _, tx := range txgroup {
pool.statusCache.put(tx, err.Error())
}
-
- switch err.(type) {
+ // metrics here are duplicated for historic reasons. stats is hardly used and should be removed in favor of asmstats
+ switch terr := err.(type) {
case *ledgercore.TransactionInLedgerError:
asmStats.CommittedCount++
stats.RemovedInvalidCount++
case transactions.TxnDeadError:
- asmStats.InvalidCount++
+ if int(terr.LastValid-terr.FirstValid) > 20 {
+ // cutoff value here is picked as a somewhat arbitrary cutoff trying to separate longer lived transactions from very short lived ones
+ asmStats.ExpiredLongLivedCount++
+ }
+ asmStats.ExpiredCount++
stats.ExpiredCount++
+ case *ledgercore.LeaseInLedgerError:
+ asmStats.LeaseErrorCount++
+ stats.RemovedInvalidCount++
+ pool.log.Infof("Cannot re-add pending transaction to pool: %v", err)
case transactions.MinFeeError:
- asmStats.InvalidCount++
+ asmStats.MinFeeErrorCount++
stats.RemovedInvalidCount++
pool.log.Infof("Cannot re-add pending transaction to pool: %v", err)
default:
diff --git a/data/transactions/application.go b/data/transactions/application.go
index 0db7e72b9..70fd774df 100644
--- a/data/transactions/application.go
+++ b/data/transactions/application.go
@@ -46,6 +46,12 @@ const (
// can contain. Its value is verified against consensus parameters in
// TestEncodedAppTxnAllocationBounds
encodedMaxForeignAssets = 32
+
+ // encodedMaxBoxes sets the allocation bound for the maximum
+ // number of Boxes that a transaction decoded off of the wire
+ // can contain. Its value is verified against consensus parameters in
+ // TestEncodedAppTxnAllocationBounds
+ encodedMaxBoxes = 32
)
// OnCompletion is an enum representing some layer 1 side effect that an
@@ -115,6 +121,12 @@ type ApplicationCallTxnFields struct {
// by the executing ApprovalProgram or ClearStateProgram.
ForeignApps []basics.AppIndex `codec:"apfa,allocbound=encodedMaxForeignApps"`
+ // Boxes are the boxes that can be accessed by this transaction (and others
+ // in the same group). The Index in the BoxRef is the slot of ForeignApps
+ // that the name is associated with (shifted by 1, so 0 indicates "current
+ // app")
+ Boxes []BoxRef `codec:"apbx,allocbound=encodedMaxBoxes"`
+
// ForeignAssets are asset IDs for assets whose AssetParams
// (and since v4, Holdings) may be read by the executing
// ApprovalProgram or ClearStateProgram.
@@ -155,6 +167,14 @@ type ApplicationCallTxnFields struct {
// method below!
}
+// BoxRef names a box by the slot of ForeignApps it refers to (shifted by 1, 0 meaning the called app) and the box name
+type BoxRef struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Index uint64 `codec:"i"`
+ Name []byte `codec:"n"`
+}
+
// Empty indicates whether or not all the fields in the
// ApplicationCallTxnFields are zeroed out
func (ac *ApplicationCallTxnFields) Empty() bool {
@@ -176,6 +196,9 @@ func (ac *ApplicationCallTxnFields) Empty() bool {
if ac.ForeignAssets != nil {
return false
}
+ if ac.Boxes != nil {
+ return false
+ }
if ac.LocalStateSchema != (basics.StateSchema{}) {
return false
}
diff --git a/data/transactions/application_test.go b/data/transactions/application_test.go
index a076e4b8d..64777185c 100644
--- a/data/transactions/application_test.go
+++ b/data/transactions/application_test.go
@@ -33,7 +33,7 @@ func TestApplicationCallFieldsNotChanged(t *testing.T) {
af := ApplicationCallTxnFields{}
s := reflect.ValueOf(&af).Elem()
- if s.NumField() != 12 {
+ if s.NumField() != 13 {
t.Errorf("You added or removed a field from transactions.ApplicationCallTxnFields. " +
"Please ensure you have updated the Empty() method and then " +
"fix this test")
@@ -76,6 +76,10 @@ func TestApplicationCallFieldsEmpty(t *testing.T) {
a.False(ac.Empty())
ac.LocalStateSchema = basics.StateSchema{}
+ ac.Boxes = make([]BoxRef, 1)
+ a.False(ac.Empty())
+
+ ac.Boxes = nil
ac.GlobalStateSchema = basics.StateSchema{NumUint: 1}
a.False(ac.Empty())
@@ -115,6 +119,9 @@ func TestEncodedAppTxnAllocationBounds(t *testing.T) {
if proto.MaxAppTxnForeignAssets > encodedMaxForeignAssets {
require.Failf(t, "proto.MaxAppTxnForeignAssets > encodedMaxForeignAssets", "protocol version = %s", protoVer)
}
+ if proto.MaxAppBoxReferences > encodedMaxBoxes {
+ require.Failf(t, "proto.MaxAppBoxReferences > encodedMaxBoxes", "protocol version = %s", protoVer)
+ }
}
}
diff --git a/data/transactions/json_test.go b/data/transactions/json_test.go
new file mode 100644
index 000000000..547224fb6
--- /dev/null
+++ b/data/transactions/json_test.go
@@ -0,0 +1,96 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package transactions_test
+
+/* These tests are pretty low-value now. They test something very basic about
+ our codec for encoding []byte as base64 strings in json. The tests were
+ written when BoxRef contained a string instead of []byte. When that was true,
+ these tests were more important because there was work that had to be done to
+ make it happen (implement MarshalJSON and UnmarshalJSON) */
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/stretchr/testify/require"
+)
+
+func decode(t *testing.T, data string, v interface{}) {
+ t.Helper()
+ err := protocol.DecodeJSON([]byte(data), v)
+ require.NoErrorf(t, err, "Cannot decode %s", data)
+}
+
+func compact(data []byte) string {
+ return strings.ReplaceAll(strings.ReplaceAll(string(data), " ", ""), "\n", "")
+}
+
+// TestJsonMarshal ensures that BoxRef names are b64 encoded, since they may not be characters.
+func TestJsonMarshal(t *testing.T) {
+ marshal := protocol.EncodeJSON(transactions.BoxRef{Index: 4, Name: []byte("joe")})
+ require.Equal(t, `{"i":4,"n":"am9l"}`, compact(marshal))
+
+ marshal = protocol.EncodeJSON(transactions.BoxRef{Index: 0, Name: []byte("joe")})
+ require.Equal(t, `{"n":"am9l"}`, compact(marshal))
+
+ marshal = protocol.EncodeJSON(transactions.BoxRef{Index: 1, Name: []byte("")})
+ require.Equal(t, `{"i":1}`, compact(marshal))
+
+ marshal = protocol.EncodeJSON(transactions.BoxRef{Index: 0, Name: []byte("")})
+ require.Equal(t, `{}`, compact(marshal))
+}
+
+// TestJsonUnmarshal ensures that BoxRef unmarshaling expects b64 names
+func TestJsonUnmarshal(t *testing.T) {
+ var br transactions.BoxRef
+
+ decode(t, `{"i":4,"n":"am9l"}`, &br)
+ require.Equal(t, transactions.BoxRef{Index: 4, Name: []byte("joe")}, br)
+
+ br = transactions.BoxRef{}
+ decode(t, `{"n":"am9l"}`, &br)
+ require.Equal(t, transactions.BoxRef{Index: 0, Name: []byte("joe")}, br)
+
+ br = transactions.BoxRef{}
+ decode(t, `{"i":4}`, &br)
+ require.Equal(t, transactions.BoxRef{Index: 4, Name: nil}, br)
+
+ br = transactions.BoxRef{}
+ decode(t, `{}`, &br)
+ require.Equal(t, transactions.BoxRef{Index: 0, Name: nil}, br)
+}
+
+// TestTxnJson tests a few more things about how our Transactions get JSON
+// encoded. These things could change without breaking the protocol, should stay
+// the same for the sake of REST API compatibility.
+func TestTxnJson(t *testing.T) {
+ txn := txntest.Txn{
+ Sender: basics.Address{0x01, 0x02, 0x03},
+ }
+ marshal := protocol.EncodeJSON(txn.Txn())
+ require.Contains(t, compact(marshal), `"snd":"AEBA`)
+
+ txn = txntest.Txn{
+ Boxes: []transactions.BoxRef{{Index: 3, Name: []byte("john")}},
+ }
+ marshal = protocol.EncodeJSON(txn.Txn())
+ require.Contains(t, compact(marshal), `"apbx":[{"i":3,"n":"am9obg=="}]`)
+}
diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md
index e577ea93f..850d02a3e 100644
--- a/data/transactions/logic/README.md
+++ b/data/transactions/logic/README.md
@@ -13,12 +13,13 @@ application call transactions.
Programs have read-only access to the transaction they are attached
to, the other transactions in their atomic transaction group, and a
few global values. In addition, _Smart Contracts_ have access to
-limited state that is global to the application and per-account local
-state for each account that has opted-in to the application. For both
-types of program, approval is signaled by finishing with the stack
-containing a single non-zero uint64 value, though `return` can be used
-to signal an early approval which approves based only upon the top
-stack value being a non-zero uint64 value.
+limited state that is global to the application, per-account local
+state for each account that has opted-in to the application, and
+additional per-application arbitrary state in named _boxes_. For both types of
+program, approval is signaled by finishing with the stack containing a
+single non-zero uint64 value, though `return` can be used to signal an
+early approval which approves based only upon the top stack value
+being a non-zero uint64 value.
## The Stack
@@ -29,8 +30,15 @@ arguments from it and pushing results to it. Some operations have
_immediate_ arguments that are encoded directly into the instruction,
rather than coming from the stack.
-The maximum stack depth is 1000. If the stack depth is
-exceeded or if a byte-array element exceed 4096 bytes, the program fails.
+The maximum stack depth is 1000. If the stack depth is exceeded or if
+a byte-array element exceeds 4096 bytes, the program fails. If an
+opcode is documented to access a position in the stack that does not
+exist, the operation fails. Most often, this is an attempt to access
+an element below the stack -- the simplest example is an operation
+like `concat` which expects two arguments on the stack. If the stack
+has fewer than two elements, the operation fails. Some operations, like
+`frame_dig` and `proto` could fail because of an attempt to access
+above the current stack.
## Scratch Space
@@ -38,7 +46,9 @@ In addition to the stack there are 256 positions of scratch
space. Like stack values, scratch locations may be uint64s or
byte-arrays. Scratch locations are initialized as uint64 zero. Scratch
space is accessed by the `load(s)` and `store(s)` opcodes which move
-data from or to scratch space, respectively.
+data from or to scratch space, respectively. Application calls may
+inspect the final scratch space of earlier application calls in the
+same group using `gload(s)(s)`
## Versions
@@ -116,11 +126,13 @@ while being evaluated. If the program exceeds its budget, it fails.
Smart Contracts are executed in ApplicationCall transactions. Like
Smart Signatures, contracts indicate success by leaving a single
-non-zero integer on the stack. A failed Smart Contract call is not a
-valid transaction, thus not written to the blockchain. Nodes maintain
-a list of transactions that would succeed, given the current state of
-the blockchain, called the transaction pool. Nodes draw from the pool
-if they are called upon to propose a block.
+non-zero integer on the stack. A failed Smart Contract call to an
+ApprovalProgram is not a valid transaction, thus not written to the
+blockchain. An ApplicationCall with OnComplete set to ClearState
+invokes the ClearStateProgram, rather than the usual
+ApprovalProgram. If the ClearStateProgram fails, application state
+changes are rolled back, but the transaction still succeeds, and the
+Sender's local state for the called application is removed.
Smart Contracts have access to everything a Smart Signature may access
(see previous section), as well as the ability to examine blockchain
@@ -134,14 +146,15 @@ blockchain.
Smart contracts have limits on their execution cost (700, consensus
parameter MaxAppProgramCost). Before v4, this was a static limit on
-the cost of all the instructions in the program. Since then, the cost
+the cost of all the instructions in the program. Starting in v4, the cost
is tracked dynamically during execution and must not exceed
MaxAppProgramCost. Beginning with v5, programs costs are pooled and
tracked dynamically across app executions in a group. If `n`
application invocations appear in a group, then the total execution
-cost of such calls must not exceed `n`*MaxAppProgramCost. In v6, inner
+cost of all such calls must not exceed `n`*MaxAppProgramCost. In v6, inner
application calls become possible, and each such call increases the
-pooled budget by MaxAppProgramCost.
+pooled budget by MaxAppProgramCost at the time the inner group is submitted
+with `itxn_submit`.
Executions of the ClearStateProgram are more stringent, in order to
ensure that applications may be closed out, but that applications also
@@ -158,7 +171,7 @@ ClearStateProgram fails, and the app's state _is cleared_.
Smart contracts have limits on the amount of blockchain state they
may examine. Opcodes may only access blockchain resources such as
-Accounts, Assets, and contract state if the given resource is
+Accounts, Assets, Boxes, and contract state if the given resource is
_available_.
* A resource in the "foreign array" fields of the ApplicationCall
@@ -181,6 +194,14 @@ _available_.
* Since v7, the account associated with any contract present in the
`txn.ForeignApplications` field is _available_.
+ * A Box is _available_ to an Approval Program if _any_ transaction in
+ the same group contains a box reference (`txn.Boxes`) that denotes
+ the box. A box reference contains an index `i`, and name `n`. The
+ index refers to the `ith` application in the transaction's
+ ForeignApplications array, with the usual convention that 0
+ indicates the application ID of the app called by that
+ transaction. No box is ever _available_ to a ClearStateProgram.
+
## Constants
Constants can be pushed onto the stack in two different ways:
@@ -383,6 +404,7 @@ Some of these have immediate data in the byte or bytes after the opcode.
| `intc_2` | constant 2 from intcblock |
| `intc_3` | constant 3 from intcblock |
| `pushint uint` | immediate UINT |
+| `pushints uint ...` | push sequence of immediate uints to stack in the order they appear (first uint being deepest) |
| `bytecblock bytes ...` | prepare block of byte-array constants for use by bytec |
| `bytec i` | Ith constant from bytecblock |
| `bytec_0` | constant 0 from bytecblock |
@@ -390,6 +412,7 @@ Some of these have immediate data in the byte or bytes after the opcode.
| `bytec_2` | constant 2 from bytecblock |
| `bytec_3` | constant 3 from bytecblock |
| `pushbytes bytes` | immediate BYTES |
+| `pushbytess bytes ...` | push sequences of immediate byte arrays to stack (first byte array being deepest) |
| `bzero` | zero filled byte-array of length A |
| `arg n` | Nth LogicSig argument |
| `arg_0` | LogicSig argument 0 |
@@ -567,11 +590,20 @@ App fields used in the `app_params_get` opcode.
Account fields used in the `acct_params_get` opcode.
-| Index | Name | Type | Notes |
-| - | ------ | -- | --------- |
-| 0 | AcctBalance | uint64 | Account balance in microalgos |
-| 1 | AcctMinBalance | uint64 | Minimum required blance for account, in microalgos |
-| 2 | AcctAuthAddr | []byte | Address the account is rekeyed to. |
+| Index | Name | Type | In | Notes |
+| - | ------ | -- | - | --------- |
+| 0 | AcctBalance | uint64 | | Account balance in microalgos |
+| 1 | AcctMinBalance | uint64 | | Minimum required balance for account, in microalgos |
+| 2 | AcctAuthAddr | []byte | | Address the account is rekeyed to. |
+| 3 | AcctTotalNumUint | uint64 | v8 | The total number of uint64 values allocated by this account in Global and Local States. |
+| 4 | AcctTotalNumByteSlice | uint64 | v8 | The total number of byte array values allocated by this account in Global and Local States. |
+| 5 | AcctTotalExtraAppPages | uint64 | v8 | The number of extra app code pages used by this account. |
+| 6 | AcctTotalAppsCreated | uint64 | v8 | The number of existing apps created by this account. |
+| 7 | AcctTotalAppsOptedIn | uint64 | v8 | The number of apps this account is opted into. |
+| 8 | AcctTotalAssetsCreated | uint64 | v8 | The number of existing ASAs created by this account. |
+| 9 | AcctTotalAssets | uint64 | v8 | The number of ASAs held by this account (including ASAs this account created). |
+| 10 | AcctTotalBoxes | uint64 | v8 | The number of existing boxes created by this account's app. |
+| 11 | AcctTotalBoxBytes | uint64 | v8 | The total number of bytes used by this account's app's box keys and values. |
### Flow Control
@@ -584,16 +616,16 @@ Account fields used in the `acct_params_get` opcode.
| `b target` | branch unconditionally to TARGET |
| `return` | use A as success value; end |
| `pop` | discard A |
-| `popn n` | Remove N values from the top of the stack |
+| `popn n` | remove N values from the top of the stack |
| `dup` | duplicate A |
| `dup2` | duplicate A and B |
| `dupn n` | duplicate A, N times |
| `dig n` | Nth value from the top of the stack. dig 0 is equivalent to dup |
-| `bury n` | Replace the Nth value from the top of the stack. bury 0 fails. |
+| `bury n` | replace the Nth value from the top of the stack with A. bury 0 fails. |
| `cover n` | remove top of stack, and place it deeper in the stack such that N elements are above it. Fails if stack depth <= N. |
| `uncover n` | remove the value at depth N in the stack and shift above items down so the Nth deep value is on top of the stack. Fails if stack depth <= N. |
| `frame_dig i` | Nth (signed) value from the frame pointer. |
-| `frame_bury i` | Replace the Nth (signed) value from the frame pointer in the stack |
+| `frame_bury i` | replace the Nth (signed) value from the frame pointer in the stack with A |
| `swap` | swaps A and B on stack |
| `select` | selects one of two values based on top-of-stack: B if C != 0, else A |
| `assert` | immediately fail unless A is a non-zero number |
@@ -601,13 +633,14 @@ Account fields used in the `acct_params_get` opcode.
| `proto a r` | Prepare top call frame for a retsub that will assume A args and R return values. |
| `retsub` | pop the top instruction from the call stack and branch to it |
| `switch target ...` | branch to the Ath label. Continue at following instruction if index A exceeds the number of labels. |
+| `match target ...` | given match cases from A[1] to A[N], branch to the Ith label where A[I] = B. Continue to the following instruction if no matches are found. |
### State Access
| Opcode | Description |
| - | -- |
-| `balance` | get balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted. |
-| `min_balance` | get minimum required balance for account A, in microalgos. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes. |
+| `balance` | balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted. Changes caused by inner transactions are observable immediately following `itxn_submit` |
+| `min_balance` | minimum required balance for account A, in microalgos. Required balance is affected by ASA, App, and Box usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes. Changes caused by inner transactions or box usage are observable immediately following the opcode effecting the change. |
| `app_opted_in` | 1 if account A is opted in to application B, else 0 |
| `app_local_get` | local state of the key B in the current application in account A |
| `app_local_get_ex` | X is the local state of application B, key C in account A. Y is 1 if key existed, else 0 |
@@ -624,6 +657,28 @@ Account fields used in the `acct_params_get` opcode.
| `log` | write A to log state of the current application |
| `block f` | field F of block A. Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive) |
+### Box Access
+
+All box related opcodes fail immediately if used in a
+ClearStateProgram. This behavior is meant to discourage Smart Contract
+authors from depending upon the availability of boxes in a ClearState
+transaction, as accounts using ClearState are under no requirement to
+furnish appropriate Box References. Authors would do well to keep the
+same issue in mind with respect to the availability of Accounts,
+Assets, and Apps, though State Access opcodes _are_ allowed in
+ClearState programs because the current application and sender account
+are sure to be _available_.
+
+| Opcode | Description |
+| - | -- |
+| `box_create` | create a box named A, of length B. Fail if A is empty or B exceeds 32,768. Returns 0 if A already existed, else 1 |
+| `box_extract` | read C bytes from box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size. |
+| `box_replace` | write byte-array C into box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size. |
+| `box_del` | delete box named A if it exists. Return 1 if A existed, 0 otherwise |
+| `box_len` | X is the length of box A if A exists, else 0. Y is 1 if A exists, else 0. |
+| `box_get` | X is the contents of box A if A exists, else ''. Y is 1 if A exists, else 0. |
+| `box_put` | replaces the contents of box A with byte-array B. Fails if A exists and len(B) != len(box A). Creates A if it does not exist |
+
### Inner Transactions
The following opcodes allow for "inner transactions". Inner
diff --git a/data/transactions/logic/README_in.md b/data/transactions/logic/README_in.md
index c323b51de..6dfcfe966 100644
--- a/data/transactions/logic/README_in.md
+++ b/data/transactions/logic/README_in.md
@@ -13,12 +13,13 @@ application call transactions.
Programs have read-only access to the transaction they are attached
to, the other transactions in their atomic transaction group, and a
few global values. In addition, _Smart Contracts_ have access to
-limited state that is global to the application and per-account local
-state for each account that has opted-in to the application. For both
-types of program, approval is signaled by finishing with the stack
-containing a single non-zero uint64 value, though `return` can be used
-to signal an early approval which approves based only upon the top
-stack value being a non-zero uint64 value.
+limited state that is global to the application, per-account local
+state for each account that has opted-in to the application, and
+additional per-application arbitrary state in named _boxes_. For both types of
+program, approval is signaled by finishing with the stack containing a
+single non-zero uint64 value, though `return` can be used to signal an
+early approval which approves based only upon the top stack value
+being a non-zero uint64 value.
## The Stack
@@ -29,8 +30,15 @@ arguments from it and pushing results to it. Some operations have
_immediate_ arguments that are encoded directly into the instruction,
rather than coming from the stack.
-The maximum stack depth is 1000. If the stack depth is
-exceeded or if a byte-array element exceed 4096 bytes, the program fails.
+The maximum stack depth is 1000. If the stack depth is exceeded or if
+a byte-array element exceeds 4096 bytes, the program fails. If an
+opcode is documented to access a position in the stack that does not
+exist, the operation fails. Most often, this is an attempt to access
+an element below the stack -- the simplest example is an operation
+like `concat` which expects two arguments on the stack. If the stack
+has fewer than two elements, the operation fails. Some operations, like
+`frame_dig` and `proto` could fail because of an attempt to access
+above the current stack.
## Scratch Space
@@ -38,7 +46,9 @@ In addition to the stack there are 256 positions of scratch
space. Like stack values, scratch locations may be uint64s or
byte-arrays. Scratch locations are initialized as uint64 zero. Scratch
space is accessed by the `load(s)` and `store(s)` opcodes which move
-data from or to scratch space, respectively.
+data from or to scratch space, respectively. Application calls may
+inspect the final scratch space of earlier application calls in the
+same group using `gload(s)(s)`
## Versions
@@ -116,11 +126,13 @@ while being evaluated. If the program exceeds its budget, it fails.
Smart Contracts are executed in ApplicationCall transactions. Like
Smart Signatures, contracts indicate success by leaving a single
-non-zero integer on the stack. A failed Smart Contract call is not a
-valid transaction, thus not written to the blockchain. Nodes maintain
-a list of transactions that would succeed, given the current state of
-the blockchain, called the transaction pool. Nodes draw from the pool
-if they are called upon to propose a block.
+non-zero integer on the stack. A failed Smart Contract call to an
+ApprovalProgram is not a valid transaction, thus not written to the
+blockchain. An ApplicationCall with OnComplete set to ClearState
+invokes the ClearStateProgram, rather than the usual
+ApprovalProgram. If the ClearStateProgram fails, application state
+changes are rolled back, but the transaction still succeeds, and the
+Sender's local state for the called application is removed.
Smart Contracts have access to everything a Smart Signature may access
(see previous section), as well as the ability to examine blockchain
@@ -134,14 +146,15 @@ blockchain.
Smart contracts have limits on their execution cost (700, consensus
parameter MaxAppProgramCost). Before v4, this was a static limit on
-the cost of all the instructions in the program. Since then, the cost
+the cost of all the instructions in the program. Starting in v4, the cost
is tracked dynamically during execution and must not exceed
MaxAppProgramCost. Beginning with v5, programs costs are pooled and
tracked dynamically across app executions in a group. If `n`
application invocations appear in a group, then the total execution
-cost of such calls must not exceed `n`*MaxAppProgramCost. In v6, inner
+cost of all such calls must not exceed `n`*MaxAppProgramCost. In v6, inner
application calls become possible, and each such call increases the
-pooled budget by MaxAppProgramCost.
+pooled budget by MaxAppProgramCost at the time the inner group is submitted
+with `itxn_submit`.
Executions of the ClearStateProgram are more stringent, in order to
ensure that applications may be closed out, but that applications also
@@ -158,7 +171,7 @@ ClearStateProgram fails, and the app's state _is cleared_.
Smart contracts have limits on the amount of blockchain state they
may examine. Opcodes may only access blockchain resources such as
-Accounts, Assets, and contract state if the given resource is
+Accounts, Assets, Boxes, and contract state if the given resource is
_available_.
* A resource in the "foreign array" fields of the ApplicationCall
@@ -181,6 +194,14 @@ _available_.
* Since v7, the account associated with any contract present in the
`txn.ForeignApplications` field is _available_.
+ * A Box is _available_ to an Approval Program if _any_ transaction in
+ the same group contains a box reference (`txn.Boxes`) that denotes
+ the box. A box reference contains an index `i`, and name `n`. The
+ index refers to the `ith` application in the transaction's
+ ForeignApplications array, with the usual convention that 0
+ indicates the application ID of the app called by that
+ transaction. No box is ever _available_ to a ClearStateProgram.
+
## Constants
Constants can be pushed onto the stack in two different ways:
@@ -317,6 +338,20 @@ Account fields used in the `acct_params_get` opcode.
@@ State_Access.md @@
+### Box Access
+
+All box related opcodes fail immediately if used in a
+ClearStateProgram. This behavior is meant to discourage Smart Contract
+authors from depending upon the availability of boxes in a ClearState
+transaction, as accounts using ClearState are under no requirement to
+furnish appropriate Box References. Authors would do well to keep the
+same issue in mind with respect to the availability of Accounts,
+Assets, and Apps, though State Access opcodes _are_ allowed in
+ClearState programs because the current application and sender account
+are sure to be _available_.
+
+@@ Box_Access.md @@
+
### Inner Transactions
The following opcodes allow for "inner transactions". Inner
diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md
index d093c0823..cd2bd5842 100644
--- a/data/transactions/logic/TEAL_opcodes.md
+++ b/data/transactions/logic/TEAL_opcodes.md
@@ -242,7 +242,7 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
## intcblock uint ...
-- Opcode: 0x20 {varuint length} [{varuint value}, ...]
+- Opcode: 0x20 {varuint count} [{varuint value}, ...]
- Stack: ... &rarr; ...
- prepare block of uint64 constants for use by intc
@@ -280,7 +280,7 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
## bytecblock bytes ...
-- Opcode: 0x26 {varuint length} [({varuint value length} bytes), ...]
+- Opcode: 0x26 {varuint count} [({varuint value length} bytes), ...]
- Stack: ... &rarr; ...
- prepare block of byte-array constants for use by bytec
@@ -614,14 +614,14 @@ See `bnz` for details on how branches work. `b` always jumps to the offset.
- Opcode: 0x45 {uint8 depth}
- Stack: ..., A &rarr; ...
-- Replace the Nth value from the top of the stack. bury 0 fails.
+- replace the Nth value from the top of the stack with A. bury 0 fails.
- Availability: v8
## popn n
- Opcode: 0x46 {uint8 stack depth}
- Stack: ..., [N items] &rarr; ...
-- Remove N values from the top of the stack
+- remove N values from the top of the stack
- Availability: v8
## dupn n
@@ -834,7 +834,7 @@ Almost all smart contracts should use simpler and smaller methods (such as the [
- Opcode: 0x60
- Stack: ..., A &rarr; ..., uint64
-- get balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted.
+- balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted. Changes caused by inner transactions are observable immediately following `itxn_submit`
- Availability: v2
- Mode: Application
@@ -1013,18 +1013,27 @@ params: Txn.ForeignApps offset or an _available_ app id. Return: did_exist flag
`acct_params` Fields:
-| Index | Name | Type | Notes |
-| - | ------ | -- | --------- |
-| 0 | AcctBalance | uint64 | Account balance in microalgos |
-| 1 | AcctMinBalance | uint64 | Minimum required blance for account, in microalgos |
-| 2 | AcctAuthAddr | []byte | Address the account is rekeyed to. |
+| Index | Name | Type | In | Notes |
+| - | ------ | -- | - | --------- |
+| 0 | AcctBalance | uint64 | | Account balance in microalgos |
+| 1 | AcctMinBalance | uint64 | | Minimum required balance for account, in microalgos |
+| 2 | AcctAuthAddr | []byte | | Address the account is rekeyed to. |
+| 3 | AcctTotalNumUint | uint64 | v8 | The total number of uint64 values allocated by this account in Global and Local States. |
+| 4 | AcctTotalNumByteSlice | uint64 | v8 | The total number of byte array values allocated by this account in Global and Local States. |
+| 5 | AcctTotalExtraAppPages | uint64 | v8 | The number of extra app code pages used by this account. |
+| 6 | AcctTotalAppsCreated | uint64 | v8 | The number of existing apps created by this account. |
+| 7 | AcctTotalAppsOptedIn | uint64 | v8 | The number of apps this account is opted into. |
+| 8 | AcctTotalAssetsCreated | uint64 | v8 | The number of existing ASAs created by this account. |
+| 9 | AcctTotalAssets | uint64 | v8 | The number of ASAs held by this account (including ASAs this account created). |
+| 10 | AcctTotalBoxes | uint64 | v8 | The number of existing boxes created by this account's app. |
+| 11 | AcctTotalBoxBytes | uint64 | v8 | The total number of bytes used by this account's app's box keys and values. |
## min_balance
- Opcode: 0x78
- Stack: ..., A &rarr; ..., uint64
-- get minimum required balance for account A, in microalgos. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes.
+- minimum required balance for account A, in microalgos. Required balance is affected by ASA, App, and Box usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes. Changes caused by inner transactions or box usage are observable immediately following the opcode effecting the change.
- Availability: v3
- Mode: Application
@@ -1048,6 +1057,24 @@ pushbytes args are not added to the bytecblock during assembly processes
pushint args are not added to the intcblock during assembly processes
+## pushbytess bytes ...
+
+- Opcode: 0x82 {varuint count} [({varuint value length} bytes), ...]
+- Stack: ... &rarr; ..., [N items]
+- push sequences of immediate byte arrays to stack (first byte array being deepest)
+- Availability: v8
+
+pushbytess args are not added to the bytecblock during assembly processes
+
+## pushints uint ...
+
+- Opcode: 0x83 {varuint count} [{varuint value}, ...]
+- Stack: ... &rarr; ..., [N items]
+- push sequence of immediate uints to stack in the order they appear (first uint being deepest)
+- Availability: v8
+
+pushints args are not added to the intcblock during assembly processes
+
## ed25519verify_bare
- Opcode: 0x84
@@ -1094,7 +1121,7 @@ Fails unless the last instruction executed was a `callsub`.
- Opcode: 0x8c {int8 frame slot}
- Stack: ..., A &rarr; ...
-- Replace the Nth (signed) value from the frame pointer in the stack
+- replace the Nth (signed) value from the frame pointer in the stack with A
- Availability: v8
## switch target ...
@@ -1104,6 +1131,15 @@ Fails unless the last instruction executed was a `callsub`.
- branch to the Ath label. Continue at following instruction if index A exceeds the number of labels.
- Availability: v8
+## match target ...
+
+- Opcode: 0x8e {uint8 branch count} [{int16 branch offset, big-endian}, ...]
+- Stack: ..., [A1, A2, ..., AN], B &rarr; ...
+- given match cases from A[1] to A[N], branch to the Ith label where A[I] = B. Continue to the following instruction if no matches are found.
+- Availability: v8
+
+`match` consumes N+1 values from the stack. Let the top stack value be B. The following N values represent an ordered list of match cases/constants (A), where the first value (A[1]) is the deepest in the stack. The immediate arguments are an ordered list of N labels (T). `match` will branch to target T[I], where A[I] = B. If there are no matches then execution continues on to the next instruction.
+
## shl
- Opcode: 0x90
@@ -1378,6 +1414,68 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
- Availability: v6
- Mode: Application
+## box_create
+
+- Opcode: 0xb9
+- Stack: ..., A: []byte, B: uint64 &rarr; ..., uint64
+- create a box named A, of length B. Fail if A is empty or B exceeds 32,768. Returns 0 if A already existed, else 1
+- Availability: v8
+- Mode: Application
+
+Newly created boxes are filled with 0 bytes. `box_create` will fail if the referenced box already exists with a different size. Otherwise, existing boxes are unchanged by `box_create`.
+
+## box_extract
+
+- Opcode: 0xba
+- Stack: ..., A: []byte, B: uint64, C: uint64 &rarr; ..., []byte
+- read C bytes from box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.
+- Availability: v8
+- Mode: Application
+
+## box_replace
+
+- Opcode: 0xbb
+- Stack: ..., A: []byte, B: uint64, C: []byte &rarr; ...
+- write byte-array C into box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.
+- Availability: v8
+- Mode: Application
+
+## box_del
+
+- Opcode: 0xbc
+- Stack: ..., A: []byte &rarr; ..., uint64
+- delete box named A if it exists. Return 1 if A existed, 0 otherwise
+- Availability: v8
+- Mode: Application
+
+## box_len
+
+- Opcode: 0xbd
+- Stack: ..., A: []byte &rarr; ..., X: uint64, Y: uint64
+- X is the length of box A if A exists, else 0. Y is 1 if A exists, else 0.
+- Availability: v8
+- Mode: Application
+
+## box_get
+
+- Opcode: 0xbe
+- Stack: ..., A: []byte &rarr; ..., X: []byte, Y: uint64
+- X is the contents of box A if A exists, else ''. Y is 1 if A exists, else 0.
+- Availability: v8
+- Mode: Application
+
+For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`
+
+## box_put
+
+- Opcode: 0xbf
+- Stack: ..., A: []byte, B: []byte &rarr; ...
+- replaces the contents of box A with byte-array B. Fails if A exists and len(B) != len(box A). Creates A if it does not exist
+- Availability: v8
+- Mode: Application
+
+For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`
+
## txnas f
- Opcode: 0xc0 {uint8 transaction field index}
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index fd3c79bf4..e25e806be 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -500,7 +500,7 @@ func (ops *OpStream) ByteLiteral(val []byte) {
func asmInt(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- return ops.error("int needs one argument")
+ return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
}
// After backBranchEnabledVersion, control flow is confusing, so if there's
@@ -548,7 +548,7 @@ func asmInt(ops *OpStream, spec *OpSpec, args []string) error {
// Explicit invocation of const lookup and push
func asmIntC(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- return ops.error("intc operation needs one argument")
+ return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
}
constIndex, err := byteImm(args[0], "constant")
if err != nil {
@@ -559,7 +559,7 @@ func asmIntC(ops *OpStream, spec *OpSpec, args []string) error {
}
func asmByteC(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- return ops.error("bytec operation needs one argument")
+ return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
}
constIndex, err := byteImm(args[0], "constant")
if err != nil {
@@ -571,7 +571,7 @@ func asmByteC(ops *OpStream, spec *OpSpec, args []string) error {
func asmPushInt(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- return ops.errorf("%s needs one argument", spec.Name)
+ return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
}
val, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
@@ -583,16 +583,23 @@ func asmPushInt(ops *OpStream, spec *OpSpec, args []string) error {
ops.pending.Write(scratch[:vlen])
return nil
}
+
+func asmPushInts(ops *OpStream, spec *OpSpec, args []string) error {
+ ops.pending.WriteByte(spec.Opcode)
+ _, err := asmIntImmArgs(ops, args)
+ return err
+}
+
func asmPushBytes(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 0 {
- return ops.errorf("%s operation needs byte literal argument", spec.Name)
+ return ops.errorf("%s needs byte literal argument", spec.Name)
}
val, consumed, err := parseBinaryArgs(args)
if err != nil {
return ops.error(err)
}
if len(args) != consumed {
- return ops.errorf("%s operation with extraneous argument", spec.Name)
+ return ops.errorf("%s with extraneous argument", spec.Name)
}
ops.pending.WriteByte(spec.Opcode)
var scratch [binary.MaxVarintLen64]byte
@@ -602,6 +609,12 @@ func asmPushBytes(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
+func asmPushBytess(ops *OpStream, spec *OpSpec, args []string) error {
+ ops.pending.WriteByte(spec.Opcode)
+ _, err := asmByteImmArgs(ops, args)
+ return err
+}
+
func base32DecodeAnyPadding(x string) (val []byte, err error) {
val, err = base32.StdEncoding.WithPadding(base32.NoPadding).DecodeString(x)
if err != nil {
@@ -751,7 +764,7 @@ func parseStringLiteral(input string) (result []byte, err error) {
// byte "this is a string\n"
func asmByte(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 0 {
- return ops.errorf("%s operation needs byte literal argument", spec.Name)
+ return ops.errorf("%s needs byte literal argument", spec.Name)
}
// After backBranchEnabledVersion, control flow is confusing, so if there's
@@ -781,7 +794,7 @@ func asmByte(ops *OpStream, spec *OpSpec, args []string) error {
return ops.error(err)
}
if len(args) != consumed {
- return ops.errorf("%s operation with extraneous argument", spec.Name)
+ return ops.errorf("%s with extraneous argument", spec.Name)
}
ops.ByteLiteral(val)
return nil
@@ -812,8 +825,7 @@ func asmMethod(ops *OpStream, spec *OpSpec, args []string) error {
return ops.error("Unable to parse method signature")
}
-func asmIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
- ops.pending.WriteByte(spec.Opcode)
+func asmIntImmArgs(ops *OpStream, args []string) ([]uint64, error) {
ivals := make([]uint64, len(args))
var scratch [binary.MaxVarintLen64]byte
l := binary.PutUvarint(scratch[:], uint64(len(args)))
@@ -825,9 +837,17 @@ func asmIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
}
l = binary.PutUvarint(scratch[:], cu)
ops.pending.Write(scratch[:l])
- if !ops.known.deadcode {
- ivals[i] = cu
- }
+ ivals[i] = cu
+ }
+
+ return ivals, nil
+}
+
+func asmIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
+ ops.pending.WriteByte(spec.Opcode)
+ ivals, err := asmIntImmArgs(ops, args)
+ if err != nil {
+ return err
}
if !ops.known.deadcode {
// If we previously processed an `int`, we thought we could insert our
@@ -843,8 +863,7 @@ func asmIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
- ops.pending.WriteByte(spec.Opcode)
+func asmByteImmArgs(ops *OpStream, args []string) ([][]byte, error) {
bvals := make([][]byte, 0, len(args))
rest := args
for len(rest) > 0 {
@@ -854,7 +873,7 @@ func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
// intcblock, but parseBinaryArgs would have
// to return a useful consumed value even in
// the face of errors. Hard.
- return ops.error(err)
+ return nil, ops.error(err)
}
bvals = append(bvals, val)
rest = rest[consumed:]
@@ -867,6 +886,17 @@ func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
ops.pending.Write(scratch[:l])
ops.pending.Write(bv)
}
+
+ return bvals, nil
+}
+
+func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
+ ops.pending.WriteByte(spec.Opcode)
+ bvals, err := asmByteImmArgs(ops, args)
+ if err != nil {
+ return err
+ }
+
if !ops.known.deadcode {
// If we previously processed a pseudo `byte`, we thought we could
// insert our own bytecblock, but now we see a manual one.
@@ -884,7 +914,7 @@ func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
// parses base32-with-checksum account address strings into a byte literal
func asmAddr(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- return ops.error("addr operation needs one argument")
+ return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
}
addr, err := basics.UnmarshalChecksumAddress(args[0])
if err != nil {
@@ -896,7 +926,7 @@ func asmAddr(ops *OpStream, spec *OpSpec, args []string) error {
func asmArg(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- return ops.error("arg operation needs one argument")
+ return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
}
val, err := byteImm(args[0], "argument")
if err != nil {
@@ -921,7 +951,7 @@ func asmArg(ops *OpStream, spec *OpSpec, args []string) error {
func asmBranch(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- return ops.error("branch operation needs label argument")
+ return ops.errorf("%s needs a single label argument", spec.Name)
}
ops.referToLabel(ops.pending.Len()+1, args[0], ops.pending.Len()+spec.Size)
@@ -1454,6 +1484,24 @@ func typeDupN(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, err
return nil, copies, nil
}
+func typePushBytess(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+ types := make(StackTypes, len(args))
+ for i := range types {
+ types[i] = StackBytes
+ }
+
+ return nil, types, nil
+}
+
+func typePushInts(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+ types := make(StackTypes, len(args))
+ for i := range types {
+ types[i] = StackUint64
+ }
+
+ return nil, types, nil
+}
+
func joinIntsOnOr(singularTerminator string, list ...int) string {
if len(list) == 1 {
switch list[0] {
@@ -2519,7 +2567,7 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
out += fmt.Sprintf("0x%s // %s", hex.EncodeToString(constant), guessByteFormat(constant))
pc = int(end)
case immInts:
- intc, nextpc, err := parseIntcblock(dis.program, pc)
+ intc, nextpc, err := parseIntImmArgs(dis.program, pc)
if err != nil {
return "", err
}
@@ -2533,7 +2581,7 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
}
pc = nextpc
case immBytess:
- bytec, nextpc, err := parseBytecBlock(dis.program, pc)
+ bytec, nextpc, err := parseByteImmArgs(dis.program, pc)
if err != nil {
return "", err
}
@@ -2590,13 +2638,13 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
return out, nil
}
-var errShortIntcblock = errors.New("intcblock ran past end of program")
-var errTooManyIntc = errors.New("intcblock with too many items")
+var errShortIntImmArgs = errors.New("const int list ran past end of program")
+var errTooManyIntc = errors.New("const int list with too many items")
-func parseIntcblock(program []byte, pos int) (intc []uint64, nextpc int, err error) {
+func parseIntImmArgs(program []byte, pos int) (intc []uint64, nextpc int, err error) {
numInts, bytesUsed := binary.Uvarint(program[pos:])
if bytesUsed <= 0 {
- err = fmt.Errorf("could not decode intcblock size at pc=%d", pos)
+ err = fmt.Errorf("could not decode length of int list at pc=%d", pos)
return
}
pos += bytesUsed
@@ -2607,7 +2655,7 @@ func parseIntcblock(program []byte, pos int) (intc []uint64, nextpc int, err err
intc = make([]uint64, numInts)
for i := uint64(0); i < numInts; i++ {
if pos >= len(program) {
- err = errShortIntcblock
+ err = errShortIntImmArgs
return
}
intc[i], bytesUsed = binary.Uvarint(program[pos:])
@@ -2621,38 +2669,19 @@ func parseIntcblock(program []byte, pos int) (intc []uint64, nextpc int, err err
return
}
-func checkIntConstBlock(cx *EvalContext) error {
- pos := cx.pc + 1
- numInts, bytesUsed := binary.Uvarint(cx.program[pos:])
- if bytesUsed <= 0 {
- return fmt.Errorf("could not decode intcblock size at pc=%d", pos)
- }
- pos += bytesUsed
- if numInts > uint64(len(cx.program)) {
- return errTooManyIntc
- }
- //intc = make([]uint64, numInts)
- for i := uint64(0); i < numInts; i++ {
- if pos >= len(cx.program) {
- return errShortIntcblock
- }
- _, bytesUsed = binary.Uvarint(cx.program[pos:])
- if bytesUsed <= 0 {
- return fmt.Errorf("could not decode int const[%d] at pc=%d", i, pos)
- }
- pos += bytesUsed
- }
- cx.nextpc = pos
- return nil
+func checkIntImmArgs(cx *EvalContext) error {
+ var err error
+ _, cx.nextpc, err = parseIntImmArgs(cx.program, cx.pc+1)
+ return err
}
-var errShortBytecblock = errors.New("bytecblock ran past end of program")
-var errTooManyItems = errors.New("bytecblock with too many items")
+var errShortByteImmArgs = errors.New("const bytes list ran past end of program")
+var errTooManyItems = errors.New("const bytes list with too many items")
-func parseBytecBlock(program []byte, pos int) (bytec [][]byte, nextpc int, err error) {
+func parseByteImmArgs(program []byte, pos int) (bytec [][]byte, nextpc int, err error) {
numItems, bytesUsed := binary.Uvarint(program[pos:])
if bytesUsed <= 0 {
- err = fmt.Errorf("could not decode bytecblock size at pc=%d", pos)
+ err = fmt.Errorf("could not decode length of bytes list at pc=%d", pos)
return
}
pos += bytesUsed
@@ -2663,7 +2692,7 @@ func parseBytecBlock(program []byte, pos int) (bytec [][]byte, nextpc int, err e
bytec = make([][]byte, numItems)
for i := uint64(0); i < numItems; i++ {
if pos >= len(program) {
- err = errShortBytecblock
+ err = errShortByteImmArgs
return
}
itemLen, bytesUsed := binary.Uvarint(program[pos:])
@@ -2673,12 +2702,12 @@ func parseBytecBlock(program []byte, pos int) (bytec [][]byte, nextpc int, err e
}
pos += bytesUsed
if pos >= len(program) {
- err = errShortBytecblock
+ err = errShortByteImmArgs
return
}
end := uint64(pos) + itemLen
if end > uint64(len(program)) || end < uint64(pos) {
- err = errShortBytecblock
+ err = errShortByteImmArgs
return
}
bytec[i] = program[pos : pos+int(itemLen)]
@@ -2688,38 +2717,10 @@ func parseBytecBlock(program []byte, pos int) (bytec [][]byte, nextpc int, err e
return
}
-func checkByteConstBlock(cx *EvalContext) error {
- pos := cx.pc + 1
- numItems, bytesUsed := binary.Uvarint(cx.program[pos:])
- if bytesUsed <= 0 {
- return fmt.Errorf("could not decode bytecblock size at pc=%d", pos)
- }
- pos += bytesUsed
- if numItems > uint64(len(cx.program)) {
- return errTooManyItems
- }
- //bytec = make([][]byte, numItems)
- for i := uint64(0); i < numItems; i++ {
- if pos >= len(cx.program) {
- return errShortBytecblock
- }
- itemLen, bytesUsed := binary.Uvarint(cx.program[pos:])
- if bytesUsed <= 0 {
- return fmt.Errorf("could not decode []byte const[%d] at pc=%d", i, pos)
- }
- pos += bytesUsed
- if pos >= len(cx.program) {
- return errShortBytecblock
- }
- end := uint64(pos) + itemLen
- if end > uint64(len(cx.program)) || end < uint64(pos) {
- return errShortBytecblock
- }
- //bytec[i] = program[pos : pos+int(itemLen)]
- pos += int(itemLen)
- }
- cx.nextpc = pos
- return nil
+func checkByteImmArgs(cx *EvalContext) error {
+ var err error
+ _, cx.nextpc, err = parseByteImmArgs(cx.program, cx.pc+1)
+ return err
}
func parseSwitch(program []byte, pos int) (targets []int, nextpc int, err error) {
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index 1adf9b450..d53b6fc16 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -369,6 +369,16 @@ pushint 1
gitxnas 0 Logs
`
+const boxNonsense = `
+ box_create
+ box_extract
+ box_replace
+ box_del
+ box_len
+ box_put
+ box_get
+`
+
const randomnessNonsense = `
pushint 0xffff
block BlkTimestamp
@@ -407,7 +417,15 @@ switch_label1:
pushint 1
`
-const v8Nonsense = v7Nonsense + switchNonsense + frameNonsense
+const matchNonsense = `
+match_label0:
+pushints 1 2 1
+match match_label0 match_label1
+match_label1:
+pushbytess "1" "2" "1"
+`
+
+const v8Nonsense = v7Nonsense + switchNonsense + frameNonsense + matchNonsense + boxNonsense
const v9Nonsense = v8Nonsense + pairingNonsense
@@ -418,11 +436,14 @@ const randomnessCompiled = "81ffff03d101d000"
const v7Compiled = v6Compiled + "5e005f018120af060180070123456789abcd49490501988003012345494984" +
randomnessCompiled + "800243218001775c0280018881015d"
+const boxCompiled = "b9babbbcbdbfbe"
+
const switchCompiled = "81018d02fff800008101"
+const matchCompiled = "83030102018e02fff500008203013101320131"
-const v8Compiled = v7Compiled + switchCompiled + frameCompiled
+const v8Compiled = v7Compiled + switchCompiled + frameCompiled + matchCompiled + boxCompiled
-const v9Compiled = v7Compiled + pairingCompiled
+const v9Compiled = v8Compiled + pairingCompiled
var nonsense = map[uint64]string{
1: v1Nonsense,
@@ -445,6 +466,7 @@ var compiled = map[uint64]string{
6: "06" + v6Compiled,
7: "07" + v7Compiled,
8: "08" + v8Compiled,
+ 9: "09" + v9Compiled,
}
func pseudoOp(opcode string) bool {
@@ -487,7 +509,9 @@ func TestAssemble(t *testing.T) {
// check that compilation is stable over
// time. we must assemble to the same bytes
// this month that we did last month.
- expectedBytes, _ := hex.DecodeString(compiled[v])
+ bytecode, ok := compiled[v]
+ require.True(t, ok, "Need v%d bytecode", v)
+ expectedBytes, _ := hex.DecodeString(bytecode)
require.NotEmpty(t, expectedBytes)
// the hex is for convenience if the program has been changed. the
// hex string can be copy pasted back in as a new expected result.
@@ -863,8 +887,8 @@ func TestAssembleBytes(t *testing.T) {
expectedOptimizedConsts := "018006616263646566"
bad := [][]string{
- {"byte", "...operation needs byte literal argument"},
- {`byte "john" "doe"`, "...operation with extraneous argument"},
+ {"byte", "...needs byte literal argument"},
+ {`byte "john" "doe"`, "...with extraneous argument"},
}
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
@@ -1651,17 +1675,40 @@ func TestConstantArgs(t *testing.T) {
t.Parallel()
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
- testProg(t, "int", v, Expect{1, "int needs one argument"})
- testProg(t, "intc", v, Expect{1, "intc operation needs one argument"})
- testProg(t, "byte", v, Expect{1, "byte operation needs byte literal argument"})
- testProg(t, "bytec", v, Expect{1, "bytec operation needs one argument"})
- testProg(t, "addr", v, Expect{1, "addr operation needs one argument"})
+ testProg(t, "int", v, Expect{1, "int needs one immediate argument, was given 0"})
+ testProg(t, "int 1 2", v, Expect{1, "int needs one immediate argument, was given 2"})
+ testProg(t, "intc", v, Expect{1, "intc needs one immediate argument, was given 0"})
+ testProg(t, "intc hi bye", v, Expect{1, "intc needs one immediate argument, was given 2"})
+ testProg(t, "byte", v, Expect{1, "byte needs byte literal argument"})
+ testProg(t, "bytec", v, Expect{1, "bytec needs one immediate argument, was given 0"})
+ testProg(t, "bytec 1 x", v, Expect{1, "bytec needs one immediate argument, was given 2"})
+ testProg(t, "addr", v, Expect{1, "addr needs one immediate argument, was given 0"})
+ testProg(t, "addr x y", v, Expect{1, "addr needs one immediate argument, was given 2"})
}
for v := uint64(3); v <= AssemblerMaxVersion; v++ {
- testProg(t, "pushint", v, Expect{1, "pushint needs one argument"})
- testProg(t, "pushbytes", v, Expect{1, "pushbytes operation needs byte literal argument"})
+ testProg(t, "pushint", v, Expect{1, "pushint needs one immediate argument, was given 0"})
+ testProg(t, "pushint 3 4", v, Expect{1, "pushint needs one immediate argument, was given 2"})
+ testProg(t, "pushbytes", v, Expect{1, "pushbytes needs byte literal argument"})
+ }
+}
+
+func TestBranchArgs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ for v := uint64(2); v <= AssemblerMaxVersion; v++ {
+ testProg(t, "b", v, Expect{1, "b needs a single label argument"})
+ testProg(t, "b lab1 lab2", v, Expect{1, "b needs a single label argument"})
+ testProg(t, "int 1; bz", v, Expect{1, "bz needs a single label argument"})
+ testProg(t, "int 1; bz a b", v, Expect{1, "bz needs a single label argument"})
+ testProg(t, "int 1; bnz", v, Expect{1, "bnz needs a single label argument"})
+ testProg(t, "int 1; bnz c d", v, Expect{1, "bnz needs a single label argument"})
}
+ for v := uint64(4); v <= AssemblerMaxVersion; v++ {
+ testProg(t, "callsub", v, Expect{1, "callsub needs a single label argument"})
+ testProg(t, "callsub one two", v, Expect{1, "callsub needs a single label argument"})
+ }
}
func TestAssembleDisassembleErrors(t *testing.T) {
@@ -2347,13 +2394,13 @@ func TestErrShortBytecblock(t *testing.T) {
text := `intcblock 0x1234567812345678 0x1234567812345671 0x1234567812345672 0x1234567812345673 4 5 6 7 8`
ops := testProg(t, text, 1)
- _, _, err := parseIntcblock(ops.Program, 1)
- require.Equal(t, err, errShortIntcblock)
+ _, _, err := parseIntImmArgs(ops.Program, 1)
+ require.Equal(t, err, errShortIntImmArgs)
var cx EvalContext
cx.program = ops.Program
- err = checkIntConstBlock(&cx)
- require.Equal(t, err, errShortIntcblock)
+ err = checkIntImmArgs(&cx)
+ require.Equal(t, err, errShortIntImmArgs)
}
func TestMethodWarning(t *testing.T) {
@@ -2753,10 +2800,8 @@ func TestGetSpec(t *testing.T) {
require.Equal(t, "unknown opcode: nonsense", ops.Errors[1].Err.Error())
}
-func TestAddPseudoDocTags(t *testing.T) {
+func TestAddPseudoDocTags(t *testing.T) { //nolint:paralleltest // Not parallel because it modifies pseudoOps and opDocByName which are global maps
partitiontest.PartitionTest(t)
- // Not parallel because it modifies pseudoOps and opDocByName which are global maps
- // t.Parallel()
defer func() {
delete(pseudoOps, "tests")
delete(opDocByName, "multiple")
@@ -2890,3 +2935,119 @@ int 1
`
testProg(t, source, AssemblerMaxVersion)
}
+
+func TestAssembleMatch(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // fail when target doesn't correspond to existing label
+ source := `
+ pushints 1 1 1
+ match label1 label2
+ label1:
+ `
+ testProg(t, source, AssemblerMaxVersion, NewExpect(3, "reference to undefined label \"label2\""))
+
+ // No labels is pretty degenerate, but ok, I suppose. It's just a no-op
+ testProg(t, `
+int 0
+match
+int 1
+`, AssemblerMaxVersion)
+
+ // confirm arg limit
+ source = `
+ pushints 1 2 1
+ match label1 label2
+ label1:
+ label2:
+ `
+ ops := testProg(t, source, AssemblerMaxVersion)
+ require.Len(t, ops.Program, 12) // ver (1) + pushints (5) + opcode (1) + length (1) + labels (2*2)
+
+ // confirm byte array args are assembled successfully
+ source = `
+ pushbytess "1" "2" "1"
+ match label1 label2
+ label1:
+ label2:
+ `
+ testProg(t, source, AssemblerMaxVersion)
+
+ var labels []string
+ for i := 0; i < 255; i++ {
+ labels = append(labels, fmt.Sprintf("label%d", i))
+ }
+
+ // test that 255 labels is ok
+ source = fmt.Sprintf(`
+ pushint 1
+ match %s
+ %s
+ `, strings.Join(labels, " "), strings.Join(labels, ":\n")+":\n")
+ ops = testProg(t, source, AssemblerMaxVersion)
+ require.Len(t, ops.Program, 515) // ver (1) + pushint (2) + opcode (1) + length (1) + labels (2*255)
+
+ // 256 is too many
+ source = fmt.Sprintf(`
+ pushint 1
+ match %s extra
+ %s
+ `, strings.Join(labels, " "), strings.Join(labels, ":\n")+":\n")
+ testProg(t, source, AssemblerMaxVersion, Expect{3, "match cannot take more than 255 labels"})
+
+ // allow duplicate label reference
+ source = `
+ pushint 1
+ match label1 label1
+ label1:
+ `
+ testProg(t, source, AssemblerMaxVersion)
+}
+
+func TestAssemblePushConsts(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // allow empty const int list
+ source := `pushints`
+ testProg(t, source, AssemblerMaxVersion)
+
+ // allow empty const bytes list
+ source = `pushbytess`
+ testProg(t, source, AssemblerMaxVersion)
+
+ // basic test
+ source = `pushints 1 2 3`
+ ops := testProg(t, source, AssemblerMaxVersion)
+ require.Len(t, ops.Program, 6) // ver (1) + pushints (5)
+ source = `pushbytess "1" "2" "33"`
+ ops = testProg(t, source, AssemblerMaxVersion)
+ require.Len(t, ops.Program, 10) // ver (1) + pushbytess (9)
+
+ // 256 increases size of encoded length to two bytes
+ valsStr := make([]string, 256)
+ for i := range valsStr {
+ valsStr[i] = fmt.Sprintf("%d", 1)
+ }
+ source = fmt.Sprintf(`pushints %s`, strings.Join(valsStr, " "))
+ ops = testProg(t, source, AssemblerMaxVersion)
+ require.Len(t, ops.Program, 260) // ver (1) + opcode (1) + len (2) + ints (256)
+
+ for i := range valsStr {
+ valsStr[i] = fmt.Sprintf("\"%d\"", 1)
+ }
+ source = fmt.Sprintf(`pushbytess %s`, strings.Join(valsStr, " "))
+ ops = testProg(t, source, AssemblerMaxVersion)
+ require.Len(t, ops.Program, 516) // ver (1) + opcode (1) + len (2) + bytess (512)
+
+ // enforce correct types
+ source = `pushints "1" "2" "3"`
+ testProg(t, source, AssemblerMaxVersion, Expect{1, `strconv.ParseUint: parsing "\"1\"": invalid syntax`})
+ source = `pushbytess 1 2 3`
+ testProg(t, source, AssemblerMaxVersion, Expect{1, "byte arg did not parse: 1"})
+ source = `pushints 6 4; concat`
+ testProg(t, source, AssemblerMaxVersion, Expect{1, "concat arg 1 wanted type []byte got uint64"})
+ source = `pushbytess "x" "y"; +`
+ testProg(t, source, AssemblerMaxVersion, Expect{1, "+ arg 1 wanted type uint64 got []byte"})
+}
diff --git a/data/transactions/logic/backwardCompat_test.go b/data/transactions/logic/backwardCompat_test.go
index 13a19ddaa..04fff1828 100644
--- a/data/transactions/logic/backwardCompat_test.go
+++ b/data/transactions/logic/backwardCompat_test.go
@@ -480,7 +480,7 @@ func TestBackwardCompatAssemble(t *testing.T) {
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- testLogic(t, source, v, defaultEvalParams(nil))
+ testLogic(t, source, v, defaultEvalParams())
})
}
}
diff --git a/data/transactions/logic/box.go b/data/transactions/logic/box.go
new file mode 100644
index 000000000..6f2e9ccd9
--- /dev/null
+++ b/data/transactions/logic/box.go
@@ -0,0 +1,318 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+)
+
+const (
+ boxCreate = iota
+ boxRead
+ boxWrite
+ boxDelete
+)
+
+func (cx *EvalContext) availableBox(name string, operation int, createSize uint64) ([]byte, bool, error) {
+ if cx.txn.Txn.OnCompletion == transactions.ClearStateOC {
+ return nil, false, fmt.Errorf("boxes may not be accessed from ClearState program")
+ }
+
+ dirty, ok := cx.available.boxes[boxRef{cx.appID, name}]
+ if !ok {
+ return nil, false, fmt.Errorf("invalid Box reference %v", name)
+ }
+
+ // Since the box is in cx.available, we know this GetBox call is cheap. It
+ // will go (at most) to the cowRoundBase. Knowledge about existence
+ // simplifies write budget tracking, then we return the info to avoid yet
+ // another call to GetBox which most ops need anyway.
+ content, exists, err := cx.Ledger.GetBox(cx.appID, name)
+ if err != nil {
+ return nil, false, err
+ }
+
+ switch operation {
+ case boxCreate:
+ if exists {
+ if createSize != uint64(len(content)) {
+ return nil, false, fmt.Errorf("box size mismatch %d %d", uint64(len(content)), createSize)
+ }
+ // Since it exists, we have no dirty work to do. The weird case of
+ // box_put, which seems like a combination of create and write, is
+ // properly handled because box_put already used boxWrite to declare
+ // the intent to write (and track dirtiness).
+ return content, exists, nil
+ }
+ fallthrough // If it doesn't exist, a create is like write
+ case boxWrite:
+ writeSize := createSize
+ if exists {
+ writeSize = uint64(len(content))
+ }
+ if !dirty {
+ cx.available.dirtyBytes += writeSize
+ }
+ dirty = true
+ case boxDelete:
+ if dirty {
+ cx.available.dirtyBytes -= uint64(len(content))
+ }
+ dirty = false
+ case boxRead:
+ /* nothing to do */
+ }
+ cx.available.boxes[boxRef{cx.appID, name}] = dirty
+
+ if cx.available.dirtyBytes > cx.ioBudget {
+ return nil, false, fmt.Errorf("write budget (%d) exceeded %d", cx.ioBudget, cx.available.dirtyBytes)
+ }
+ return content, exists, nil
+}
+
+func argCheck(cx *EvalContext, name string, size uint64) error {
+ // Enforce length rules. Currently these are the same as enforced by
+ // ledger. If these were ever to change in proto, we would need to isolate
+ // changes to different program versions. (so a v7 app could not see a
+ // bigger box than expected, for example)
+ if len(name) == 0 {
+ return fmt.Errorf("box names may not be zero length")
+ }
+ if len(name) > cx.Proto.MaxAppKeyLen {
+ return fmt.Errorf("name too long: length was %d, maximum is %d", len(name), cx.Proto.MaxAppKeyLen)
+ }
+ if size > cx.Proto.MaxBoxSize {
+ return fmt.Errorf("box size too large: %d, maximum is %d", size, cx.Proto.MaxBoxSize)
+ }
+ return nil
+}
+
+func opBoxCreate(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // size
+ prev := last - 1 // name
+
+ name := string(cx.stack[prev].Bytes)
+ size := cx.stack[last].Uint
+
+ err := argCheck(cx, name, size)
+ if err != nil {
+ return err
+ }
+ _, exists, err := cx.availableBox(name, boxCreate, size)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ appAddr := cx.getApplicationAddress(cx.appID)
+ err = cx.Ledger.NewBox(cx.appID, name, make([]byte, size), appAddr)
+ if err != nil {
+ return err
+ }
+ }
+
+ cx.stack[prev] = boolToSV(!exists)
+ cx.stack = cx.stack[:last]
+ return err
+}
+
+func opBoxExtract(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // length
+ prev := last - 1 // start
+ pprev := prev - 1 // name
+
+ name := string(cx.stack[pprev].Bytes)
+ start := cx.stack[prev].Uint
+ length := cx.stack[last].Uint
+
+ err := argCheck(cx, name, basics.AddSaturate(start, length))
+ if err != nil {
+ return err
+ }
+ contents, exists, err := cx.availableBox(name, boxRead, 0)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return fmt.Errorf("no such box %#v", name)
+ }
+
+ bytes, err := extractCarefully(contents, start, length)
+ cx.stack[pprev].Bytes = bytes
+ cx.stack = cx.stack[:prev]
+ return err
+}
+
+func opBoxReplace(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // replacement
+ prev := last - 1 // start
+ pprev := prev - 1 // name
+
+ replacement := cx.stack[last].Bytes
+ start := cx.stack[prev].Uint
+ name := string(cx.stack[pprev].Bytes)
+
+ err := argCheck(cx, name, basics.AddSaturate(start, uint64(len(replacement))))
+ if err != nil {
+ return err
+ }
+
+ contents, exists, err := cx.availableBox(name, boxWrite, 0 /* size is already known */)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return fmt.Errorf("no such box %#v", name)
+ }
+
+ bytes, err := replaceCarefully(contents, replacement, start)
+ if err != nil {
+ return err
+ }
+ cx.stack = cx.stack[:pprev]
+ return cx.Ledger.SetBox(cx.appID, name, bytes)
+}
+
+func opBoxDel(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // name
+ name := string(cx.stack[last].Bytes)
+
+ err := argCheck(cx, name, 0)
+ if err != nil {
+ return err
+ }
+ _, exists, err := cx.availableBox(name, boxDelete, 0)
+ if err != nil {
+ return err
+ }
+ if exists {
+ appAddr := cx.getApplicationAddress(cx.appID)
+ _, err := cx.Ledger.DelBox(cx.appID, name, appAddr)
+ if err != nil {
+ return err
+ }
+ }
+ cx.stack[last] = boolToSV(exists)
+ return nil
+}
+
+func opBoxLen(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // name
+ name := string(cx.stack[last].Bytes)
+
+ err := argCheck(cx, name, 0)
+ if err != nil {
+ return err
+ }
+ contents, exists, err := cx.availableBox(name, boxRead, 0)
+ if err != nil {
+ return err
+ }
+
+ cx.stack[last] = stackValue{Uint: uint64(len(contents))}
+ cx.stack = append(cx.stack, boolToSV(exists))
+ return nil
+}
+
+func opBoxGet(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // name
+ name := string(cx.stack[last].Bytes)
+
+ err := argCheck(cx, name, 0)
+ if err != nil {
+ return err
+ }
+ contents, exists, err := cx.availableBox(name, boxRead, 0)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ contents = []byte{}
+ }
+ cx.stack[last].Bytes = contents // Will rightly panic if too big
+ cx.stack = append(cx.stack, boolToSV(exists))
+ return nil
+}
+
+func opBoxPut(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // value
+ prev := last - 1 // name
+
+ value := cx.stack[last].Bytes
+ name := string(cx.stack[prev].Bytes)
+
+ err := argCheck(cx, name, uint64(len(value)))
+ if err != nil {
+ return err
+ }
+
+ // This boxWrite usage requires the size, because the box may not exist.
+ contents, exists, err := cx.availableBox(name, boxWrite, uint64(len(value)))
+ if err != nil {
+ return err
+ }
+
+ cx.stack = cx.stack[:prev]
+
+ if exists {
+ /* the replacement must match existing size */
+ if len(contents) != len(value) {
+ return fmt.Errorf("attempt to box_put wrong size %d != %d", len(contents), len(value))
+ }
+ return cx.Ledger.SetBox(cx.appID, name, value)
+ }
+
+ /* The box did not exist, so create it. */
+ appAddr := cx.getApplicationAddress(cx.appID)
+ return cx.Ledger.NewBox(cx.appID, name, value, appAddr)
+}
+
+const boxPrefix = "bx:"
+const boxPrefixLength = len(boxPrefix)
+const boxNameIndex = boxPrefixLength + 8 // len("bx:") + 8 (appIdx, big-endian)
+
+// MakeBoxKey creates the key that a box named `name` under app `appIdx` should use.
+func MakeBoxKey(appIdx basics.AppIndex, name string) string {
+ /* This format is chosen so that a simple indexing scheme on the key would
+ allow for quick lookups of all the boxes of a certain app, or even all
+ the boxes of a certain app with a certain prefix.
+
+ The "bx:" prefix is so that the kvstore might be usable for things
+ besides boxes.
+ */
+ key := make([]byte, boxNameIndex+len(name))
+ copy(key, boxPrefix)
+ binary.BigEndian.PutUint64(key[boxPrefixLength:], uint64(appIdx))
+ copy(key[boxNameIndex:], name)
+ return string(key)
+}
+
+// SplitBoxKey extracts an appid and box name from a string that was created by MakeBoxKey()
+func SplitBoxKey(key string) (basics.AppIndex, string, error) {
+ if len(key) < boxNameIndex {
+ return 0, "", fmt.Errorf("SplitBoxKey() cannot extract AppIndex as key (%s) too short (length=%d)", key, len(key))
+ }
+ if key[:boxPrefixLength] != boxPrefix {
+ return 0, "", fmt.Errorf("SplitBoxKey() illegal app box prefix in key (%s). Expected prefix '%s'", key, boxPrefix)
+ }
+ keyBytes := []byte(key)
+ app := basics.AppIndex(binary.BigEndian.Uint64(keyBytes[boxPrefixLength:boxNameIndex]))
+ return app, key[boxNameIndex:], nil
+}
diff --git a/data/transactions/logic/box_test.go b/data/transactions/logic/box_test.go
new file mode 100644
index 000000000..515f0ad69
--- /dev/null
+++ b/data/transactions/logic/box_test.go
@@ -0,0 +1,602 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic_test
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBoxNewDel(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, txn, ledger := logic.MakeSampleEnv()
+
+ for _, size := range []int{24, 0} {
+ t.Run(fmt.Sprintf("box size=%d", size), func(t *testing.T) {
+ createSelf := fmt.Sprintf(`byte "self"; int %d; box_create;`, size)
+ createOther := fmt.Sprintf(`byte "other"; int %d; box_create;`, size)
+
+ ledger.NewApp(txn.Sender, 888, basics.AppParams{})
+
+ logic.TestApp(t, createSelf, ep)
+ ledger.DelBoxes(888, "self")
+
+ logic.TestApp(t, createSelf+`assert;`+createSelf+`!`, ep)
+ ledger.DelBoxes(888, "self")
+ logic.TestApp(t, createSelf+`assert;`+createOther, ep)
+ ledger.DelBoxes(888, "self")
+
+ logic.TestApp(t, createSelf+`assert; byte "self"; box_del`, ep)
+ logic.TestApp(t, `byte "self"; box_del; !`, ep)
+ logic.TestApp(t, createSelf+`assert
+ byte "self"; box_del; assert
+ byte "self"; box_del; !`, ep)
+ ledger.DelBoxes(888, "self")
+
+ logic.TestApp(t, fmt.Sprintf(
+ `byte "self"; box_get; !; assert; pop
+ byte "self"; int %d; bzero; box_put; int 1`, size), ep)
+ })
+ }
+
+}
+
+func TestBoxNewBad(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, txn, ledger := logic.MakeSampleEnv()
+
+ ledger.NewApp(txn.Sender, 888, basics.AppParams{})
+ logic.TestApp(t, `byte "self"; int 999; box_create`, ep, "write budget")
+
+ // In test proto, you get 100 I/O budget per boxref
+ ten := [10]transactions.BoxRef{}
+ txn.Boxes = append(txn.Boxes, ten[:]...) // write budget is now 11*100 = 1100
+ logic.TestApp(t, `byte "self"; int 999; box_create`, ep)
+ ledger.DelBoxes(888, "self")
+ logic.TestApp(t, `byte "self"; int 1000; box_create`, ep)
+ ledger.DelBoxes(888, "self")
+ logic.TestApp(t, `byte "self"; int 1001; box_create`, ep, "box size too large")
+
+ logic.TestApp(t, `byte "unknown"; int 1000; box_create`, ep, "invalid Box reference")
+
+ long := strings.Repeat("x", 65)
+ txn.Boxes = []transactions.BoxRef{{Name: []byte(long)}}
+ logic.TestApp(t, fmt.Sprintf(`byte "%s"; int 1000; box_create`, long), ep, "name too long")
+
+ txn.Boxes = []transactions.BoxRef{{Name: []byte("")}} // irrelevant, zero check comes first anyway
+ logic.TestApp(t, `byte ""; int 1000; box_create`, ep, "zero length")
+}
+
+func TestBoxReadWrite(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, txn, ledger := logic.MakeSampleEnv()
+
+ ledger.NewApp(txn.Sender, 888, basics.AppParams{})
+ // extract some bytes until past the end, confirm the beginning is zeros,
+ // and confirm when it fails.
+ logic.TestApp(t, `byte "self"; int 4; box_create; assert
+ byte "self"; int 1; int 2; box_extract;
+ byte 0x0000; ==; assert;
+ byte "self"; int 1; int 3; box_extract;
+ byte 0x000000; ==; assert;
+ byte "self"; int 0; int 4; box_extract;
+ byte 0x00000000; ==; assert;
+ int 1`, ep)
+
+ logic.TestApp(t, `byte "self"; int 1; int 4; box_extract;
+ byte 0x00000000; ==`, ep, "extraction end 5")
+
+ // Replace some bytes until past the end, confirm when it fails.
+ logic.TestApp(t, `byte "self"; int 1; byte 0x3031; box_replace;
+ byte "self"; int 0; int 4; box_extract;
+ byte 0x00303100; ==`, ep)
+ logic.TestApp(t, `byte "self"; int 1; byte 0x303132; box_replace;
+ byte "self"; int 0; int 4; box_extract;
+ byte 0x00303132; ==`, ep)
+ logic.TestApp(t, `byte "self"; int 1; byte 0x30313233; box_replace;
+ byte "self"; int 0; int 4; box_extract;
+ byte 0x0030313233; ==`, ep, "replacement end 5")
+
+ // Replace with different byte in different place.
+ logic.TestApp(t, `byte "self"; int 0; byte 0x4444; box_replace;
+ byte "self"; int 0; int 4; box_extract;
+ byte 0x44443132; ==`, ep)
+
+ // All bow down to the God of code coverage!
+ ledger.DelBoxes(888, "self")
+ logic.TestApp(t, `byte "self"; int 1; byte 0x3031; box_replace`, ep,
+ "no such box")
+ logic.TestApp(t, `byte "junk"; int 1; byte 0x3031; box_replace`, ep,
+ "invalid Box reference")
+}
+
+func TestBoxAcrossTxns(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ledger := logic.NewLedger(nil)
+ ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
+ // After creation in first txn, second one can read it (though it's empty)
+ logic.TestApps(t, []string{
+ `byte "self"; int 64; box_create`,
+ `byte "self"; int 10; int 4; box_extract; byte 0x00000000; ==`,
+ }, nil, 8, ledger)
+ // after creation, modification, the third can read it
+ logic.TestApps(t, []string{
+ `byte "self"; int 64; box_create`,
+ `byte "self"; int 2; byte "hi"; box_replace; int 1`,
+ `byte "self"; int 1; int 4; box_extract; byte 0x00686900; ==`, // "\0hi\0"
+ }, nil, 8, ledger)
+}
+
+// TestDirtyTracking gives confidence that the number of dirty bytes to be
+// written is tracked properly, despite repeated creates/deletes of the same
+// thing, touches in different txns, etc.
+func TestDirtyTracking(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, txn, ledger := logic.MakeSampleEnv()
+
+ ledger.NewApp(txn.Sender, 888, basics.AppParams{})
+ logic.TestApp(t, `byte "self"; int 200; box_create`, ep)
+ logic.TestApp(t, `byte "other"; int 201; box_create`, ep, "write budget")
+ // deleting "self" doesn't give extra write budget to create big "other"
+ logic.TestApp(t, `byte "self"; box_del; !; byte "other"; int 201; box_create`, ep,
+ "write budget")
+
+ // though it cancels out a creation that happened here
+ logic.TestApp(t, `byte "self"; int 200; box_create; assert
+ byte "self"; box_del; assert
+ byte "self"; int 200; box_create;
+ `, ep)
+
+ ledger.DelBoxes(888, "self", "other")
+ // same, but create a different box than deleted
+ logic.TestApp(t, `byte "self"; int 200; box_create; assert
+ byte "self"; box_del; assert
+ byte "other"; int 200; box_create;
+ `, ep)
+
+ // no funny business by trying to del twice! this case is also interesting
+ // because the read budget is spent on "other", which is 200, while the
+ // write budget is spent on "self"
+ logic.TestApp(t, `byte "other"; box_len; assert`, ep) // reminder, "other" exists!
+ logic.TestApp(t, `byte "self"; int 200; box_create; assert
+ byte "self"; box_del; assert
+ byte "self"; box_del; !; assert
+ byte "self"; int 201; box_create;
+ `, ep, "write budget")
+ logic.TestApp(t, `byte "self"; box_len; !; assert; !`, ep) // "self" was not made
+ logic.TestApp(t, `byte "self"; int 200; box_create`, ep) // make it
+ // Now that both exist with size 200, naming both in Boxes causes failure
+ logic.TestApp(t, `int 1`, ep, "read budget")
+
+}
+
+func TestBoxUnavailableWithClearState(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ tests := map[string]string{
+ "box_create": `byte "self"; int 64; box_create`,
+ "box_del": `byte "self"; box_del`,
+ "box_extract": `byte "self"; int 7; int 0; box_extract`,
+ "box_get": `byte "self"; box_get`,
+ "box_len": `byte "self"; box_len`,
+ "box_put": `byte "put"; byte "self"; box_put`,
+ "box_replace": `byte "self"; int 0; byte "new"; box_replace`,
+ }
+
+ for name, program := range tests {
+ t.Run(name, func(t *testing.T) {
+ ep, _, l := logic.MakeSampleEnv()
+ l.NewApp(basics.Address{}, 888, basics.AppParams{})
+ ep.TxnGroup[0].Txn.OnCompletion = transactions.ClearStateOC
+ logic.TestApp(t, program, ep, "boxes may not be accessed from ClearState program")
+ })
+ }
+}
+
+func TestBoxAvailability(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ledger := logic.NewLedger(nil)
+ ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
+
+ // B is not available (recall that "self" is set up by MakeSampleEnv, in TestApps)
+ logic.TestApps(t, []string{
+ `byte "self"; int 64; box_create`,
+ `byte "B"; int 10; int 4; box_extract; byte 0x00000000; ==`,
+ }, nil, 8, ledger, logic.NewExpect(1, "invalid Box reference B"))
+
+ // B is available if indexed by 0 in tx[1].Boxes
+ group := logic.MakeSampleTxnGroup(logic.MakeSampleTxn(), txntest.Txn{
+ Type: "appl",
+ ApplicationID: 10000,
+ Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("B")}},
+ }.SignedTxn())
+ group[0].Txn.Type = protocol.ApplicationCallTx
+ logic.TestApps(t, []string{
+ `byte "self"; int 64; box_create`,
+ `byte "B"; int 10; int 4; box_extract; byte 0x00000000; ==`,
+ }, group, 8, ledger, logic.NewExpect(1, "no such box"))
+
+ // B is available if listed by appId in tx[1].Boxes
+ group = logic.MakeSampleTxnGroup(logic.MakeSampleTxn(), txntest.Txn{
+ Type: "appl",
+ ApplicationID: 10000,
+ ForeignApps: []basics.AppIndex{10000},
+ Boxes: []transactions.BoxRef{{Index: 1, Name: []byte("B")}},
+ }.SignedTxn())
+ group[0].Txn.Type = protocol.ApplicationCallTx
+ logic.TestApps(t, []string{
+ `byte "self"; int 64; box_create`,
+ `byte "B"; int 10; int 4; box_extract; byte 0x00000000; ==`,
+ }, group, 8, ledger, logic.NewExpect(1, "no such box"))
+}
+
+func TestBoxReadBudget(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ appID := basics.AppIndex(888)
+ appAddr := appID.Address()
+
+ ep, txn, ledger := logic.MakeSampleEnv()
+ ledger.NewApp(basics.Address{}, appID, basics.AppParams{})
+
+ // Sample txn has two box refs, so read budget is 2*100
+
+ ledger.NewBox(appID, "self", make([]byte, 100), appAddr)
+ ledger.NewBox(appID, "other", make([]byte, 100), appAddr)
+ ledger.NewBox(appID, "third", make([]byte, 100), appAddr)
+
+ // Right at budget
+ logic.TestApp(t, `byte "self"; box_len; assert; byte "other"; box_len; assert; ==`, ep)
+
+ // With three box refs, read budget is now 3*100
+ txn.Boxes = append(txn.Boxes, transactions.BoxRef{Name: []byte("third")})
+ logic.TestApp(t, `byte "self"; box_len; assert; byte "third"; box_len; assert; ==`, ep)
+
+ // Increase "third" box size to 101
+ ledger.DelBox(appID, "third", appAddr)
+ ledger.NewBox(appID, "third", make([]byte, 101), appAddr)
+
+ // Budget exceeded
+ logic.TestApp(t, `byte "self"; box_len; assert; byte "third"; box_len; assert; ==`, ep, "box read budget (300) exceeded")
+ // Still exceeded if we don't touch the boxes
+ logic.TestApp(t, `int 1`, ep, "box read budget (300) exceeded")
+
+ // Still exceeded with one box ref
+ txn.Boxes = txn.Boxes[2:]
+ logic.TestApp(t, `byte "third"; box_len; assert; int 101; ==`, ep, "box read budget (100) exceeded")
+
+ // But not with two
+ txn.Boxes = append(txn.Boxes, transactions.BoxRef{})
+ logic.TestApp(t, `byte "third"; box_len; assert; int 101; ==`, ep)
+}
+
+func TestBoxWriteBudget(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, _, ledger := logic.MakeSampleEnv()
+ ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
+
+ // Sample tx[0] has two box refs, so write budget is 2*100
+
+ // Test simple use of one box, less than, equal, or over budget
+ logic.TestApp(t, `byte "self"; int 4; box_create`, ep)
+ logic.TestApp(t, `byte "self"; box_del; assert
+ byte "self"; int 199; box_create`, ep)
+ logic.TestApp(t, `byte "self"; box_del; assert
+ byte "self"; int 200; box_create`, ep)
+ logic.TestApp(t, `byte "self"; box_del; assert
+ byte "self"; int 201; box_create`, ep, "write budget (200) exceeded")
+
+ // Test interplay of two different boxes being created
+ logic.TestApp(t, `byte "self"; int 4; box_create; assert
+ byte "other"; int 4; box_create`, ep)
+
+ logic.TestApp(t, `byte "self"; box_del; assert; byte "other"; box_del; assert
+ byte "self"; int 4; box_create; assert;
+ byte "other"; int 196; box_create`, ep)
+
+ logic.TestApp(t, `byte "self"; box_del; assert; byte "other"; box_del; assert
+ byte "self"; int 6; box_create; assert
+ byte "other"; int 196; box_create`, ep,
+ "write budget (200) exceeded")
+ ledger.DelBoxes(888, "other")
+
+ logic.TestApp(t, `byte "self"; box_del; assert
+ byte "self"; int 6; box_create; assert
+ byte "other"; int 196; box_create; assert // fails to create
+ byte "self"; box_del;`, ep, "write budget (200) exceeded")
+
+ logic.TestApp(t, `byte "other"; int 196; box_create`, ep)
+ logic.TestApp(t, `byte "self"; box_del`, ep, "read budget") // 6 + 196 > 200
+ logic.TestApp(t, `byte "junk"; box_del`, ep, "read budget") // fails before invalid "junk" is noticed
+ ledger.DelBoxes(888, "self", "other")
+ logic.TestApp(t, `byte "junk"; box_del`, ep, "invalid Box reference")
+
+ // Create two boxes, that sum to over budget, then test trying to use them together
+ logic.TestApp(t, `byte "self"; int 101; box_create`, ep)
+ logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+ byte "other"; int 101; box_create`, ep, "write budget (200) exceeded")
+
+ logic.TestApp(t, `byte "other"; int 101; box_create`, ep)
+ logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+ byte "other"; int 1; byte 0x3333; box_replace;
+ int 1`, ep, "read budget (200) exceeded")
+ ledger.DelBoxes(888, "other")
+
+ logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+ byte "other"; int 10; box_create`, ep)
+ // They're now small enough to read and write
+ logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+ byte "other"; int 1; byte 0x3333; box_replace;
+ int 1`, ep)
+ // writing twice is no problem (even though it's the big one)
+ logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+ byte "self"; int 50; byte 0x3333; box_replace;
+ byte "other"; int 1; byte 0x3333; box_replace;
+ int 1`, ep)
+
+ logic.TestApp(t, `byte "self"; box_del; assert; byte "other"; box_del`, ep) // cleanup
+
+}
+
+// TestWriteBudgetPut ensures we get write budget right for box_put
+func TestWriteBudgetPut(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, _, ledger := logic.MakeSampleEnv()
+ ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
+
+ // Sample tx[0] has two box refs, so write budget is 2*100
+
+ // Test simple use of one box
+ logic.TestApp(t, `byte "self"; int 200; box_create`, ep) // equal to budget
+ logic.TestApp(t, `byte "self"; box_del`, ep)
+ logic.TestApp(t, `byte "self"; int 201; box_create`, ep, // 1 over budget
+ "write budget")
+
+ // More complicated versions that use 1 or more 150 byte boxes, so one is ok, two is over
+ logic.TestApp(t, `byte "self"; int 150; box_create`, ep)
+ logic.TestApp(t, `byte "self"; int 150; bzero; box_put; int 1`, ep)
+ logic.TestApp(t, `byte "self"; int 149; bzero; byte "x"; concat; box_put; int 1`, ep)
+ // puts to same name, doesn't go over budget (although we don't optimize
+ // away puts with the same content, this test uses different contents just
+ // to be sure).
+ logic.TestApp(t, `byte "self"; int 150; bzero; box_put;
+ byte "self"; int 149; bzero; byte "x"; concat; box_put; int 1`, ep)
+ // puts to different names do
+ logic.TestApp(t, `byte "self"; int 150; bzero; box_put;
+ byte "other"; int 149; bzero; byte "x"; concat; box_put; int 1`, ep,
+ "write budget")
+
+ // testing a regression: ensure box_put does not double debit when creating
+ logic.TestApp(t, `byte "self"; int 150; bzero; box_put; int 1`, ep)
+}
+
+// TestBoxRepeatedCreate ensures that app is not charged write budget for
+// creates that don't do anything.
+func TestBoxRepeatedCreate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, _, ledger := logic.MakeSampleEnv()
+ ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
+
+ // Sample tx[0] has two box refs, so write budget is 2*100
+ logic.TestApp(t, `byte "self"; int 201; box_create`, ep,
+ "write budget")
+ logic.TestApp(t, `byte "self"; int 200; box_create`, ep)
+ logic.TestApp(t, `byte "self"; int 200; box_create; !; assert // does not actually create
+ byte "other"; int 200; box_create; assert // does create, and budget should be enough
+ int 1`, ep)
+
+ ledger.DelBoxes(888, "self", "other")
+ logic.TestApp(t, `byte "other"; int 200; box_create; assert
+ byte "other"; box_del; assert
+ byte "other"; int 200; box_create`, ep)
+
+}
+
+func TestIOBudgetGrow(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, txn, ledger := logic.MakeSampleEnv()
+ ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
+ ledger.CreateBox(888, "self", 101)
+ ledger.CreateBox(888, "other", 101)
+
+ logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+ byte "other"; int 1; byte 0x3333; box_replace;
+ int 1`, ep, "read budget (200) exceeded")
+
+ txn.Boxes = append(txn.Boxes, transactions.BoxRef{})
+ // Since we added an empty BoxRef, we can read > 200.
+ logic.TestApp(t, `byte "self"; int 1; int 7; box_extract; pop;
+ byte "other"; int 1; int 7; box_extract; pop;
+ int 1`, ep)
+ // Add write, for that matter
+ logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+ byte "other"; int 1; byte 0x3333; box_replace;
+ int 1`, ep)
+
+ txn.Boxes = append(txn.Boxes, transactions.BoxRef{Name: []byte("another")})
+
+ // Here we read 202, and write a very different 350 (since we now have 4 brs)
+ logic.TestApp(t, `byte "self"; int 1; int 7; box_extract; pop;
+ byte "other"; int 1; int 7; box_extract; pop;
+ byte "another"; int 350; box_create`, ep)
+}
+
+func TestConveniences(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, _, ledger := logic.MakeSampleEnv()
+ ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
+
+ // box_get of a new name reports !exists, and returns 0 length bytes.
+ logic.TestApp(t, `byte "self"; box_get; !; assert; len; !`, ep)
+
+ // box_len of a new name reports !exists, and returns 0 as the length
+ logic.TestApp(t, `byte "self"; box_len; !; assert; !`, ep)
+
+ // box_put creates the box with contents provided
+ logic.TestApp(t, `byte "self"; byte 0x3132; box_put;
+ byte "self"; box_len; assert; int 2; ==; assert
+ byte "self"; box_get; assert; byte 0x3132; ==`, ep)
+
+ // box_put fails if box exists and is wrong size (self exists from last test)
+ logic.TestApp(t, `byte "self"; byte 0x313233; box_put; int 1`, ep,
+ "box_put wrong size")
+ ledger.DelBoxes(888, "self")
+
+ // put and get can interact with created boxes
+ logic.TestApp(t, `byte "self"; int 3; box_create`, ep)
+ logic.TestApp(t, `byte "self"; box_get; assert; byte 0x000000; ==`, ep)
+ logic.TestApp(t, `byte "self"; byte 0xAABBCC; box_put; int 1`, ep)
+ logic.TestApp(t, `byte "self"; int 1; byte 0xDDEE; box_replace; int 1`, ep)
+ logic.TestApp(t, `byte "self"; box_get; assert; byte 0xAADDEE; ==`, ep)
+ ledger.DelBoxes(888, "self")
+
+ // box_get panics if the box is too big (for TEAL, or for proto)
+ ep.Proto.MaxBoxSize = 5000
+ ep.Proto.BytesPerBoxReference = 5000 // avoid write budget error
+ logic.TestApp(t, `byte "self"; int 4098; box_create; assert; // bigger than maxStringSize
+ byte "self"; box_get; assert; len`, ep,
+ "box_get produced a too big")
+}
+
+// TestEarlyPanics ensures that all of the box opcodes die early if they are
+// given an empty or too long name.
+func TestEarlyPanics(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ tests := map[string]string{
+ "box_create": `byte "%s"; int 10; box_create`,
+ "box_del": `byte "%s"; box_del`,
+ "box_extract": `byte "%s"; int 1; int 2; box_extract`,
+ "box_get": `byte "%s"; box_get`,
+ "box_len": `byte "%s"; box_len`,
+ "box_put": `byte "%s"; byte "hello"; box_put`,
+ "box_replace": `byte "%s"; int 0; byte "new"; box_replace`,
+ }
+
+ ep, _, l := logic.MakeSampleEnv()
+ l.NewApp(basics.Address{}, 888, basics.AppParams{})
+
+ for name, program := range tests {
+ t.Run(name+"/zero", func(t *testing.T) {
+ logic.TestApp(t, fmt.Sprintf(program, ""), ep, "zero length")
+ })
+ }
+
+ big := strings.Repeat("x", 65)
+ for name, program := range tests {
+ t.Run(name+"/long", func(t *testing.T) {
+ logic.TestApp(t, fmt.Sprintf(program, big), ep, "name too long")
+ })
+ }
+
+}
+
+func TestBoxTotals(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, txn, ledger := logic.MakeSampleEnv()
+
+ ledger.NewApp(txn.Sender, 888, basics.AppParams{})
+ // The SENDER certainly has no boxes (but does exist)
+ logic.TestApp(t, `int 0; acct_params_get AcctTotalBoxes; pop; !`, ep)
+ // Nor does the app account, to start
+ logic.TestApp(t, `int 888; app_params_get AppAddress; assert;
+ acct_params_get AcctTotalBoxes; pop; !; `, ep)
+ // Create a 31 byte box with a 4 byte name
+ logic.TestApp(t, `byte "self"; int 31; box_create`, ep)
+ logic.TestApp(t, `int 888; app_params_get AppAddress; assert;
+ acct_params_get AcctTotalBoxes; pop; int 1; ==`, ep)
+ logic.TestApp(t, `int 888; app_params_get AppAddress; assert;
+ acct_params_get AcctTotalBoxBytes; pop; int 35; ==`, ep)
+}
+
+func TestMakeBoxKey(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ type testCase struct {
+ description string
+ name string
+ app basics.AppIndex
+ key string
+ err string
+ }
+
+ pp := func(tc testCase) string {
+ return fmt.Sprintf("<<<%s>>> (name, app) = (%#v, %d) --should--> key = %#v (err = [%s])", tc.description, tc.name, tc.app, tc.key, tc.err)
+ }
+
+ var testCases = []testCase{
+ // COPACETIC:
+ {"zero appid", "stranger", 0, "bx:\x00\x00\x00\x00\x00\x00\x00\x00stranger", ""},
+ {"typical", "348-8uj", 131231, "bx:\x00\x00\x00\x00\x00\x02\x00\x9f348-8uj", ""},
+ {"empty box name", "", 42, "bx:\x00\x00\x00\x00\x00\x00\x00*", ""},
+ {"random byteslice", "{\xbb\x04\a\xd1\xe2\xc6I\x81{", 13475904583033571713, "bx:\xbb\x04\a\xd1\xe2\xc6I\x81{\xbb\x04\a\xd1\xe2\xc6I\x81{", ""},
+
+ // ERRORS:
+ {"too short", "", 0, "stranger", "SplitBoxKey() cannot extract AppIndex as key (stranger) too short (length=8)"},
+ {"wrong prefix", "", 0, "strangersINTHEdark", "SplitBoxKey() illegal app box prefix in key (strangersINTHEdark). Expected prefix 'bx:'"},
+ }
+
+ for _, tc := range testCases {
+ app, name, err := logic.SplitBoxKey(tc.key)
+
+ if tc.err == "" {
+ key := logic.MakeBoxKey(tc.app, tc.name)
+ require.Equal(t, tc.app, app, pp(tc))
+ require.Equal(t, tc.name, name, pp(tc))
+ require.Equal(t, tc.key, key, pp(tc))
+ } else {
+ require.EqualError(t, err, tc.err, pp(tc))
+ }
+ }
+}
diff --git a/data/transactions/logic/debugger_test.go b/data/transactions/logic/debugger_test.go
index f33e8ae5c..2775e74e2 100644
--- a/data/transactions/logic/debugger_test.go
+++ b/data/transactions/logic/debugger_test.go
@@ -113,7 +113,7 @@ func TestDebuggerHook(t *testing.T) {
partitiontest.PartitionTest(t)
testDbg := testDbgHook{}
- ep := defaultEvalParams(nil)
+ ep := defaultEvalParams()
ep.Debugger = &testDbg
testLogic(t, testProgram, AssemblerMaxVersion, ep)
@@ -223,7 +223,7 @@ func TestCallStackUpdate(t *testing.T) {
}
testDbg := testDbgHook{}
- ep := defaultEvalParams(nil)
+ ep := defaultEvalParams()
ep.Debugger = &testDbg
testLogic(t, testCallStackProgram, AssemblerMaxVersion, ep)
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index a12149bcf..243c22ec2 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -75,6 +75,7 @@ var opDocByName = map[string]string{
"intc_2": "constant 2 from intcblock",
"intc_3": "constant 3 from intcblock",
"pushint": "immediate UINT",
+ "pushints": "push sequence of immediate uints to stack in the order they appear (first uint being deepest)",
"bytecblock": "prepare block of byte-array constants for use by bytec",
"bytec": "Ith constant from bytecblock",
"bytec_0": "constant 0 from bytecblock",
@@ -82,6 +83,7 @@ var opDocByName = map[string]string{
"bytec_2": "constant 2 from bytecblock",
"bytec_3": "constant 3 from bytecblock",
"pushbytes": "immediate BYTES",
+ "pushbytess": "push sequences of immediate byte arrays to stack (first byte array being deepest)",
"bzero": "zero filled byte-array of length A",
"arg": "Nth LogicSig argument",
@@ -128,30 +130,29 @@ var opDocByName = map[string]string{
"dup2": "duplicate A and B",
"dupn": "duplicate A, N times",
"dig": "Nth value from the top of the stack. dig 0 is equivalent to dup",
- "bury": "Replace the Nth value from the top of the stack. bury 0 fails.",
+ "bury": "replace the Nth value from the top of the stack with A. bury 0 fails.",
"cover": "remove top of stack, and place it deeper in the stack such that N elements are above it. Fails if stack depth <= N.",
"uncover": "remove the value at depth N in the stack and shift above items down so the Nth deep value is on top of the stack. Fails if stack depth <= N.",
"swap": "swaps A and B on stack",
"select": "selects one of two values based on top-of-stack: B if C != 0, else A",
- "concat": "join A and B",
- "substring": "A range of bytes from A starting at S up to but not including E. If E < S, or either is larger than the array length, the program fails",
- "substring3": "A range of bytes from A starting at B up to but not including C. If C < B, or either is larger than the array length, the program fails",
- "getbit": "Bth bit of (byte-array or integer) A. If B is greater than or equal to the bit length of the value (8*byte length), the program fails",
- "setbit": "Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C. If B is greater than or equal to the bit length of the value (8*byte length), the program fails",
- "getbyte": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails",
- "setbyte": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails",
- "extract": "A range of bytes from A starting at S up to but not including S+L. If L is 0, then extract to the end of the string. If S or S+L is larger than the array length, the program fails",
- "extract3": "A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails",
- "extract_uint16": "A uint16 formed from a range of big-endian bytes from A starting at B up to but not including B+2. If B+2 is larger than the array length, the program fails",
- "extract_uint32": "A uint32 formed from a range of big-endian bytes from A starting at B up to but not including B+4. If B+4 is larger than the array length, the program fails",
- "extract_uint64": "A uint64 formed from a range of big-endian bytes from A starting at B up to but not including B+8. If B+8 is larger than the array length, the program fails",
- "replace2": "Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A)",
- "replace3": "Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A)",
- "base64_decode": "decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E",
-
- "balance": "get balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted.",
- "min_balance": "get minimum required balance for account A, in microalgos. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes.",
+ "concat": "join A and B",
+ "substring": "A range of bytes from A starting at S up to but not including E. If E < S, or either is larger than the array length, the program fails",
+ "substring3": "A range of bytes from A starting at B up to but not including C. If C < B, or either is larger than the array length, the program fails",
+ "getbit": "Bth bit of (byte-array or integer) A. If B is greater than or equal to the bit length of the value (8*byte length), the program fails",
+ "setbit": "Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C. If B is greater than or equal to the bit length of the value (8*byte length), the program fails",
+ "getbyte": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails",
+ "setbyte": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails",
+ "extract": "A range of bytes from A starting at S up to but not including S+L. If L is 0, then extract to the end of the string. If S or S+L is larger than the array length, the program fails",
+ "extract3": "A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails",
+ "extract_uint16": "A uint16 formed from a range of big-endian bytes from A starting at B up to but not including B+2. If B+2 is larger than the array length, the program fails",
+ "extract_uint32": "A uint32 formed from a range of big-endian bytes from A starting at B up to but not including B+4. If B+4 is larger than the array length, the program fails",
+ "extract_uint64": "A uint64 formed from a range of big-endian bytes from A starting at B up to but not including B+8. If B+8 is larger than the array length, the program fails",
+ "replace2": "Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A)",
+ "replace3": "Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A)",
+ "base64_decode": "decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E",
+ "balance": "balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted. Changes caused by inner transactions are observable immediately following `itxn_submit`",
+ "min_balance": "minimum required balance for account A, in microalgos. Required balance is affected by ASA, App, and Box usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes. Changes caused by inner transactions or box usage are observable immediately following the opcode effecting the change.",
"app_opted_in": "1 if account A is opted in to application B, else 0",
"app_local_get": "local state of the key B in the current application in account A",
"app_local_get_ex": "X is the local state of application B, key C in account A. Y is 1 if key existed, else 0",
@@ -167,6 +168,7 @@ var opDocByName = map[string]string{
"acct_params_get": "X is field F from account A. Y is 1 if A owns positive algos, else 0",
"assert": "immediately fail unless A is a non-zero number",
"callsub": "branch unconditionally to TARGET, saving the next instruction on the call stack",
+ "proto": "Prepare top call frame for a retsub that will assume A args and R return values.",
"retsub": "pop the top instruction from the call stack and branch to it",
"b+": "A plus B. A and B are interpreted as big-endian unsigned integers",
@@ -197,11 +199,19 @@ var opDocByName = map[string]string{
"block": "field F of block A. Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive)",
"switch": "branch to the Ath label. Continue at following instruction if index A exceeds the number of labels.",
+ "match": "given match cases from A[1] to A[N], branch to the Ith label where A[I] = B. Continue to the following instruction if no matches are found.",
- "proto": "Prepare top call frame for a retsub that will assume A args and R return values.",
"frame_dig": "Nth (signed) value from the frame pointer.",
- "frame_bury": "Replace the Nth (signed) value from the frame pointer in the stack",
- "popn": "Remove N values from the top of the stack",
+ "frame_bury": "replace the Nth (signed) value from the frame pointer in the stack with A",
+ "popn": "remove N values from the top of the stack",
+
+ "box_create": "create a box named A, of length B. Fail if A is empty or B exceeds 32,768. Returns 0 if A already existed, else 1",
+ "box_extract": "read C bytes from box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.",
+ "box_replace": "write byte-array C into box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.",
+ "box_del": "delete box named A if it exists. Return 1 if A existed, 0 otherwise",
+ "box_len": "X is the length of box A if A exists, else 0. Y is 1 if A exists, else 0.",
+ "box_get": "X is the contents of box A if A exists, else ''. Y is 1 if A exists, else 0.",
+ "box_put": "replaces the contents of box A with byte-array B. Fails if A exists and len(B) != len(box A). Creates A if it does not exist",
}
// OpDoc returns a description of the op
@@ -210,12 +220,14 @@ func OpDoc(opName string) string {
}
var opcodeImmediateNotes = map[string]string{
- "intcblock": "{varuint length} [{varuint value}, ...]",
+ "intcblock": "{varuint count} [{varuint value}, ...]",
"intc": "{uint8 int constant index}",
"pushint": "{varuint int}",
- "bytecblock": "{varuint length} [({varuint value length} bytes), ...]",
+ "pushints": "{varuint count} [{varuint value}, ...]",
+ "bytecblock": "{varuint count} [({varuint value length} bytes), ...]",
"bytec": "{uint8 byte constant index}",
"pushbytes": "{varuint length} {bytes}",
+ "pushbytess": "{varuint count} [({varuint value length} bytes), ...]",
"arg": "{uint8 arg index N}",
"global": "{uint8 global field index}",
@@ -273,6 +285,7 @@ var opcodeImmediateNotes = map[string]string{
"block": "{uint8 block field}",
"switch": "{uint8 branch count} [{int16 branch offset, big-endian}, ...]",
+ "match": "{uint8 branch count} [{int16 branch offset, big-endian}, ...]",
"proto": "{uint8 arguments} {uint8 return values}",
"frame_dig": "{int8 frame slot}",
@@ -300,6 +313,7 @@ var opDocExtras = map[string]string{
"bz": "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.",
"b": "See `bnz` for details on how branches work. `b` always jumps to the offset.",
"callsub": "The call stack is separate from the data stack. Only `callsub`, `retsub`, and `proto` manipulate it.",
+ "proto": "Fails unless the last instruction executed was a `callsub`.",
"retsub": "If the current frame was prepared by `proto A R`, `retsub` will remove the 'A' arguments from the stack, move the `R` return values down, and pop any stack locations above the relocated return values.",
"intcblock": "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script.",
"bytecblock": "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script.",
@@ -318,7 +332,9 @@ var opDocExtras = map[string]string{
"btoi": "`btoi` fails if the input is longer than 8 bytes.",
"concat": "`concat` fails if the result would be greater than 4096 bytes.",
"pushbytes": "pushbytes args are not added to the bytecblock during assembly processes",
+ "pushbytess": "pushbytess args are not added to the bytecblock during assembly processes",
"pushint": "pushint args are not added to the intcblock during assembly processes",
+ "pushints": "pushints args are not added to the intcblock during assembly processes",
"getbit": "see explanation of bit ordering in setbit",
"setbit": "When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.",
"balance": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.",
@@ -339,9 +355,15 @@ var opDocExtras = map[string]string{
"itxn_next": "`itxn_next` initializes the transaction exactly as `itxn_begin` does",
	"itxn_field": "`itxn_field` fails if A is of the wrong type for F, including a byte array of the wrong size for use as an address when F is an address field. `itxn_field` also fails if A is an account, asset, or app that is not _available_, or an attempt is made to extend an array field beyond the limit imposed by consensus parameters. (Addresses set into asset params of acfg transactions need not be _available_.)",
"itxn_submit": "`itxn_submit` resets the current transaction so that it can not be resubmitted. A new `itxn_begin` is required to prepare another inner transaction.",
+
"base64_decode": "*Warning*: Usage should be restricted to very rare use cases. In almost all cases, smart contracts should directly handle non-encoded byte-strings. This opcode should only be used in cases where base64 is the only available option, e.g. interoperability with a third-party that only signs base64 strings.\n\n Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See [RFC 4648 sections 4 and 5](https://rfc-editor.org/rfc/rfc4648.html#section-4). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\\n` and `\\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\\r`, or `\\n`.",
	"json_ref": "*Warning*: Usage should be restricted to very rare use cases, as JSON decoding is expensive and quite limited. In addition, JSON objects are large and not optimized for size.\n\nAlmost all smart contracts should use simpler and smaller methods (such as the [ABI](https://arc.algorand.foundation/ARCs/arc-0004)). This opcode should only be used in cases where JSON is the only available option, e.g. when a third-party only signs JSON.",
- "proto": "Fails unless the last instruction executed was a `callsub`.",
+
+ "match": "`match` consumes N+1 values from the stack. Let the top stack value be B. The following N values represent an ordered list of match cases/constants (A), where the first value (A[0]) is the deepest in the stack. The immediate arguments are an ordered list of N labels (T). `match` will branch to target T[I], where A[I] = B. If there are no matches then execution continues on to the next instruction.",
+
+ "box_create": "Newly created boxes are filled with 0 bytes. `box_create` will fail if the referenced box already exists with a different size. Otherwise, existing boxes are unchanged by `box_create`.",
+ "box_get": "For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`",
+ "box_put": "For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`",
}
// OpDocExtra returns extra documentation text about an op
@@ -357,9 +379,10 @@ var OpGroups = map[string][]string{
"Byte Array Manipulation": {"substring", "substring3", "extract", "extract3", "extract_uint16", "extract_uint32", "extract_uint64", "replace2", "replace3", "base64_decode", "json_ref"},
"Byte Array Arithmetic": {"b+", "b-", "b/", "b*", "b<", "b>", "b<=", "b>=", "b==", "b!=", "b%", "bsqrt"},
"Byte Array Logic": {"b|", "b&", "b^", "b~"},
- "Loading Values": {"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "bzero", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "args", "txn", "gtxn", "txna", "txnas", "gtxna", "gtxnas", "gtxns", "gtxnsa", "gtxnsas", "global", "load", "loads", "store", "stores", "gload", "gloads", "gloadss", "gaid", "gaids"},
- "Flow Control": {"err", "bnz", "bz", "b", "return", "pop", "popn", "dup", "dup2", "dupn", "dig", "bury", "cover", "uncover", "frame_dig", "frame_bury", "swap", "select", "assert", "callsub", "proto", "retsub", "switch"},
+ "Loading Values": {"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "pushints", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "pushbytess", "bzero", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "args", "txn", "gtxn", "txna", "txnas", "gtxna", "gtxnas", "gtxns", "gtxnsa", "gtxnsas", "global", "load", "loads", "store", "stores", "gload", "gloads", "gloadss", "gaid", "gaids"},
+ "Flow Control": {"err", "bnz", "bz", "b", "return", "pop", "popn", "dup", "dup2", "dupn", "dig", "bury", "cover", "uncover", "frame_dig", "frame_bury", "swap", "select", "assert", "callsub", "proto", "retsub", "switch", "match"},
"State Access": {"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get", "app_params_get", "acct_params_get", "log", "block"},
+ "Box Access": {"box_create", "box_extract", "box_replace", "box_del", "box_len", "box_get", "box_put"},
"Inner Transactions": {"itxn_begin", "itxn_next", "itxn_field", "itxn_submit", "itxn", "itxna", "itxnas", "gitxn", "gitxna", "gitxnas"},
}
diff --git a/data/transactions/logic/doc_test.go b/data/transactions/logic/doc_test.go
index e95293106..9b5f2a950 100644
--- a/data/transactions/logic/doc_test.go
+++ b/data/transactions/logic/doc_test.go
@@ -33,7 +33,9 @@ func TestOpDocs(t *testing.T) {
opsSeen[op.Name] = false
}
for name := range opDocByName {
- assert.Contains(t, opsSeen, name, "opDocByName contains strange opcode %#v", name)
+ if _, ok := opsSeen[name]; !ok { // avoid assert.Contains: printing opsSeen is waste
+ assert.Fail(t, "opDocByName contains strange opcode", "%#v", name)
+ }
opsSeen[name] = true
}
for op, seen := range opsSeen {
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index 1e78d6960..34db841c1 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -113,7 +113,7 @@ func (sv stackValue) address() (addr basics.Address, err error) {
func (sv stackValue) uint() (uint64, error) {
if sv.Bytes != nil {
- return 0, errors.New("not a uint64")
+ return 0, fmt.Errorf("%#v is not a uint64", sv.Bytes)
}
return sv.Uint, nil
}
@@ -217,7 +217,7 @@ type LedgerForLogic interface {
AccountData(addr basics.Address) (ledgercore.AccountData, error)
Authorizer(addr basics.Address) (basics.Address, error)
Round() basics.Round
- LatestTimestamp() int64
+ PrevTimestamp() int64
BlockHdrCached(basics.Round) (bookkeeping.BlockHeader, error)
AssetHolding(addr basics.Address, assetIdx basics.AssetIndex) (basics.AssetHolding, error)
@@ -233,15 +233,38 @@ type LedgerForLogic interface {
SetGlobal(appIdx basics.AppIndex, key string, value basics.TealValue) error
DelGlobal(appIdx basics.AppIndex, key string) error
+ NewBox(appIdx basics.AppIndex, key string, value []byte, appAddr basics.Address) error
+ GetBox(appIdx basics.AppIndex, key string) ([]byte, bool, error)
+ SetBox(appIdx basics.AppIndex, key string, value []byte) error
+ DelBox(appIdx basics.AppIndex, key string, appAddr basics.Address) (bool, error)
+
Perform(gi int, ep *EvalParams) error
Counter() uint64
}
-// resources contains a list of apps and assets. It's used to track the apps and
-// assets created by a txgroup, for "free" access.
+// resources contains a catalog of available resources. It's used to track the
+// apps, assets, and boxes that are available to a transaction, outside the
+// direct foreign array mechanism.
type resources struct {
asas []basics.AssetIndex
apps []basics.AppIndex
+
+ // boxes are all of the top-level box refs from the txgroup. Most are added
+ // during NewEvalParams(). refs using 0 on an appl create are resolved and
+ // added when the appl executes. The boolean value indicates the "dirtiness"
+ // of the box - has it been modified in this txngroup? If yes, the size of
+ // the box counts against the group writeBudget. So delete is NOT a dirtying
+ // operation.
+ boxes map[boxRef]bool
+
+ // dirtyBytes maintains a running count of the number of dirty bytes in `boxes`
+ dirtyBytes uint64
+}
+
+// boxRef is the "hydrated" form of a BoxRef - it has the actual app id, not an index
+type boxRef struct {
+ app basics.AppIndex
+ name string
}
// EvalParams contains data that comes into condition evaluation.
@@ -281,9 +304,19 @@ type EvalParams struct {
// Total allowable inner txns in a group transaction (nil before inner pooling enabled)
pooledAllowedInners *int
- // created contains resources that may be used for "created" - they need not be in
- // a foreign array. They remain empty until createdResourcesVersion.
- created *resources
+ // available contains resources that may be used even though they are not
+ // necessarily directly in the txn's "static arrays". Apps and ASAs go in if
+ // the app or asa was created earlier in the txgroup (empty until
+ // createdResourcesVersion). Boxes go in when the ep is created, to share
+ // availability across all txns in the group.
+ available *resources
+
+ // ioBudget is the number of bytes that the box ref'd boxes can sum to, and
+ // the number of bytes that created or written boxes may sum to.
+ ioBudget uint64
+
+ // readBudgetChecked allows us to only check the read budget once
+ readBudgetChecked bool
// Caching these here means the hashes can be shared across the TxnGroup
// (and inners, because the cache is shared with the inner EvalParams)
@@ -310,9 +343,30 @@ func copyWithClearAD(txgroup []transactions.SignedTxnWithAD) []transactions.Sign
// NewEvalParams creates an EvalParams to use while evaluating a top-level txgroup
func NewEvalParams(txgroup []transactions.SignedTxnWithAD, proto *config.ConsensusParams, specials *transactions.SpecialAddresses) *EvalParams {
apps := 0
+ var allBoxes map[boxRef]bool
for _, tx := range txgroup {
if tx.Txn.Type == protocol.ApplicationCallTx {
apps++
+ if allBoxes == nil && len(tx.Txn.Boxes) > 0 {
+ allBoxes = make(map[boxRef]bool)
+ }
+ for _, br := range tx.Txn.Boxes {
+ var app basics.AppIndex
+ if br.Index == 0 {
+ // "current app": Ignore if this is a create, else use ApplicationID
+ if tx.Txn.ApplicationID == 0 {
+ // When the create actually happens, and we learn the appID, we'll add it.
+ continue
+ }
+ app = tx.Txn.ApplicationID
+ } else {
+ // Bounds check will already have been done by
+ // WellFormed. For testing purposes, it's better to panic
+ // now than after returning a nil.
+ app = tx.Txn.ForeignApps[br.Index-1] // shift for the 0=this convention
+ }
+ allBoxes[boxRef{app, string(br.Name)}] = false
+ }
}
}
@@ -351,15 +405,14 @@ func NewEvalParams(txgroup []transactions.SignedTxnWithAD, proto *config.Consens
FeeCredit: &credit,
PooledApplicationBudget: pooledApplicationBudget,
pooledAllowedInners: pooledAllowedInners,
- created: &resources{},
+ available: &resources{boxes: allBoxes},
appAddrCache: make(map[basics.AppIndex]basics.Address),
}
}
// feeCredit returns the extra fee supplied in this top-level txgroup compared
// to required minfee. It can make assumptions about overflow because the group
-// is known OK according to TxnGroupBatchVerify. (In essence the group is
-// "WellFormed")
+// is known OK according to txnGroupBatchPrep. (The group is "WellFormed")
func feeCredit(txgroup []transactions.SignedTxnWithAD, minFee uint64) uint64 {
minFeeCount := uint64(0)
feesPaid := uint64(0)
@@ -369,10 +422,9 @@ func feeCredit(txgroup []transactions.SignedTxnWithAD, minFee uint64) uint64 {
}
feesPaid = basics.AddSaturate(feesPaid, stxn.Txn.Fee.Raw)
}
- // Overflow is impossible, because TxnGroupBatchVerify checked.
+ // Overflow is impossible, because txnGroupBatchPrep checked.
feeNeeded := minFee * minFeeCount
-
- return feesPaid - feeNeeded
+ return basics.SubSaturate(feesPaid, feeNeeded)
}
// NewInnerEvalParams creates an EvalParams to be used while evaluating an inner group txgroup
@@ -400,16 +452,21 @@ func NewInnerEvalParams(txg []transactions.SignedTxnWithAD, caller *EvalContext)
Trace: caller.Trace,
TxnGroup: txg,
pastScratch: make([]*scratchSpace, len(txg)),
+ logger: caller.logger,
+ SigLedger: caller.SigLedger,
+ Ledger: caller.Ledger,
+ Debugger: nil, // See #4438, where this becomes caller.Debugger
MinAvmVersion: &minAvmVersion,
FeeCredit: caller.FeeCredit,
Specials: caller.Specials,
PooledApplicationBudget: caller.PooledApplicationBudget,
pooledAllowedInners: caller.pooledAllowedInners,
- SigLedger: caller.SigLedger,
- Ledger: caller.Ledger,
- created: caller.created,
+ available: caller.available,
+ ioBudget: caller.ioBudget,
+ readBudgetChecked: true, // don't check for inners
appAddrCache: caller.appAddrCache,
- caller: caller,
+ // read comment in EvalParams declaration about txid caches
+ caller: caller,
}
return ep
}
@@ -458,17 +515,17 @@ func (ep *EvalParams) log() logging.Logger {
// package. For example, after a acfg transaction is processed, the AD created
// by the acfg is added to the EvalParams this way.
func (ep *EvalParams) RecordAD(gi int, ad transactions.ApplyData) {
- if ep.created == nil {
+ if ep.available == nil {
// This is a simplified ep. It won't be used for app evaluation, and
// shares the TxnGroup memory with the caller. Don't touch anything!
return
}
ep.TxnGroup[gi].ApplyData = ad
if aid := ad.ConfigAsset; aid != 0 {
- ep.created.asas = append(ep.created.asas, aid)
+ ep.available.asas = append(ep.available.asas, aid)
}
if aid := ad.ApplicationID; aid != 0 {
- ep.created.apps = append(ep.created.apps, aid)
+ ep.available.apps = append(ep.available.apps, aid)
}
}
@@ -604,10 +661,6 @@ func (st StackType) Typed() bool {
return false
}
-func (sts StackTypes) plus(other StackTypes) StackTypes {
- return append(sts, other...)
-}
-
// PanicError wraps a recover() catching a panic()
type PanicError struct {
PanicValue interface{}
@@ -659,10 +712,54 @@ func EvalContract(program []byte, gi int, aid basics.AppIndex, params *EvalParam
}
}
+ // If this is a creation, make any "0 index" box refs available now that we
+ // have an appID.
+ if cx.txn.Txn.ApplicationID == 0 {
+ for _, br := range cx.txn.Txn.Boxes {
+ if br.Index == 0 {
+ cx.EvalParams.available.boxes[boxRef{cx.appID, string(br.Name)}] = false
+ }
+ }
+ }
+
+ // Check the I/O budget for reading if this is the first top-level app call
+ if cx.caller == nil && !cx.readBudgetChecked {
+ boxRefCount := uint64(0) // Intentionally counts duplicates
+ for _, tx := range cx.TxnGroup {
+ boxRefCount += uint64(len(tx.Txn.Boxes))
+ }
+ cx.ioBudget = boxRefCount * cx.Proto.BytesPerBoxReference
+
+ used := uint64(0)
+ for br := range cx.available.boxes {
+ if len(br.name) == 0 {
+ // 0 length names are not allowed for actual created boxes, but
+ // may have been used to add I/O budget.
+ continue
+ }
+ box, ok, err := cx.Ledger.GetBox(br.app, br.name)
+ if err != nil {
+ return false, nil, err
+ }
+ if !ok {
+ continue
+ }
+ size := uint64(len(box))
+ cx.available.boxes[br] = false
+
+ used = basics.AddSaturate(used, size)
+ if used > cx.ioBudget {
+ return false, nil, fmt.Errorf("box read budget (%d) exceeded", cx.ioBudget)
+ }
+ }
+ cx.readBudgetChecked = true
+ }
+
if cx.Trace != nil && cx.caller != nil {
fmt.Fprintf(cx.Trace, "--- enter %d %s %v\n", aid, cx.txn.Txn.OnCompletion, cx.txn.Txn.ApplicationArgs)
}
pass, err := eval(program, &cx)
+
if cx.Trace != nil && cx.caller != nil {
fmt.Fprintf(cx.Trace, "--- exit %d accept=%t\n", aid, pass)
}
@@ -709,33 +806,15 @@ func eval(program []byte, cx *EvalContext) (pass bool, err error) {
errstr += cx.Trace.String()
}
err = PanicError{x, errstr}
- cx.EvalParams.log().Errorf("recovered panic in Eval: %w", err)
- }
- }()
-
- defer func() {
- // Ensure we update the debugger before exiting
- if cx.Debugger != nil {
- errDbg := cx.Debugger.Complete(cx.refreshDebugState(err))
- if err == nil {
- err = errDbg
- }
+ cx.EvalParams.log().Errorf("recovered panic in Eval: %v", err)
}
}()
- if (cx.EvalParams.Proto == nil) || (cx.EvalParams.Proto.LogicSigVersion == 0) {
- err = errLogicSigNotSupported
- return
- }
- if cx.txn.Lsig.Args != nil && len(cx.txn.Lsig.Args) > transactions.EvalMaxArgs {
- err = errTooManyArgs
- return
- }
+ // Avoid returning for any reason until after cx.debugState is setup. That
+ // requires cx to be minimally setup, too.
- version, vlen, err := versionCheck(program, cx.EvalParams)
- if err != nil {
- return false, err
- }
+ version, vlen, verr := versionCheck(program, cx.EvalParams)
+ // defer verr check until after cx and debugState is setup
cx.version = version
cx.pc = vlen
@@ -751,6 +830,23 @@ func eval(program []byte, cx *EvalContext) (pass bool, err error) {
if derr := cx.Debugger.Register(cx.refreshDebugState(err)); derr != nil {
return false, derr
}
+ defer func() {
+ // Ensure we update the debugger before exiting
+ errDbg := cx.Debugger.Complete(cx.refreshDebugState(err))
+ if err == nil {
+ err = errDbg
+ }
+ }()
+ }
+
+ if (cx.EvalParams.Proto == nil) || (cx.EvalParams.Proto.LogicSigVersion == 0) {
+ return false, errLogicSigNotSupported
+ }
+ if cx.txn.Lsig.Args != nil && len(cx.txn.Lsig.Args) > transactions.EvalMaxArgs {
+ return false, errTooManyArgs
+ }
+ if verr != nil {
+ return false, verr
}
for (err == nil) && (cx.pc < len(cx.program)) {
@@ -1126,6 +1222,15 @@ func (cx *EvalContext) checkStep() (int, error) {
return opcost, nil
}
+func (cx *EvalContext) ensureStackCap(targetCap int) {
+ if cap(cx.stack) < targetCap {
+ // Let's grow all at once, plus a little slack.
+ newStack := make([]stackValue, len(cx.stack), targetCap+4)
+ copy(newStack, cx.stack)
+ cx.stack = newStack
+ }
+}
+
func opErr(cx *EvalContext) error {
return errors.New("err opcode executed")
}
@@ -1855,7 +1960,7 @@ func opBytesZero(cx *EvalContext) error {
func opIntConstBlock(cx *EvalContext) error {
var err error
- cx.intc, cx.nextpc, err = parseIntcblock(cx.program, cx.pc+1)
+ cx.intc, cx.nextpc, err = parseIntImmArgs(cx.program, cx.pc+1)
return err
}
@@ -1895,9 +2000,24 @@ func opPushInt(cx *EvalContext) error {
return nil
}
+func opPushInts(cx *EvalContext) error {
+ intc, nextpc, err := parseIntImmArgs(cx.program, cx.pc+1)
+ if err != nil {
+ return err
+ }
+ finalLen := len(cx.stack) + len(intc)
+ cx.ensureStackCap(finalLen)
+ for _, cint := range intc {
+ sv := stackValue{Uint: cint}
+ cx.stack = append(cx.stack, sv)
+ }
+ cx.nextpc = nextpc
+ return nil
+}
+
func opByteConstBlock(cx *EvalContext) error {
var err error
- cx.bytec, cx.nextpc, err = parseBytecBlock(cx.program, cx.pc+1)
+ cx.bytec, cx.nextpc, err = parseByteImmArgs(cx.program, cx.pc+1)
return err
}
@@ -1942,6 +2062,21 @@ func opPushBytes(cx *EvalContext) error {
return nil
}
+func opPushBytess(cx *EvalContext) error {
+ cbytess, nextpc, err := parseByteImmArgs(cx.program, cx.pc+1)
+ if err != nil {
+ return err
+ }
+ finalLen := len(cx.stack) + len(cbytess)
+ cx.ensureStackCap(finalLen)
+ for _, cbytes := range cbytess {
+ sv := stackValue{Bytes: cbytes}
+ cx.stack = append(cx.stack, sv)
+ }
+ cx.nextpc = nextpc
+ return nil
+}
+
func opArgN(cx *EvalContext, n uint64) error {
if n >= uint64(len(cx.txn.Lsig.Args)) {
return fmt.Errorf("cannot load arg[%d] of %d", n, len(cx.txn.Lsig.Args))
@@ -2119,6 +2254,44 @@ func opSwitch(cx *EvalContext) error {
return nil
}
+func opMatch(cx *EvalContext) error {
+ n := int(cx.program[cx.pc+1])
+ // stack contains the n sized match list and the single match value
+ if n+1 > len(cx.stack) {
+ return fmt.Errorf("match expects %d stack args while stack only contains %d", n+1, len(cx.stack))
+ }
+
+ last := len(cx.stack) - 1
+ matchVal := cx.stack[last]
+ cx.stack = cx.stack[:last]
+
+ argBase := len(cx.stack) - n
+ matchList := cx.stack[argBase:]
+ cx.stack = cx.stack[:argBase]
+
+ matchedIdx := n
+ for i, stackArg := range matchList {
+ if stackArg.argType() != matchVal.argType() {
+ continue
+ }
+
+ if matchVal.argType() == StackBytes && bytes.Equal(matchVal.Bytes, stackArg.Bytes) {
+ matchedIdx = i
+ break
+ } else if matchVal.argType() == StackUint64 && matchVal.Uint == stackArg.Uint {
+ matchedIdx = i
+ break
+ }
+ }
+
+ target, err := switchTarget(cx, uint64(matchedIdx))
+ if err != nil {
+ return err
+ }
+ cx.nextpc = target
+ return nil
+}
+
const protoByte = 0x8a
func opCallSub(cx *EvalContext) error {
@@ -3011,7 +3184,7 @@ func (cx *EvalContext) getRound() uint64 {
}
func (cx *EvalContext) getLatestTimestamp() (uint64, error) {
- ts := cx.Ledger.LatestTimestamp()
+ ts := cx.Ledger.PrevTimestamp()
if ts < 0 {
return 0, fmt.Errorf("latest timestamp %d < 0", ts)
}
@@ -3626,24 +3799,30 @@ func opSetByte(cx *EvalContext) error {
return nil
}
-func opExtractImpl(x []byte, start, length int) ([]byte, error) {
+func extractCarefully(x []byte, start, length uint64) ([]byte, error) {
+ if start > uint64(len(x)) {
+ return nil, fmt.Errorf("extraction start %d is beyond length: %d", start, len(x))
+ }
end := start + length
- if start > len(x) || end > len(x) {
- return nil, errors.New("extract range beyond length of string")
+ if end < start {
+ return nil, fmt.Errorf("extraction end exceeds uint64")
+ }
+ if end > uint64(len(x)) {
+ return nil, fmt.Errorf("extraction end %d is beyond length: %d", end, len(x))
}
return x[start:end], nil
}
func opExtract(cx *EvalContext) error {
last := len(cx.stack) - 1
- startIdx := cx.program[cx.pc+1]
- lengthIdx := cx.program[cx.pc+2]
+ start := uint64(cx.program[cx.pc+1])
+ length := uint64(cx.program[cx.pc+2])
// Shortcut: if length is 0, take bytes from start index to the end
- length := int(lengthIdx)
if length == 0 {
- length = len(cx.stack[last].Bytes) - int(startIdx)
+ // If length has wrapped, it's because start > len(), so extractCarefully will report
+ length = uint64(len(cx.stack[last].Bytes) - int(start))
}
- bytes, err := opExtractImpl(cx.stack[last].Bytes, int(startIdx), length)
+ bytes, err := extractCarefully(cx.stack[last].Bytes, start, length)
cx.stack[last].Bytes = bytes
return err
}
@@ -3651,18 +3830,18 @@ func opExtract(cx *EvalContext) error {
func opExtract3(cx *EvalContext) error {
last := len(cx.stack) - 1 // length
prev := last - 1 // start
- byteArrayIdx := prev - 1 // bytes
- startIdx := cx.stack[prev].Uint
- lengthIdx := cx.stack[last].Uint
- if startIdx > math.MaxInt32 || lengthIdx > math.MaxInt32 {
- return errors.New("extract range beyond length of string")
- }
- bytes, err := opExtractImpl(cx.stack[byteArrayIdx].Bytes, int(startIdx), int(lengthIdx))
- cx.stack[byteArrayIdx].Bytes = bytes
+ pprev := prev - 1 // bytes
+
+ start := cx.stack[prev].Uint
+ length := cx.stack[last].Uint
+ bytes, err := extractCarefully(cx.stack[pprev].Bytes, start, length)
+ cx.stack[pprev].Bytes = bytes
cx.stack = cx.stack[:prev]
return err
}
+// replaceCarefully is used to make a NEW byteslice copy of original, with
+// replacement written over the bytes starting at start.
func replaceCarefully(original []byte, replacement []byte, start uint64) ([]byte, error) {
if start > uint64(len(original)) {
return nil, fmt.Errorf("replacement start %d beyond length: %d", start, len(original))
@@ -3731,11 +3910,11 @@ func convertBytesToInt(x []byte) uint64 {
return out
}
-func opExtractNBytes(cx *EvalContext, n int) error {
+func opExtractNBytes(cx *EvalContext, n uint64) error {
last := len(cx.stack) - 1 // start
prev := last - 1 // bytes
- startIdx := cx.stack[last].Uint
- bytes, err := opExtractImpl(cx.stack[prev].Bytes, int(startIdx), n) // extract n bytes
+ start := cx.stack[last].Uint
+ bytes, err := extractCarefully(cx.stack[prev].Bytes, start, n) // extract n bytes
if err != nil {
return err
}
@@ -3784,7 +3963,7 @@ func (cx *EvalContext) accountReference(account stackValue) (basics.Address, uin
invalidIndex := uint64(len(cx.txn.Txn.Accounts) + 1)
// Allow an address for an app that was created in group
if err != nil && cx.version >= createdResourcesVersion {
- for _, appID := range cx.created.apps {
+ for _, appID := range cx.available.apps {
createdAddress := cx.getApplicationAddress(appID)
if addr == createdAddress {
return addr, invalidIndex, nil
@@ -3914,13 +4093,8 @@ func opAppLocalGetEx(cx *EvalContext) error {
return err
}
- var isOk stackValue
- if ok {
- isOk.Uint = 1
- }
-
cx.stack[pprev] = result
- cx.stack[prev] = isOk
+ cx.stack[prev] = boolToSV(ok)
cx.stack = cx.stack[:last]
return nil
}
@@ -3989,13 +4163,8 @@ func opAppGlobalGetEx(cx *EvalContext) error {
return err
}
- var isOk stackValue
- if ok {
- isOk.Uint = 1
- }
-
cx.stack[prev] = result
- cx.stack[last] = isOk
+ cx.stack[last] = boolToSV(ok)
return nil
}
@@ -4007,6 +4176,13 @@ func opAppLocalPut(cx *EvalContext) error {
sv := cx.stack[last]
key := string(cx.stack[prev].Bytes)
+ // Enforce key lengths. Now, this is the same as enforced by ledger, but if
+ // it ever to change in proto, we would need to isolate changes to different
+ // program versions. (so a v6 app could not see a bigger key, for example)
+ if len(key) > cx.Proto.MaxAppKeyLen {
+ return fmt.Errorf("key too long: length was %d, maximum is %d", len(key), cx.Proto.MaxAppKeyLen)
+ }
+
addr, accountIdx, err := cx.mutableAccountReference(cx.stack[pprev])
if err != nil {
return err
@@ -4026,6 +4202,17 @@ func opAppLocalPut(cx *EvalContext) error {
}
cx.txn.EvalDelta.LocalDeltas[accountIdx][key] = tv.ToValueDelta()
}
+
+ // Enforce maximum value length (also enforced by ledger)
+ if tv.Type == basics.TealBytesType {
+ if len(tv.Bytes) > cx.Proto.MaxAppBytesValueLen {
+ return fmt.Errorf("value too long for key 0x%x: length was %d", key, len(tv.Bytes))
+ }
+ if sum := len(key) + len(tv.Bytes); sum > cx.Proto.MaxAppSumKeyValueLens {
+ return fmt.Errorf("key/value total too long for key 0x%x: sum was %d", key, sum)
+ }
+ }
+
err = cx.Ledger.SetLocal(addr, cx.appID, key, tv, accountIdx)
if err != nil {
return err
@@ -4042,6 +4229,14 @@ func opAppGlobalPut(cx *EvalContext) error {
sv := cx.stack[last]
key := string(cx.stack[prev].Bytes)
+ // Enforce maximum key length. Currently this is the same as enforced by
+ // ledger. If it were ever to change in proto, we would need to isolate
+ // changes to different program versions. (so a v6 app could not see a
+ // bigger key, for example)
+ if len(key) > cx.Proto.MaxAppKeyLen {
+ return fmt.Errorf("key too long: length was %d, maximum is %d", len(key), cx.Proto.MaxAppKeyLen)
+ }
+
// if writing the same value, don't record in EvalDelta, matching ledger
// behavior with previous BuildEvalDelta mechanism
etv, ok, err := cx.Ledger.GetGlobal(cx.appID, key)
@@ -4053,6 +4248,16 @@ func opAppGlobalPut(cx *EvalContext) error {
cx.txn.EvalDelta.GlobalDelta[key] = tv.ToValueDelta()
}
+ // Enforce maximum value length (also enforced by ledger)
+ if tv.Type == basics.TealBytesType {
+ if len(tv.Bytes) > cx.Proto.MaxAppBytesValueLen {
+ return fmt.Errorf("value too long for key 0x%x: length was %d", key, len(tv.Bytes))
+ }
+ if sum := len(key) + len(tv.Bytes); sum > cx.Proto.MaxAppSumKeyValueLens {
+ return fmt.Errorf("key/value total too long for key 0x%x: sum was %d", key, sum)
+ }
+ }
+
err = cx.Ledger.SetGlobal(cx.appID, key, tv)
if err != nil {
return err
@@ -4138,7 +4343,7 @@ func appReference(cx *EvalContext, ref uint64, foreign bool) (basics.AppIndex, e
}
// or was created in group
if cx.version >= createdResourcesVersion {
- for _, appID := range cx.created.apps {
+ for _, appID := range cx.available.apps {
if appID == basics.AppIndex(ref) {
return appID, nil
}
@@ -4177,7 +4382,7 @@ func asaReference(cx *EvalContext, ref uint64, foreign bool) (basics.AssetIndex,
}
// or was created in group
if cx.version >= createdResourcesVersion {
- for _, assetID := range cx.created.asas {
+ for _, assetID := range cx.available.asas {
if assetID == basics.AssetIndex(ref) {
return assetID, nil
}
@@ -4338,6 +4543,26 @@ func opAcctParamsGet(cx *EvalContext) error {
value.Uint = account.MinBalance(cx.Proto).Raw
case AcctAuthAddr:
value.Bytes = account.AuthAddr[:]
+
+ case AcctTotalNumUint:
+ value.Uint = uint64(account.TotalAppSchema.NumUint)
+ case AcctTotalNumByteSlice:
+ value.Uint = uint64(account.TotalAppSchema.NumByteSlice)
+ case AcctTotalExtraAppPages:
+ value.Uint = uint64(account.TotalExtraAppPages)
+
+ case AcctTotalAppsCreated:
+ value.Uint = account.TotalAppParams
+ case AcctTotalAppsOptedIn:
+ value.Uint = account.TotalAppLocalStates
+ case AcctTotalAssetsCreated:
+ value.Uint = account.TotalAssetParams
+ case AcctTotalAssets:
+ value.Uint = account.TotalAssets
+ case AcctTotalBoxes:
+ value.Uint = account.TotalBoxes
+ case AcctTotalBoxBytes:
+ value.Uint = account.TotalBoxBytes
}
cx.stack[last] = value
cx.stack = append(cx.stack, boolToSV(account.MicroAlgos.Raw > 0))
@@ -4464,7 +4689,7 @@ func (cx *EvalContext) availableAsset(sv stackValue) (basics.AssetIndex, error)
}
// or was created in group
if cx.version >= createdResourcesVersion {
- for _, assetID := range cx.created.asas {
+ for _, assetID := range cx.available.asas {
if assetID == aid {
return aid, nil
}
@@ -4492,7 +4717,7 @@ func (cx *EvalContext) availableApp(sv stackValue) (basics.AppIndex, error) {
}
// or was created in group
if cx.version >= createdResourcesVersion {
- for _, appID := range cx.created.apps {
+ for _, appID := range cx.available.apps {
if appID == aid {
return aid, nil
}
diff --git a/data/transactions/logic/evalAppTxn_test.go b/data/transactions/logic/evalAppTxn_test.go
index 35d8f28e3..25a073ba3 100644
--- a/data/transactions/logic/evalAppTxn_test.go
+++ b/data/transactions/logic/evalAppTxn_test.go
@@ -117,6 +117,10 @@ func TestFieldTypes(t *testing.T) {
TestApp(t, NoTrack("itxn_begin; byte 0x01; itxn_field XferAsset;"), ep, "not a uint64")
TestApp(t, NoTrack("itxn_begin; byte 0x01; itxn_field AssetAmount;"), ep, "not a uint64")
+ // get coverage on uintMaxed()
+ TestApp(t, NoTrack("itxn_begin; byte \"pay\"; itxn_field ExtraProgramPages;"), ep, "not a uint64")
+ // get coverage on bool()
+ TestApp(t, NoTrack("itxn_begin; byte \"pay\"; itxn_field Nonparticipation;"), ep, "not a uint64")
}
func appAddr(id int) basics.Address {
@@ -253,7 +257,7 @@ func TestRekeyPay(t *testing.T) {
TestApp(t, "txn Sender; txn Accounts 1; int 100"+pay+"; int 1", ep)
// Note that the Sender would fail min balance check if we did it here.
// It seems proper to wait until end of txn though.
- // See explanation in logicLedger's Perform()
+ // See explanation in cowRoundState's Perform()
}
func TestRekeyBack(t *testing.T) {
@@ -496,7 +500,7 @@ func TestNumInnerPooled(t *testing.T) {
tx := txntest.Txn{
Type: protocol.ApplicationCallTx,
}.SignedTxn()
- ledger := MakeLedger(nil)
+ ledger := NewLedger(nil)
ledger.NewApp(tx.Txn.Receiver, 888, basics.AppParams{})
ledger.NewAccount(appAddr(888), 1000000)
short := pay + ";int 1"
@@ -770,6 +774,8 @@ func TestFieldSetting(t *testing.T) {
"not an address")
TestApp(t, "itxn_begin; int 6; bzero; itxn_field ConfigAssetUnitName; int 1", ep)
+ TestApp(t, NoTrack("itxn_begin; int 6; itxn_field ConfigAssetUnitName; int 1"), ep,
+ "not a byte array")
TestApp(t, "itxn_begin; int 7; bzero; itxn_field ConfigAssetUnitName; int 1", ep,
"value is too long")
@@ -782,6 +788,8 @@ func TestInnerGroup(t *testing.T) {
partitiontest.PartitionTest(t)
ep, tx, ledger := MakeSampleEnv()
+ ep.FeeCredit = nil // default sample env starts at 401
+
ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
// Need both fees and both payments
ledger.NewAccount(appAddr(888), 999+2*MakeTestProto().MinTxnFee)
@@ -802,6 +810,8 @@ func TestInnerFeePooling(t *testing.T) {
partitiontest.PartitionTest(t)
ep, tx, ledger := MakeSampleEnv()
+ ep.FeeCredit = nil // default sample env starts at 401
+
ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
ledger.NewAccount(appAddr(888), 50_000)
pay := `
@@ -1751,12 +1761,17 @@ int 1
`
for _, unified := range []bool{true, false} {
- t.Run(fmt.Sprintf("unified=%t", unified), func(t *testing.T) {
- // t.Parallel() NO! unified variable is actually shared
-
+ t.Run(fmt.Sprintf("unified=%t", unified), func(t *testing.T) { //nolint:paralleltest // NO t.Parallel(). unified variable is actually shared
ep, parentTx, ledger := MakeSampleEnv()
ep.Proto.UnifyInnerTxIDs = unified
+ // Whenever MakeSampleEnv() is changed to create a different
+ // transaction, we must reverse those changes here, so that the
+ // historic test is correct.
+ parentTx.Type = protocol.PaymentTx
+ parentTx.Boxes = nil
+ ep.FeeCredit = nil // else inner's fee will change
+
parentTx.ApplicationID = parentAppID
parentTx.ForeignApps = []basics.AppIndex{
childAppID,
@@ -2073,11 +2088,18 @@ int 1
for _, unified := range []bool{true, false} {
t.Run(fmt.Sprintf("unified=%t", unified), func(t *testing.T) {
- t.Parallel()
+ // t.Parallel() NO! unified variable is actually shared
ep, parentTx, ledger := MakeSampleEnv()
ep.Proto.UnifyInnerTxIDs = unified
+ // Whenever MakeSampleEnv() is changed to create a different
+ // transaction, we must reverse those changes here, so that the
+ // historic test is correct.
+ parentTx.Type = protocol.PaymentTx
+ parentTx.Boxes = nil
+ ep.FeeCredit = nil // else inner's fee will change
+
parentTx.ApplicationID = parentAppID
parentTx.ForeignApps = []basics.AppIndex{
childAppID,
@@ -2202,10 +2224,8 @@ func TestInnerTxIDCaching(t *testing.T) {
parentAppID := basics.AppIndex(888)
childAppID := basics.AppIndex(222)
- for _, unified := range []bool{true, false} {
+ for _, unified := range []bool{true, false} { //nolint:paralleltest // NO t.Parallel(). unified variable is actually shared
t.Run(fmt.Sprintf("unified=%t", unified), func(t *testing.T) {
- // t.Parallel() NO! unified variable is actually shared
-
ep, parentTx, ledger := MakeSampleEnv()
ep.Proto.UnifyInnerTxIDs = unified
@@ -2548,7 +2568,7 @@ func TestNumInnerDeep(t *testing.T) {
ForeignApps: []basics.AppIndex{basics.AppIndex(222)},
}.SignedTxnWithAD()
require.Equal(t, 888, int(tx.Txn.ApplicationID))
- ledger := MakeLedger(nil)
+ ledger := NewLedger(nil)
pay3 := TestProg(t, pay+pay+pay+"int 1;", AssemblerMaxVersion).Program
ledger.NewApp(tx.Txn.Receiver, 222, basics.AppParams{
@@ -2929,6 +2949,7 @@ done:
func TestInfiniteRecursion(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
ep, tx, ledger := MakeSampleEnv()
source := `
diff --git a/data/transactions/logic/evalCrypto_test.go b/data/transactions/logic/evalCrypto_test.go
index 773330fab..e3dfff2f7 100644
--- a/data/transactions/logic/evalCrypto_test.go
+++ b/data/transactions/logic/evalCrypto_test.go
@@ -41,8 +41,8 @@ import (
func TestKeccak256(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
/*
pip install sha3
import sha3
@@ -58,8 +58,8 @@ byte 0xc195eca25a6f4c82bfba0287082ddb0d602ae9230f9cf1f1a40b68f8e2c41567
func TestSHA3_256(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
/*
pip install hashlib
import hashlib
@@ -74,8 +74,8 @@ byte 0xd757297405c5c89f7ceca368ee76c2f1893ee24f654e60032e65fb53b01aae10
func TestSHA512_256(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
/*
pip cryptography
from cryptography.hazmat.backends import default_backend
@@ -176,8 +176,8 @@ pop // output`, "int 1"},
func TestEd25519verify(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
var s crypto.Seed
crypto.RandBytes(s[:])
c := crypto.GenerateSignatureSecrets(s)
@@ -200,26 +200,26 @@ ed25519verify`, pkStr), v)
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
txn.Lsig.Args = [][]byte{data[:], sig[:]}
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn))
// short sig will fail
txn.Lsig.Args[1] = sig[1:]
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn), "invalid signature")
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn), "invalid signature")
// flip a bit and it should not pass
msg1 := "52fdfc072182654f163f5f0f9a621d729566c74d0aa413bf009c9800418c19cd"
data1, err := hex.DecodeString(msg1)
require.NoError(t, err)
txn.Lsig.Args = [][]byte{data1, sig[:]}
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn), "REJECT")
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn), "REJECT")
})
}
}
func TestEd25519VerifyBare(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
var s crypto.Seed
crypto.RandBytes(s[:])
c := crypto.GenerateSignatureSecrets(s)
@@ -240,18 +240,18 @@ ed25519verify_bare`, pkStr), v)
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
txn.Lsig.Args = [][]byte{data[:], sig[:]}
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn))
// short sig will fail
txn.Lsig.Args[1] = sig[1:]
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn), "invalid signature")
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn), "invalid signature")
// flip a bit and it should not pass
msg1 := "52fdfc072182654f163f5f0f9a621d729566c74d0aa413bf009c9800418c19cd"
data1, err := hex.DecodeString(msg1)
require.NoError(t, err)
txn.Lsig.Args = [][]byte{data1, sig[:]}
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn), "REJECT")
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn), "REJECT")
})
}
}
@@ -446,7 +446,7 @@ ecdsa_verify Secp256k1`, hex.EncodeToString(r), hex.EncodeToString(s), hex.Encod
ops := testProg(t, source, 5)
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
- pass, err := EvalSignature(0, defaultEvalParamsWithVersion(&txn, 5))
+ pass, err := EvalSignature(0, defaultEvalParamsWithVersion(5, txn))
require.NoError(t, err)
require.True(t, pass)
}
@@ -552,7 +552,7 @@ ecdsa_verify Secp256r1`, hex.EncodeToString(r), hex.EncodeToString(s), hex.Encod
ops := testProg(t, source, fidoVersion)
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
- pass, err := EvalSignature(0, defaultEvalParamsWithVersion(&txn, fidoVersion))
+ pass, err := EvalSignature(0, defaultEvalParamsWithVersion(fidoVersion, txn))
require.NoError(t, err)
require.True(t, pass)
}
@@ -560,6 +560,7 @@ ecdsa_verify Secp256r1`, hex.EncodeToString(r), hex.EncodeToString(s), hex.Encod
// test compatibility with ethereum signatures
func TestEcdsaEthAddress(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
/*
pip install eth-keys pycryptodome
@@ -589,6 +590,7 @@ byte 0x5ce9454909639d2d17a3f753ce7d93fa0b9ab12e // addr
func TestEcdsaCostVariation(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
// Doesn't matter if the actual verify returns true or false. Just confirm the cost depends on curve.
source := `
@@ -689,7 +691,7 @@ ed25519verify`, pkStr), AssemblerMaxVersion)
var txn transactions.SignedTxn
txn.Lsig.Logic = programs[i]
txn.Lsig.Args = [][]byte{data[i][:], signatures[i][:]}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
pass, err := EvalSignature(0, ep)
if !pass {
b.Log(hex.EncodeToString(programs[i]))
@@ -774,7 +776,7 @@ func benchmarkEcdsa(b *testing.B, source string, curve EcdsaCurve) {
var txn transactions.SignedTxn
txn.Lsig.Logic = data[i].programs
txn.Lsig.Args = [][]byte{data[i].msg[:], data[i].r, data[i].s, data[i].x, data[i].y, data[i].pk, {uint8(data[i].v)}}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
pass, err := EvalSignature(0, ep)
if !pass {
b.Log(hex.EncodeToString(data[i].programs))
@@ -897,7 +899,7 @@ func benchmarkBn256(b *testing.B, source string) {
var txn transactions.SignedTxn
txn.Lsig.Logic = data[i].programs
txn.Lsig.Args = [][]byte{data[i].a, data[i].k, data[i].g1, data[i].g2}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
pass, err := EvalSignature(0, ep)
if !pass {
b.Log(hex.EncodeToString(data[i].programs))
diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go
index f5c87abb3..0d0676c16 100644
--- a/data/transactions/logic/evalStateful_test.go
+++ b/data/transactions/logic/evalStateful_test.go
@@ -49,17 +49,23 @@ func makeSampleEnv() (*EvalParams, *transactions.Transaction, *Ledger) {
}
func makeSampleEnvWithVersion(version uint64) (*EvalParams, *transactions.Transaction, *Ledger) {
- ep := defaultEvalParamsWithVersion(nil, version)
- ep.TxnGroup = transactions.WrapSignedTxnsWithAD(makeSampleTxnGroup(makeSampleTxn()))
- ledger := MakeLedger(map[basics.Address]uint64{})
+ // We'd usually like an app in the group, so that the ep created is
+ // "complete". But to keep as many old tests working as possible, if
+ // version < appsEnabledVersion, don't put an appl txn in it.
+ firstTxn := makeSampleTxn()
+ if version >= appsEnabledVersion {
+ firstTxn.Txn.Type = protocol.ApplicationCallTx
+ }
+ ep := defaultEvalParamsWithVersion(version, makeSampleTxnGroup(firstTxn)...)
+ ledger := NewLedger(nil)
ep.SigLedger = ledger
ep.Ledger = ledger
return ep, &ep.TxnGroup[0].Txn, ledger
}
func makeOldAndNewEnv(version uint64) (*EvalParams, *EvalParams, *Ledger) {
- new, _, sharedLedger := makeSampleEnv()
- old, _, _ := makeSampleEnvWithVersion(version)
+ new, _, sharedLedger := makeSampleEnvWithVersion(version)
+ old, _, _ := makeSampleEnvWithVersion(version - 1)
old.Ledger = sharedLedger
return old, new, sharedLedger
}
@@ -238,8 +244,8 @@ log
// check err opcode work in both modes
source := "err"
- testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(nil), "err opcode executed")
- testApp(t, source, defaultEvalParams(nil), "err opcode executed")
+ testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(), "err opcode executed")
+ testApp(t, source, defaultEvalParams(), "err opcode executed")
// check that ed25519verify and arg is not allowed in stateful mode between v2-v4
disallowedV4 := []string{
@@ -252,7 +258,7 @@ log
}
for _, source := range disallowedV4 {
ops := testProg(t, source, 4)
- testAppBytes(t, ops.Program, defaultEvalParams(nil),
+ testAppBytes(t, ops.Program, defaultEvalParams(),
"not allowed in current mode", "not allowed in current mode")
}
@@ -266,7 +272,7 @@ log
}
for _, source := range disallowed {
ops := testProg(t, source, AssemblerMaxVersion)
- testAppBytes(t, ops.Program, defaultEvalParams(nil),
+ testAppBytes(t, ops.Program, defaultEvalParams(),
"not allowed in current mode", "not allowed in current mode")
}
@@ -289,7 +295,7 @@ log
}
for _, source := range statefulOpcodeCalls {
- testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(nil),
+ testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(),
"not allowed in current mode", "not allowed in current mode")
}
@@ -328,7 +334,7 @@ func TestBalance(t *testing.T) {
testApp(t, text, ep)
}
-func testApps(t *testing.T, programs []string, txgroup []transactions.SignedTxn, version uint64, ledger LedgerForLogic,
+func testApps(t *testing.T, programs []string, txgroup []transactions.SignedTxn, version uint64, ledger *Ledger,
expected ...Expect) {
t.Helper()
codes := make([][]byte, len(programs))
@@ -348,8 +354,9 @@ func testApps(t *testing.T, programs []string, txgroup []transactions.SignedTxn,
}
ep := NewEvalParams(transactions.WrapSignedTxnsWithAD(txgroup), makeTestProtoV(version), &transactions.SpecialAddresses{})
if ledger == nil {
- ledger = MakeLedger(nil)
+ ledger = NewLedger(nil)
}
+ ledger.Reset()
ep.Ledger = ledger
testAppsBytes(t, codes, ep, expected...)
}
@@ -359,11 +366,15 @@ func testAppsBytes(t *testing.T, programs [][]byte, ep *EvalParams, expected ...
require.Equal(t, len(programs), len(ep.TxnGroup))
for i := range ep.TxnGroup {
if programs[i] != nil {
+ appID := ep.TxnGroup[i].Txn.ApplicationID
+ if appID == 0 {
+ appID = basics.AppIndex(888)
+ }
if len(expected) > 0 && expected[0].l == i {
- testAppFull(t, programs[i], i, basics.AppIndex(888), ep, expected[0].s)
+ testAppFull(t, programs[i], i, appID, ep, expected[0].s)
break // Stop after first failure
} else {
- testAppFull(t, programs[i], i, basics.AppIndex(888), ep)
+ testAppFull(t, programs[i], i, appID, ep)
}
}
}
@@ -379,7 +390,7 @@ func testAppBytes(t *testing.T, program []byte, ep *EvalParams, problems ...stri
t.Helper()
ep.reset()
aid := ep.TxnGroup[0].Txn.ApplicationID
- if aid == basics.AppIndex(0) {
+ if aid == 0 {
aid = basics.AppIndex(888)
}
return testAppFull(t, program, 0, aid, ep, problems...)
@@ -423,7 +434,7 @@ func testAppFull(t *testing.T, program []byte, gi int, aid basics.AppIndex, ep *
// the best way to be concise about all sorts of tests.
if ep.Ledger == nil {
- ep.Ledger = MakeLedger(nil)
+ ep.Ledger = NewLedger(nil)
}
pass, err := EvalApp(program, gi, aid, ep)
@@ -488,24 +499,13 @@ func TestMinBalance(t *testing.T) {
func TestAppCheckOptedIn(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
- txn := makeSampleTxn()
- txgroup := makeSampleTxnGroup(txn)
- now := defaultEvalParams(&txn)
- now.TxnGroup = transactions.WrapSignedTxnsWithAD(txgroup)
- pre := defaultEvalParamsWithVersion(&txn, directRefEnabledVersion-1)
- pre.TxnGroup = transactions.WrapSignedTxnsWithAD(txgroup)
+ pre, now, ledger := makeOldAndNewEnv(directRefEnabledVersion)
- ledger := MakeLedger(
- map[basics.Address]uint64{
- txn.Txn.Receiver: 1,
- txn.Txn.Sender: 1,
- },
- )
- now.Ledger = ledger
- pre.Ledger = ledger
+ txn := pre.TxnGroup[0]
+ ledger.NewAccount(txn.Txn.Receiver, 1)
+ ledger.NewAccount(txn.Txn.Sender, 1)
testApp(t, "int 2; int 100; app_opted_in; int 1; ==", now, "invalid Account reference")
// Receiver is not opted in
@@ -561,7 +561,7 @@ exit:
int 1
==`
- pre, now, ledger := makeOldAndNewEnv(directRefEnabledVersion - 1)
+ pre, now, ledger := makeOldAndNewEnv(directRefEnabledVersion)
ledger.NewAccount(now.TxnGroup[0].Txn.Receiver, 1)
testApp(t, text, now, "invalid Account reference")
@@ -694,7 +694,6 @@ int 0
func TestAppReadGlobalState(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
text := `int 0
@@ -720,7 +719,7 @@ byte 0x414c474f
==
&&
`
- pre, now, ledger := makeOldAndNewEnv(directRefEnabledVersion - 1)
+ pre, now, ledger := makeOldAndNewEnv(directRefEnabledVersion)
ledger.NewAccount(now.TxnGroup[0].Txn.Sender, 1)
now.TxnGroup[0].Txn.ApplicationID = 100
@@ -770,13 +769,13 @@ int 4141
now.TxnGroup[0].Txn.ApplicationID = 0
now.TxnGroup[0].Txn.ForeignApps = []basics.AppIndex{100}
- testAppFull(t, testProg(t, text, LogicVersion).Program, 0, 100, now)
+ testAppFull(t, testProg(t, text, directRefEnabledVersion).Program, 0, 100, now)
// Direct reference to the current app also works
now.TxnGroup[0].Txn.ForeignApps = []basics.AppIndex{}
- testAppFull(t, testProg(t, strings.Replace(text, "int 1 // ForeignApps index", "int 100", -1), LogicVersion).Program,
+ testAppFull(t, testProg(t, strings.Replace(text, "int 1 // ForeignApps index", "int 100", -1), directRefEnabledVersion).Program,
0, 100, now)
- testAppFull(t, testProg(t, strings.Replace(text, "int 1 // ForeignApps index", "global CurrentApplicationID", -1), LogicVersion).Program,
+ testAppFull(t, testProg(t, strings.Replace(text, "int 1 // ForeignApps index", "global CurrentApplicationID", -1), directRefEnabledVersion).Program,
0, 100, now)
}
@@ -918,11 +917,11 @@ func testAssetsByVersion(t *testing.T, assetsTestProgram string, version uint64)
}
}
- txn := makeSampleTxn()
- pre := defaultEvalParamsWithVersion(&txn, directRefEnabledVersion-1)
+ txn := makeSampleAppl(888)
+ pre := defaultEvalParamsWithVersion(directRefEnabledVersion-1, txn)
require.GreaterOrEqual(t, version, uint64(directRefEnabledVersion))
- now := defaultEvalParamsWithVersion(&txn, version)
- ledger := MakeLedger(
+ now := defaultEvalParamsWithVersion(version, txn)
+ ledger := NewLedger(
map[basics.Address]uint64{
txn.Txn.Sender: 1,
},
@@ -1132,6 +1131,59 @@ func TestAcctParams(t *testing.T) {
source = "int 0; acct_params_get AcctAuthAddr; assert; global ZeroAddress; =="
testApp(t, source, ep)
+
+ // No apps or schema at first, then 1 created and the global schema noted
+ source = "int 0; acct_params_get AcctTotalAppsCreated; assert; !"
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalNumUint; assert; !"
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalNumByteSlice; assert; !"
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalExtraAppPages; assert; !"
+ testApp(t, source, ep)
+ ledger.NewApp(tx.Sender, 2000, basics.AppParams{
+ StateSchemas: basics.StateSchemas{
+ LocalStateSchema: basics.StateSchema{
+ NumUint: 6,
+ NumByteSlice: 7,
+ },
+ GlobalStateSchema: basics.StateSchema{
+ NumUint: 8,
+ NumByteSlice: 9,
+ },
+ },
+ ExtraProgramPages: 2,
+ })
+ source = "int 0; acct_params_get AcctTotalAppsCreated; assert; int 1; =="
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalNumUint; assert; int 8; =="
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalNumByteSlice; assert; int 9; =="
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalExtraAppPages; assert; int 2; =="
+ testApp(t, source, ep)
+
+ // Not opted in at first, then opted into 1, schema added
+ source = "int 0; acct_params_get AcctTotalAppsOptedIn; assert; !"
+ testApp(t, source, ep)
+ ledger.NewLocals(tx.Sender, 2000)
+ source = "int 0; acct_params_get AcctTotalAppsOptedIn; assert; int 1; =="
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalNumUint; assert; int 8; int 6; +; =="
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalNumByteSlice; assert; int 9; int 7; +; =="
+ testApp(t, source, ep)
+
+ // No ASAs at first, then 1 created AND in total
+ source = "int 0; acct_params_get AcctTotalAssetsCreated; assert; !"
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalAssets; assert; !"
+ testApp(t, source, ep)
+ ledger.NewAsset(tx.Sender, 3000, basics.AssetParams{})
+ source = "int 0; acct_params_get AcctTotalAssetsCreated; assert; int 1; =="
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalAssets; assert; int 1; =="
+ testApp(t, source, ep)
}
func TestGlobalNonDelete(t *testing.T) {
@@ -1230,13 +1282,14 @@ intc_1
ops := testProg(t, source, AssemblerMaxVersion)
- txn := makeSampleTxn()
+ var txn transactions.SignedTxn
+ txn.Txn.Type = protocol.ApplicationCallTx
txn.Txn.ApplicationID = 100
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
err := CheckContract(ops.Program, ep)
require.NoError(t, err)
- ledger := MakeLedger(
+ ledger := NewLedger(
map[basics.Address]uint64{
txn.Txn.Sender: 1,
},
@@ -1284,13 +1337,11 @@ intc_1
func TestAppLocalStateReadWrite(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
- txn := makeSampleTxn()
- txn.Txn.ApplicationID = 100
- ep := defaultEvalParams(&txn)
- ledger := MakeLedger(
+ txn := makeSampleAppl(100)
+ ep := defaultEvalParams(txn)
+ ledger := NewLedger(
map[basics.Address]uint64{
txn.Txn.Sender: 1,
},
@@ -1511,6 +1562,36 @@ int 1
require.Equal(t, uint64(0x79), vd.Uint)
}
+func TestAppLocalGlobalErrorCases(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, tx, ledger := makeSampleEnv()
+ ledger.NewApp(tx.Sender, 888, basics.AppParams{})
+
+ testApp(t, fmt.Sprintf(`byte "%v"; int 1; app_global_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppKeyLen+1)), ep, "key too long")
+
+ testApp(t, fmt.Sprintf(`byte "%v"; int 1; app_global_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppKeyLen)), ep)
+
+ ledger.NewLocals(tx.Sender, 888)
+ testApp(t, fmt.Sprintf(`int 0; byte "%v"; int 1; app_local_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppKeyLen+1)), ep, "key too long")
+
+ testApp(t, fmt.Sprintf(`int 0; byte "%v"; int 1; app_local_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppKeyLen)), ep)
+
+ testApp(t, fmt.Sprintf(`byte "foo"; byte "%v"; app_global_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppBytesValueLen+1)), ep, "value too long for key")
+
+ testApp(t, fmt.Sprintf(`byte "foo"; byte "%v"; app_global_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppBytesValueLen)), ep)
+
+ testApp(t, fmt.Sprintf(`int 0; byte "foo"; byte "%v"; app_local_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppBytesValueLen+1)), ep, "value too long for key")
+
+ testApp(t, fmt.Sprintf(`int 0; byte "foo"; byte "%v"; app_local_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppBytesValueLen)), ep)
+
+ ep.Proto.MaxAppSumKeyValueLens = 2 // Override to generate error.
+ testApp(t, `byte "foo"; byte "foo"; app_global_put; int 1`, ep, "key/value total too long for key")
+
+ testApp(t, `int 0; byte "foo"; byte "foo"; app_local_put; int 1`, ep, "key/value total too long for key")
+}
+
func TestAppGlobalReadWriteDeleteErrors(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -1635,11 +1716,10 @@ int 0x77
==
&&
`
- txn := makeSampleTxn()
- txn.Txn.ApplicationID = 100
+ txn := makeSampleAppl(100)
txn.Txn.ForeignApps = []basics.AppIndex{txn.Txn.ApplicationID}
- ep := defaultEvalParams(&txn)
- ledger := MakeLedger(
+ ep := defaultEvalParams(txn)
+ ledger := NewLedger(
map[basics.Address]uint64{
txn.Txn.Sender: 1,
},
@@ -1774,24 +1854,19 @@ ok2:
byte "myval"
==
`
- txn := makeSampleTxn()
- txn.Txn.ApplicationID = 100
- txn.Txn.ForeignApps = []basics.AppIndex{txn.Txn.ApplicationID, 101}
- ep := defaultEvalParams(&txn)
- ledger := MakeLedger(
- map[basics.Address]uint64{
- txn.Txn.Sender: 1,
- },
- )
- ep.Ledger = ledger
- ledger.NewApp(txn.Txn.Sender, 100, basics.AppParams{})
+
+ ep, txn, ledger := makeSampleEnv()
+ txn.ApplicationID = 100
+ txn.ForeignApps = []basics.AppIndex{txn.ApplicationID, 101}
+ ledger.NewAccount(txn.Sender, 1)
+ ledger.NewApp(txn.Sender, 100, basics.AppParams{})
delta := testApp(t, source, ep, "no such app")
require.Empty(t, delta.GlobalDelta)
require.Empty(t, delta.LocalDeltas)
- ledger.NewApp(txn.Txn.Receiver, 101, basics.AppParams{})
- ledger.NewApp(txn.Txn.Receiver, 100, basics.AppParams{}) // this keeps current app id = 100
+ ledger.NewApp(txn.Receiver, 101, basics.AppParams{})
+ ledger.NewApp(txn.Receiver, 100, basics.AppParams{}) // this keeps current app id = 100
algoValue := basics.TealValue{Type: basics.TealBytesType, Bytes: "myval"}
ledger.NewGlobal(101, "mykey", algoValue)
@@ -1820,14 +1895,10 @@ app_global_get
int 7
==
`
- txn := makeSampleTxn()
- txn.Txn.ApplicationID = 100
- ep := defaultEvalParams(&txn)
- ledger := MakeLedger(
- map[basics.Address]uint64{
- txn.Txn.Sender: 1,
- },
- )
+ txn := makeSampleAppl(100)
+ ep := defaultEvalParams(txn)
+ ledger := NewLedger(nil)
+ ledger.NewAccount(txn.Txn.Sender, 1)
ep.Ledger = ledger
ledger.NewApp(txn.Txn.Sender, 100, basics.AppParams{})
@@ -1837,7 +1908,6 @@ int 7
func TestAppGlobalDelete(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
// check write/delete/read
@@ -1866,16 +1936,10 @@ err
ok:
int 1
`
- txn := makeSampleTxn()
- txn.Txn.ApplicationID = 100
- ep := defaultEvalParams(&txn)
- ledger := MakeLedger(
- map[basics.Address]uint64{
- txn.Txn.Sender: 1,
- },
- )
- ep.Ledger = ledger
- ledger.NewApp(txn.Txn.Sender, 100, basics.AppParams{})
+ ep, txn, ledger := makeSampleEnv()
+ ledger.NewAccount(txn.Sender, 1)
+ txn.ApplicationID = 100
+ ledger.NewApp(txn.Sender, 100, basics.AppParams{})
delta := testApp(t, source, ep)
require.Len(t, delta.GlobalDelta, 2)
@@ -1896,7 +1960,7 @@ byte 0x414c474f
app_global_get_ex
== // two zeros
`
- ep.TxnGroup[0].Txn.ForeignApps = []basics.AppIndex{txn.Txn.ApplicationID}
+ txn.ForeignApps = []basics.AppIndex{txn.ApplicationID}
delta = testApp(t, source, ep)
require.Len(t, delta.GlobalDelta, 1)
vd := delta.GlobalDelta["ALGO"]
@@ -1997,7 +2061,6 @@ int 1
func TestAppLocalDelete(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
// check write/delete/read
@@ -2032,10 +2095,9 @@ err
ok:
int 1
`
- txn := makeSampleTxn()
- txn.Txn.ApplicationID = 100
- ep := defaultEvalParams(&txn)
- ledger := MakeLedger(
+ txn := makeSampleAppl(100)
+ ep := defaultEvalParams(txn)
+ ledger := NewLedger(
map[basics.Address]uint64{
txn.Txn.Sender: 1,
},
@@ -2195,6 +2257,7 @@ int 1
func TestEnumFieldErrors(t *testing.T) {
partitiontest.PartitionTest(t)
+ // t.Parallel() NO! manipulates globalFieldSpecs
source := `txn Amount`
origSpec := txnFieldSpecs[Amount]
@@ -2205,8 +2268,8 @@ func TestEnumFieldErrors(t *testing.T) {
txnFieldSpecs[Amount] = origSpec
}()
- testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(nil), "Amount expected field type is []byte but got uint64")
- testApp(t, source, defaultEvalParams(nil), "Amount expected field type is []byte but got uint64")
+ testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(), "Amount expected field type is []byte but got uint64")
+ testApp(t, source, defaultEvalParams(), "Amount expected field type is []byte but got uint64")
source = `global MinTxnFee`
@@ -2218,8 +2281,8 @@ func TestEnumFieldErrors(t *testing.T) {
globalFieldSpecs[MinTxnFee] = origMinTxnFs
}()
- testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(nil), "MinTxnFee expected field type is []byte but got uint64")
- testApp(t, source, defaultEvalParams(nil), "MinTxnFee expected field type is []byte but got uint64")
+ testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(), "MinTxnFee expected field type is []byte but got uint64")
+ testApp(t, source, defaultEvalParams(), "MinTxnFee expected field type is []byte but got uint64")
ep, tx, ledger := makeSampleEnv()
ledger.NewAccount(tx.Sender, 1)
@@ -2269,13 +2332,13 @@ assert
func TestReturnTypes(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
// Ensure all opcodes return values they are supposed to according to the OpSpecs table
- t.Parallel()
typeToArg := map[StackType]string{
StackUint64: "int 1\n",
StackAny: "int 1\n",
- StackBytes: "byte 0x33343536\n",
+ StackBytes: "byte 0x33343536\n", // Which is the string "3456"
}
ep, tx, ledger := makeSampleEnv()
@@ -2283,6 +2346,9 @@ func TestReturnTypes(t *testing.T) {
tx.ApplicationID = 1
tx.ForeignApps = []basics.AppIndex{tx.ApplicationID}
tx.ForeignAssets = []basics.AssetIndex{basics.AssetIndex(1), basics.AssetIndex(1)}
+ tx.Boxes = []transactions.BoxRef{{
+ Name: []byte("3456"),
+ }}
ep.TxnGroup[0].Lsig.Args = [][]byte{
[]byte("aoeu"),
[]byte("aoeu"),
@@ -2318,7 +2384,7 @@ func TestReturnTypes(t *testing.T) {
ledger.NewAccount(appAddr(1), 1000000)
// We try to form a snippet that will test every opcode, by sandwiching it
- // between arguments that correspond to the opcodes input types, and then
+ // between arguments that correspond to the opcode's input types, and then
// check to see if the proper output types end up on the stack. But many
// opcodes require more specific inputs than a constant string or the number
// 1 for ints. Defaults are also supplied for immediate arguments. For
@@ -2377,6 +2443,9 @@ func TestReturnTypes(t *testing.T) {
"proto": "callsub p; p: proto 0 3",
"bury": ": int 1; int 2; int 3; bury 2; pop; pop;",
+
+ "box_create": "int 9; +; box_create", // make the size match the 10 in CreateBox
+ "box_put": "byte 0x010203040506; concat; box_put", // make the 4 byte arg into a 10
}
/* Make sure the specialCmd tests the opcode in question */
@@ -2399,12 +2468,12 @@ func TestReturnTypes(t *testing.T) {
"vrf_verify": true,
+ "frame_dig": true, // would need a "proto" subroutine
+ "frame_bury": true, // would need a "proto" subroutine
+
"bn256_add": true,
"bn256_scalar_mul": true,
"bn256_pairing": true,
-
- "frame_dig": true, // would need a "proto" subroutine
- "frame_bury": true, // would need a "proto" subroutine
}
byName := OpsByName[LogicVersion]
@@ -2462,6 +2531,9 @@ func TestReturnTypes(t *testing.T) {
ep.reset() // for Trace and budget isolation
ep.pastScratch[0] = &scratchSpace{} // for gload
+ // these allow the box_* opcodes to work
+ ledger.CreateBox(1, "3456", 10)
+ ep.ioBudget = 50
cx := EvalContext{
EvalParams: ep,
@@ -2554,8 +2626,8 @@ func TestBlockSeed(t *testing.T) {
// makeSampleEnv creates txns with fv, lv that don't actually fit the round
// in l. Nothing in most tests cares. But the rule for `block` is related
// to lv and fv, so we set the fv,lv more realistically.
- txn.FirstValid = l.round() - 10
- txn.LastValid = l.round() + 10
+ txn.FirstValid = l.Round() - 10
+ txn.LastValid = l.Round() + 10
// Keep in mind that proto.MaxTxnLife is 1500 in the test proto
@@ -2622,7 +2694,7 @@ func TestPooledAppCallsVerifyOp(t *testing.T) {
pop
int 1`
- ledger := MakeLedger(nil)
+ ledger := NewLedger(nil)
call := transactions.SignedTxn{Txn: transactions.Transaction{Type: protocol.ApplicationCallTx}}
// Simulate test with 2 grouped txn
testApps(t, []string{source, ""}, []transactions.SignedTxn{call, call}, LogicVersion, ledger,
@@ -2658,8 +2730,8 @@ func TestAppInfo(t *testing.T) {
func TestBudget(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
- ep := defaultEvalParams(nil)
source := `
global OpcodeBudget
int 699
@@ -2669,11 +2741,12 @@ global OpcodeBudget
int 695
==
`
- testApp(t, source, ep)
+ testApp(t, source, defaultEvalParams())
}
func TestSelfMutate(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
ep, _, ledger := makeSampleEnv()
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index 130167bf0..9fa175373 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -47,12 +47,15 @@ func makeTestProto() *config.ConsensusParams {
func makeTestProtoV(version uint64) *config.ConsensusParams {
return &config.ConsensusParams{
- LogicSigVersion: version,
- LogicSigMaxCost: 20000,
- Application: version >= appsEnabledVersion,
- MaxAppProgramCost: 700,
- MaxAppKeyLen: 64,
- MaxAppBytesValueLen: 64,
+ LogicSigVersion: version,
+ LogicSigMaxCost: 20000,
+ Application: version >= appsEnabledVersion,
+ MaxAppProgramCost: 700,
+
+ MaxAppKeyLen: 64,
+ MaxAppBytesValueLen: 64,
+ MaxAppSumKeyValueLens: 128,
+
// These must be identical to keep an old backward compat test working
MinTxnFee: 1001,
MinBalance: 1001,
@@ -104,15 +107,18 @@ func makeTestProtoV(version uint64) *config.ConsensusParams {
SupportBecomeNonParticipatingTransactions: true,
UnifyInnerTxIDs: true,
+
+ MaxBoxSize: 1000,
+ BytesPerBoxReference: 100,
}
}
-func defaultEvalParams(txn *transactions.SignedTxn) *EvalParams {
- return defaultEvalParamsWithVersion(txn, LogicVersion)
+func defaultEvalParams(txns ...transactions.SignedTxn) *EvalParams {
+ return defaultEvalParamsWithVersion(LogicVersion, txns...)
}
-func benchmarkEvalParams(txn *transactions.SignedTxn) *EvalParams {
- ep := defaultEvalParamsWithVersion(txn, LogicVersion)
+func benchmarkEvalParams(txn transactions.SignedTxn) *EvalParams {
+ ep := defaultEvalParams(txn)
ep.Trace = nil // Tracing would slow down benchmarks
clone := *ep.Proto
bigBudget := 1000 * 1000 * 1000 // Allow long run times
@@ -123,23 +129,28 @@ func benchmarkEvalParams(txn *transactions.SignedTxn) *EvalParams {
return ep
}
-func defaultEvalParamsWithVersion(txn *transactions.SignedTxn, version uint64) *EvalParams {
- var zero uint64
- ep := &EvalParams{
- Proto: makeTestProtoV(version),
- TxnGroup: make([]transactions.SignedTxnWithAD, 1),
- Specials: &transactions.SpecialAddresses{},
- Trace: &strings.Builder{},
- FeeCredit: &zero,
- SigLedger: MakeLedger(nil),
+func defaultEvalParamsWithVersion(version uint64, txns ...transactions.SignedTxn) *EvalParams {
+ empty := false
+ if len(txns) == 0 {
+ empty = true
+ txns = []transactions.SignedTxn{{Txn: transactions.Transaction{Type: protocol.ApplicationCallTx}}}
}
- if txn != nil {
- ep.TxnGroup[0].SignedTxn = *txn
+ ep := NewEvalParams(transactions.WrapSignedTxnsWithAD(txns), makeTestProtoV(version), &transactions.SpecialAddresses{})
+ ep.Trace = &strings.Builder{}
+ ep.SigLedger = NewLedger(nil)
+ if empty {
+ // We made an app type in order to get a full ep, but that sets MinTealVersion=2
+ ep.TxnGroup[0].Txn.Type = "" // set it back
+ ep.MinAvmVersion = nil // will recalculate in eval()
}
- ep.reset()
return ep
}
+// `supportsAppEval` is a test helper method for disambiguating when `EvalParams` is suitable for logicsig vs app evaluations.
+func (ep *EvalParams) supportsAppEval() bool {
+ return ep.available != nil
+}
+
// reset puts an ep back into its original state. This is in *_test.go because
// no real code should ever need this. EvalParams should be created to evaluate
// a group, and then thrown away.
@@ -156,9 +167,23 @@ func (ep *EvalParams) reset() {
for i := range ep.TxnGroup {
ep.TxnGroup[i].ApplyData = transactions.ApplyData{}
}
- ep.created = &resources{}
+ if ep.available != nil {
+ ep.available.apps = nil
+ ep.available.asas = nil
+ // reinitialize boxes because evaluation can add box refs for app creates.
+ available := NewEvalParams(ep.TxnGroup, ep.Proto, ep.Specials).available
+ if available != nil {
+ ep.available.boxes = available.boxes
+ }
+ ep.available.dirtyBytes = 0
+ }
+ ep.readBudgetChecked = false
ep.appAddrCache = make(map[basics.AppIndex]basics.Address)
- ep.Trace = &strings.Builder{}
+ if ep.Trace != nil {
+ ep.Trace = &strings.Builder{}
+ }
+ ep.txidCache = nil
+ ep.innerTxidCache = nil
}
func TestTooManyArgs(t *testing.T) {
@@ -172,7 +197,7 @@ func TestTooManyArgs(t *testing.T) {
txn.Lsig.Logic = ops.Program
args := [transactions.EvalMaxArgs + 1][]byte{}
txn.Lsig.Args = args[:]
- pass, err := EvalSignature(0, defaultEvalParams(&txn))
+ pass, err := EvalSignature(0, defaultEvalParams(txn))
require.Error(t, err)
require.False(t, pass)
})
@@ -183,7 +208,7 @@ func TestEmptyProgram(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testLogicBytes(t, nil, defaultEvalParams(nil), "invalid", "invalid program (empty)")
+ testLogicBytes(t, nil, defaultEvalParams(), "invalid", "invalid program (empty)")
}
// TestMinAvmVersionParamEval tests eval/check reading the MinAvmVersion from the param
@@ -191,7 +216,7 @@ func TestMinAvmVersionParamEvalCheckSignature(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- params := defaultEvalParams(nil)
+ params := defaultEvalParams()
version2 := uint64(rekeyingEnabledVersion)
params.MinAvmVersion = &version2
program := make([]byte, binary.MaxVarintLen64)
@@ -314,7 +339,7 @@ func TestWrongProtoVersion(t *testing.T) {
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, "int 1", v)
- ep := defaultEvalParamsWithVersion(nil, 0)
+ ep := defaultEvalParamsWithVersion(0)
testAppBytes(t, ops.Program, ep, "LogicSig not supported", "LogicSig not supported")
})
}
@@ -361,7 +386,7 @@ byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E=
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
txn.Lsig.Args = [][]byte{[]byte("=0\x97S\x85H\xe9\x91B\xfd\xdb;1\xf5Z\xaec?\xae\xf2I\x93\x08\x12\x94\xaa~\x06\x08\x849b")}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
err := CheckSignature(0, ep)
require.NoError(t, err)
pass, err := EvalSignature(0, ep)
@@ -422,7 +447,7 @@ func TestTLHC(t *testing.T) {
txn.Lsig.Args = [][]byte{secret}
txn.Txn.FirstValid = 999999
block := bookkeeping.Block{}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
err := CheckSignature(0, ep)
if err != nil {
t.Log(hex.EncodeToString(ops.Program))
@@ -439,7 +464,7 @@ func TestTLHC(t *testing.T) {
txn.Txn.Receiver = a2
txn.Txn.CloseRemainderTo = a2
- ep = defaultEvalParams(&txn)
+ ep = defaultEvalParams(txn)
pass, err = EvalSignature(0, ep)
if !pass {
t.Log(hex.EncodeToString(ops.Program))
@@ -451,7 +476,7 @@ func TestTLHC(t *testing.T) {
txn.Txn.Receiver = a2
txn.Txn.CloseRemainderTo = a2
txn.Txn.FirstValid = 1
- ep = defaultEvalParams(&txn)
+ ep = defaultEvalParams(txn)
pass, err = EvalSignature(0, ep)
if pass {
t.Log(hex.EncodeToString(ops.Program))
@@ -463,7 +488,7 @@ func TestTLHC(t *testing.T) {
txn.Txn.Receiver = a1
txn.Txn.CloseRemainderTo = a1
txn.Txn.FirstValid = 999999
- ep = defaultEvalParams(&txn)
+ ep = defaultEvalParams(txn)
pass, err = EvalSignature(0, ep)
if !pass {
t.Log(hex.EncodeToString(ops.Program))
@@ -475,7 +500,7 @@ func TestTLHC(t *testing.T) {
// wrong answer
txn.Lsig.Args = [][]byte{[]byte("=0\x97S\x85H\xe9\x91B\xfd\xdb;1\xf5Z\xaec?\xae\xf2I\x93\x08\x12\x94\xaa~\x06\x08\x849a")}
block.BlockHeader.Round = 1
- ep = defaultEvalParams(&txn)
+ ep = defaultEvalParams(txn)
pass, err = EvalSignature(0, ep)
if pass {
t.Log(hex.EncodeToString(ops.Program))
@@ -489,22 +514,22 @@ func TestTLHC(t *testing.T) {
func TestU64Math(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
testAccepts(t, "int 0x1234567812345678; int 0x100000000; /; int 0x12345678; ==", 1)
}
func TestItob(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
testAccepts(t, "byte 0x1234567812345678; int 0x1234567812345678; itob; ==", 1)
}
func TestBtoi(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
testAccepts(t, "int 0x1234567812345678; byte 0x1234567812345678; btoi; ==", 1)
testAccepts(t, "int 0x34567812345678; byte 0x34567812345678; btoi; ==", 1)
testAccepts(t, "int 0x567812345678; byte 0x567812345678; btoi; ==", 1)
@@ -913,7 +938,7 @@ func TestTxnBadField(t *testing.T) {
t.Parallel()
program := []byte{0x01, 0x31, 0x7f}
- testLogicBytes(t, program, defaultEvalParams(nil), "invalid txn field")
+ testLogicBytes(t, program, defaultEvalParams(), "invalid txn field")
// TODO: Check should know the type stack was wrong
// test txn does not accept ApplicationArgs and Accounts
@@ -926,7 +951,7 @@ func TestTxnBadField(t *testing.T) {
ops := testProg(t, source, AssemblerMaxVersion)
require.Equal(t, txnaOpcode, ops.Program[1])
ops.Program[1] = txnOpcode
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "invalid txn field")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), fmt.Sprintf("invalid txn field %s", field))
}
}
@@ -935,7 +960,7 @@ func TestGtxnBadIndex(t *testing.T) {
t.Parallel()
program := []byte{0x01, 0x33, 0x1, 0x01}
- testLogicBytes(t, program, defaultEvalParams(nil), "txn index 1")
+ testLogicBytes(t, program, defaultEvalParams(), "txn index 1")
}
func TestGtxnBadField(t *testing.T) {
@@ -944,7 +969,7 @@ func TestGtxnBadField(t *testing.T) {
t.Parallel()
program := []byte{0x01, 0x33, 0x0, 127}
// TODO: Check should know the type stack was wrong
- testLogicBytes(t, program, defaultEvalParams(nil), "invalid txn field TxnField(127)")
+ testLogicBytes(t, program, defaultEvalParams(), "invalid txn field TxnField(127)")
// test gtxn does not accept ApplicationArgs and Accounts
txnOpcode := OpsByName[LogicVersion]["txn"].Opcode
@@ -956,7 +981,7 @@ func TestGtxnBadField(t *testing.T) {
ops := testProg(t, source, AssemblerMaxVersion)
require.Equal(t, txnaOpcode, ops.Program[1])
ops.Program[1] = txnOpcode
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "invalid txn field")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), fmt.Sprintf("invalid txn field %s", field))
}
}
@@ -965,7 +990,7 @@ func TestGlobalBadField(t *testing.T) {
t.Parallel()
program := []byte{0x01, 0x32, 127}
- testLogicBytes(t, program, defaultEvalParams(nil), "invalid global field")
+ testLogicBytes(t, program, defaultEvalParams(), "invalid global field")
}
func TestArg(t *testing.T) {
@@ -988,7 +1013,7 @@ func TestArg(t *testing.T) {
[]byte("aoeu4"),
}
ops := testProg(t, source, v)
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn))
})
}
}
@@ -1080,6 +1105,10 @@ const globalV8TestProgram = globalV7TestProgram + `
// No new globals in v8
`
+const globalV9TestProgram = globalV8TestProgram + `
+// No new globals in v9
+`
+
func TestGlobal(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -1099,12 +1128,13 @@ func TestGlobal(t *testing.T) {
6: {CallerApplicationAddress, globalV6TestProgram},
7: {CallerApplicationAddress, globalV7TestProgram},
8: {CallerApplicationAddress, globalV8TestProgram},
+ 9: {CallerApplicationAddress, globalV9TestProgram},
}
// tests keys are versions so they must be in a range 1..AssemblerMaxVersion plus zero version
require.LessOrEqual(t, len(tests), AssemblerMaxVersion+1)
require.Len(t, globalFieldSpecs, int(invalidGlobalField))
- ledger := MakeLedger(nil)
+ ledger := NewLedger(nil)
addr, err := basics.UnmarshalChecksumAddress(testAddr)
require.NoError(t, err)
ledger.NewApp(addr, 888, basics.AppParams{})
@@ -1120,10 +1150,14 @@ func TestGlobal(t *testing.T) {
}
}
- txn := transactions.SignedTxn{}
- txn.Txn.Group = crypto.Digest{0x07, 0x06}
+ appcall := transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ },
+ }
+ appcall.Txn.Group = crypto.Digest{0x07, 0x06}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(appcall)
ep.Ledger = ledger
testApp(t, tests[v].program, ep)
})
@@ -1168,11 +1202,11 @@ int %s
txn := transactions.SignedTxn{}
txn.Txn.Type = tt
if v < appsEnabledVersion && tt == protocol.ApplicationCallTx {
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn),
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn),
"program version must be", "program version must be")
return
}
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn))
})
}
})
@@ -1266,13 +1300,25 @@ txn VoteKeyDilution
int 1
==
&&
+
txn Type
-byte 0x706179
+byte "pay"
+==
+txn Type
+byte "appl"
==
+||
+
&&
+
txn TypeEnum
int 1
==
+txn TypeEnum
+int 6
+==
+||
+
&&
txn XferAsset
int 10
@@ -1576,6 +1622,14 @@ int 1
`
const testTxnProgramTextV8 = testTxnProgramTextV7 + `
+assert
+// though box refs introduced in v8, they are not exposed to AVM (yet?)
+int 1
+`
+
+const testTxnProgramTextV9 = testTxnProgramTextV8 + `
+assert
+int 1
`
func makeSampleTxn() transactions.SignedTxn {
@@ -1642,11 +1696,19 @@ func makeSampleTxn() transactions.SignedTxn {
txn.Txn.AssetFrozen = true
txn.Txn.ForeignAssets = []basics.AssetIndex{55, 77}
txn.Txn.ForeignApps = []basics.AppIndex{56, 100, 111} // 100 must be 2nd, 111 must be present
+ txn.Txn.Boxes = []transactions.BoxRef{{Index: 0, Name: []byte("self")}, {Index: 0, Name: []byte("other")}}
txn.Txn.GlobalStateSchema = basics.StateSchema{NumUint: 3, NumByteSlice: 0}
txn.Txn.LocalStateSchema = basics.StateSchema{NumUint: 1, NumByteSlice: 2}
return txn
}
+func makeSampleAppl(app basics.AppIndex) transactions.SignedTxn {
+ sample := makeSampleTxn()
+ sample.Txn.Type = protocol.ApplicationCallTx
+ sample.Txn.ApplicationID = app
+ return sample
+}
+
// makeSampleTxnGroup creates a sample txn group. If less than two transactions
// are supplied, samples are used.
func makeSampleTxnGroup(txns ...transactions.SignedTxn) []transactions.SignedTxn {
@@ -1681,6 +1743,7 @@ func TestTxn(t *testing.T) {
6: testTxnProgramTextV6,
7: testTxnProgramTextV7,
8: testTxnProgramTextV8,
+ 9: testTxnProgramTextV9,
}
for i, txnField := range TxnFieldNames {
@@ -1706,6 +1769,9 @@ func TestTxn(t *testing.T) {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, source, v)
txn := makeSampleTxn()
+ if v >= appsEnabledVersion {
+ txn.Txn.Type = protocol.ApplicationCallTx
+ }
txn.Txn.ApprovalProgram = ops.Program
txn.Txn.ClearStateProgram = clearOps.Program
txn.Lsig.Logic = ops.Program
@@ -1730,9 +1796,8 @@ func TestTxn(t *testing.T) {
programHash[:],
clearProgramHash[:],
}
- // Since we test GroupIndex ==3, we need to fake up such a group
- ep := defaultEvalParams(nil)
- ep.TxnGroup = transactions.WrapSignedTxnsWithAD([]transactions.SignedTxn{txn, txn, txn, txn})
+ // Since we test GroupIndex ==3, we need a larger group
+ ep := defaultEvalParams(txn, txn, txn, txn)
ep.TxnGroup[2].EvalDelta.Logs = []string{"x", "prefilled"}
if v < txnEffectsVersion {
testLogicFull(t, ops.Program, 3, ep)
@@ -1816,16 +1881,12 @@ func TestGaid(t *testing.T) {
t.Parallel()
check0 := testProg(t, "gaid 0; int 100; ==", 4)
- txn := makeSampleTxn()
- txn.Txn.Type = protocol.ApplicationCallTx
- txgroup := make([]transactions.SignedTxn, 3)
- txgroup[1] = txn
+ appTxn := makeSampleTxn()
+ appTxn.Txn.Type = protocol.ApplicationCallTx
targetTxn := makeSampleTxn()
targetTxn.Txn.Type = protocol.AssetConfigTx
- txgroup[0] = targetTxn
- ep := defaultEvalParams(nil)
- ep.TxnGroup = transactions.WrapSignedTxnsWithAD(txgroup)
- ep.Ledger = MakeLedger(nil)
+ ep := defaultEvalParams(targetTxn, appTxn, makeSampleTxn())
+ ep.Ledger = NewLedger(nil)
// should fail when no creatable was created
_, err := EvalApp(check0.Program, 1, 888, ep)
@@ -1982,8 +2043,7 @@ gtxn 0 Sender
txn.Txn.SelectionPK[:],
txn.Txn.Note,
}
- ep := defaultEvalParams(&txn)
- ep.TxnGroup = transactions.WrapSignedTxnsWithAD(makeSampleTxnGroup(txn))
+ ep := defaultEvalParams(makeSampleTxnGroup(txn)...)
testLogic(t, source, v, ep)
if v >= 3 {
gtxnsProg := strings.ReplaceAll(source, "gtxn 0", "int 0; gtxns")
@@ -2072,7 +2132,7 @@ txna ApplicationArgs 0
txn.Txn.Accounts = make([]basics.Address, 1)
txn.Txn.Accounts[0] = txn.Txn.Sender
txn.Txn.ApplicationArgs = [][]byte{txn.Txn.Sender[:]}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
testLogicBytes(t, ops.Program, ep)
// modify txn field
@@ -2106,7 +2166,7 @@ txn Sender
ops2 := testProg(t, source, AssemblerMaxVersion)
var txn2 transactions.SignedTxn
copy(txn2.Txn.Sender[:], []byte("aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"))
- ep2 := defaultEvalParams(&txn2)
+ ep2 := defaultEvalParams(txn2)
testLogicBytes(t, ops2.Program, ep2)
// check gtxna
@@ -2147,7 +2207,7 @@ txn Sender
ops3 := testProg(t, source, AssemblerMaxVersion)
var txn3 transactions.SignedTxn
copy(txn2.Txn.Sender[:], []byte("aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"))
- ep3 := defaultEvalParams(&txn3)
+ ep3 := defaultEvalParams(txn3)
testLogicBytes(t, ops3.Program, ep3)
}
@@ -2166,10 +2226,10 @@ int 0
var txn transactions.SignedTxn
txn.Txn.ApplicationArgs = make([][]byte, 1)
txn.Txn.ApplicationArgs[0] = []byte("")
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn))
txn.Txn.ApplicationArgs[0] = nil
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn))
source2 := `txna Accounts 1
global ZeroAddress
@@ -2180,10 +2240,10 @@ global ZeroAddress
var txn2 transactions.SignedTxn
txn2.Txn.Accounts = make([]basics.Address, 1)
txn2.Txn.Accounts[0] = basics.Address{}
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn2))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn2))
txn2.Txn.Accounts = make([]basics.Address, 1)
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn2))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn2))
}
func TestTxnBigPrograms(t *testing.T) {
@@ -2209,14 +2269,14 @@ int 1
for i := range txn.Txn.ApprovalProgram {
txn.Txn.ApprovalProgram[i] = byte(i % 7)
}
- testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(&txn))
+ testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(txn))
- testLogic(t, `txna ApprovalProgramPages 2`, AssemblerMaxVersion, defaultEvalParams(&txn),
+ testLogic(t, `txna ApprovalProgramPages 2`, AssemblerMaxVersion, defaultEvalParams(txn),
"invalid ApprovalProgramPages index")
// ClearStateProgram is not in the txn at all
- testLogic(t, `txn NumClearStateProgramPages; !`, AssemblerMaxVersion, defaultEvalParams(&txn))
- testLogic(t, `txna ClearStateProgramPages 0`, AssemblerMaxVersion, defaultEvalParams(&txn),
+ testLogic(t, `txn NumClearStateProgramPages; !`, AssemblerMaxVersion, defaultEvalParams(txn))
+ testLogic(t, `txna ClearStateProgramPages 0`, AssemblerMaxVersion, defaultEvalParams(txn),
"invalid ClearStateProgramPages index")
}
@@ -2236,7 +2296,7 @@ txnas ApplicationArgs
txn.Txn.Accounts = make([]basics.Address, 1)
txn.Txn.Accounts[0] = txn.Txn.Sender
txn.Txn.ApplicationArgs = [][]byte{txn.Txn.Sender[:]}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
testLogicBytes(t, ops.Program, ep)
// check special case: Account 0 == Sender
@@ -2249,7 +2309,7 @@ txn Sender
ops = testProg(t, source, AssemblerMaxVersion)
var txn2 transactions.SignedTxn
copy(txn2.Txn.Sender[:], []byte("aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"))
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn2))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn2))
// check gtxnas
source = `int 1
@@ -2269,7 +2329,7 @@ txn Sender
ops = testProg(t, source, AssemblerMaxVersion)
var txn3 transactions.SignedTxn
copy(txn3.Txn.Sender[:], []byte("aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"))
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn3))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn3))
// check gtxnsas
source = `int 0
@@ -2301,8 +2361,8 @@ int 0x310
func TestStringOps(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
testAccepts(t, `byte 0x123456789abc
substring 1 3
byte 0x3456
@@ -2398,6 +2458,7 @@ len`, 2)
func TestExtractOp(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
+
testAccepts(t, "byte 0x123456789abc; extract 1 2; byte 0x3456; ==", 5)
testAccepts(t, "byte 0x123456789abc; extract 0 6; byte 0x123456789abc; ==", 5)
testAccepts(t, "byte 0x123456789abc; extract 3 0; byte 0x789abc; ==", 5)
@@ -2438,41 +2499,41 @@ func TestExtractFlop(t *testing.T) {
err := testPanics(t, `byte 0xf000000000000000
extract 1 8
len`, 5)
- require.Contains(t, err.Error(), "extract range beyond length of string")
+ require.Contains(t, err.Error(), "extraction end 9")
err = testPanics(t, `byte 0xf000000000000000
extract 9 0
len`, 5)
- require.Contains(t, err.Error(), "extract range beyond length of string")
+ require.Contains(t, err.Error(), "extraction start 9")
err = testPanics(t, `byte 0xf000000000000000
int 4
int 0xFFFFFFFFFFFFFFFE
extract3
len`, 5)
- require.Contains(t, err.Error(), "extract range beyond length of string")
+ require.Contains(t, err.Error(), "extraction end exceeds uint64")
err = testPanics(t, `byte 0xf000000000000000
int 100
int 2
extract3
len`, 5)
- require.Contains(t, err.Error(), "extract range beyond length of string")
+ require.Contains(t, err.Error(), "extraction start 100")
err = testPanics(t, `byte 0xf000000000000000
int 55
extract_uint16`, 5)
- require.Contains(t, err.Error(), "extract range beyond length of string")
+ require.Contains(t, err.Error(), "extraction start 55")
err = testPanics(t, `byte 0xf000000000000000
int 9
extract_uint32`, 5)
- require.Contains(t, err.Error(), "extract range beyond length of string")
+ require.Contains(t, err.Error(), "extraction start 9")
err = testPanics(t, `byte 0xf000000000000000
int 1
extract_uint64`, 5)
- require.Contains(t, err.Error(), "extract range beyond length of string")
+ require.Contains(t, err.Error(), "extraction end 9")
}
func TestReplace(t *testing.T) {
@@ -2506,8 +2567,8 @@ func TestReplace(t *testing.T) {
func TestLoadStore(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
testAccepts(t, "load 3; int 0; ==;", 1)
testAccepts(t, `int 37
@@ -2576,7 +2637,6 @@ int 5
func TestGload(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
// for simple app-call-only transaction groups
@@ -2588,48 +2648,22 @@ func TestGload(t *testing.T) {
simpleCase := scratchTestCase{
tealSources: []string{
- `
-int 2
-store 0
-int 1`,
- `
-gload 0 0
-int 2
-==
-`,
+ `int 2; store 0; int 1`,
+ `gload 0 0; int 2; ==`,
},
}
multipleTxnCase := scratchTestCase{
tealSources: []string{
- `
-byte "txn 1"
-store 0
-int 1`,
- `
-byte "txn 2"
-store 2
-int 1`,
- `
-gload 0 0
-byte "txn 1"
-==
-gload 1 2
-byte "txn 2"
-==
-&&
-`,
+ `byte "txn 1"; store 0; int 1`,
+ `byte "txn 2"; store 2; int 1`,
+ `gload 0 0; byte "txn 1"; ==; gload 1 2; byte "txn 2"; ==; &&`,
},
}
selfCase := scratchTestCase{
tealSources: []string{
- `
-gload 0 0
-int 2
-store 0
-int 1
-`,
+ `gload 0 0; int 2; store 0; int 1`,
},
errTxn: 0,
errContains: "can't use gload on self, use load instead",
@@ -2637,14 +2671,8 @@ int 1
laterTxnSlotCase := scratchTestCase{
tealSources: []string{
- `
-gload 1 0
-int 2
-==`,
- `
-int 2
-store 0
-int 1`,
+ `gload 1 0; int 2; ==`,
+ `int 2; store 0; int 1`,
},
errTxn: 0,
errContains: "gload can't get future scratch space from txn with index 1",
@@ -2665,9 +2693,9 @@ int 1`,
}
if testCase.errContains != "" {
- testApps(t, sources, txgroup, LogicVersion, MakeLedger(nil), Expect{testCase.errTxn, testCase.errContains})
+ testApps(t, sources, txgroup, LogicVersion, nil, Expect{testCase.errTxn, testCase.errContains})
} else {
- testApps(t, sources, txgroup, LogicVersion, MakeLedger(nil))
+ testApps(t, sources, txgroup, LogicVersion, nil)
}
})
}
@@ -2702,20 +2730,17 @@ int 1`,
failCases := []failureCase{nonAppCall, logicSigCall}
for j, failCase := range failCases {
t.Run(fmt.Sprintf("j=%d", j), func(t *testing.T) {
- program := testProg(t, "gload 0 0", AssemblerMaxVersion).Program
- txgroup := []transactions.SignedTxnWithAD{
- {SignedTxn: failCase.firstTxn},
- {},
+ appcall := transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ },
}
- ep := &EvalParams{
- Proto: makeTestProto(),
- TxnGroup: txgroup,
- pastScratch: make([]*scratchSpace, 2),
- SigLedger: MakeLedger(nil),
- }
+ ep := defaultEvalParams(failCase.firstTxn, appcall)
+ ep.SigLedger = NewLedger(nil)
+ program := testProg(t, "gload 0 0", AssemblerMaxVersion).Program
switch failCase.runMode {
case modeApp:
testAppBytes(t, program, ep, failCase.errContains)
@@ -2774,7 +2799,7 @@ int 1
txgroup[j].Txn.Type = protocol.ApplicationCallTx
}
- testApps(t, sources, txgroup, LogicVersion, MakeLedger(nil))
+ testApps(t, sources, txgroup, LogicVersion, nil)
}
const testCompareProgramText = `int 35
@@ -2863,19 +2888,19 @@ func TestSlowLogic(t *testing.T) {
// v1overspend fails (on v1)
ops := testProg(t, v1overspend, 1)
// We should never Eval this after it fails Check(), but nice to see it also fails.
- testLogicBytes(t, ops.Program, defaultEvalParamsWithVersion(nil, 1),
+ testLogicBytes(t, ops.Program, defaultEvalParamsWithVersion(1),
"static cost", "dynamic cost")
// v2overspend passes Check, even on v2 proto, because the old low cost is "grandfathered"
ops = testProg(t, v2overspend, 1)
- testLogicBytes(t, ops.Program, defaultEvalParamsWithVersion(nil, 2))
+ testLogicBytes(t, ops.Program, defaultEvalParamsWithVersion(2))
// even the shorter, v2overspend, fails when compiled as v2 code
ops = testProg(t, v2overspend, 2)
- testLogicBytes(t, ops.Program, defaultEvalParamsWithVersion(nil, 2),
+ testLogicBytes(t, ops.Program, defaultEvalParamsWithVersion(2),
"static cost", "dynamic cost")
// in v4 cost is still 134, but only matters in Eval, not Check, so both fail there
- ep4 := defaultEvalParamsWithVersion(nil, 4)
+ ep4 := defaultEvalParamsWithVersion(4)
ops = testProg(t, v1overspend, 4)
testLogicBytes(t, ops.Program, ep4, "dynamic cost")
@@ -2900,7 +2925,7 @@ func TestStackUnderflow(t *testing.T) {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, `int 1`, v)
ops.Program = append(ops.Program, 0x08) // +
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "stack underflow")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "stack underflow")
})
}
}
@@ -2913,7 +2938,7 @@ func TestWrongStackTypeRuntime(t *testing.T) {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, `int 1`, v)
ops.Program = append(ops.Program, 0x01, 0x15) // sha256, len
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "sha256 arg 0 wanted")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "sha256 arg 0 wanted")
})
}
}
@@ -2926,7 +2951,7 @@ func TestEqMismatch(t *testing.T) {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, `byte 0x1234; int 1`, v)
ops.Program = append(ops.Program, 0x12) // ==
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "cannot compare")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "cannot compare")
// TODO: Check should know the type stack was wrong
})
}
@@ -2940,7 +2965,7 @@ func TestNeqMismatch(t *testing.T) {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, `byte 0x1234; int 1`, v)
ops.Program = append(ops.Program, 0x13) // !=
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "cannot compare")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "cannot compare")
})
}
}
@@ -2953,7 +2978,7 @@ func TestWrongStackTypeRuntime2(t *testing.T) {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, `byte 0x1234; int 1`, v)
ops.Program = append(ops.Program, 0x08) // +
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "+ arg 0 wanted")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "+ arg 0 wanted")
})
}
}
@@ -2971,7 +2996,7 @@ func TestIllegalOp(t *testing.T) {
break
}
}
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "illegal opcode", "illegal opcode")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "illegal opcode", "illegal opcode")
})
}
}
@@ -2989,7 +3014,7 @@ int 1
`, v)
// cut two last bytes - intc_1 and last byte of bnz
ops.Program = ops.Program[:len(ops.Program)-2]
- testLogicBytes(t, ops.Program, defaultEvalParams(nil),
+ testLogicBytes(t, ops.Program, defaultEvalParams(),
"bnz program ends short", "bnz program ends short")
})
}
@@ -3004,7 +3029,7 @@ intc 0
intc 0
bnz done
done:`, 2)
- testLogicBytes(t, ops.Program, defaultEvalParams(nil))
+ testLogicBytes(t, ops.Program, defaultEvalParams())
}
func TestShortBytecblock(t *testing.T) {
@@ -3019,8 +3044,8 @@ func TestShortBytecblock(t *testing.T) {
for i := 2; i < len(fullops.Program); i++ {
program := fullops.Program[:i]
t.Run(hex.EncodeToString(program), func(t *testing.T) {
- testLogicBytes(t, program, defaultEvalParams(nil),
- "bytecblock", "bytecblock")
+ testLogicBytes(t, program, defaultEvalParams(),
+ "bytes list", "bytes list")
})
}
})
@@ -3041,7 +3066,7 @@ func TestShortBytecblock2(t *testing.T) {
t.Run(src, func(t *testing.T) {
program, err := hex.DecodeString(src)
require.NoError(t, err)
- testLogicBytes(t, program, defaultEvalParams(nil), "bytecblock", "bytecblock")
+ testLogicBytes(t, program, defaultEvalParams(), "const bytes list", "const bytes list")
})
}
}
@@ -3078,7 +3103,7 @@ func TestPanic(t *testing.T) {
break
}
}
- params := defaultEvalParams(nil)
+ params := defaultEvalParams()
params.logger = log
params.TxnGroup[0].Lsig.Logic = ops.Program
err := CheckSignature(0, params)
@@ -3092,7 +3117,7 @@ func TestPanic(t *testing.T) {
}
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
- params = defaultEvalParams(&txn)
+ params = defaultEvalParams(txn)
params.logger = log
pass, err := EvalSignature(0, params)
if pass {
@@ -3118,7 +3143,7 @@ func TestProgramTooNew(t *testing.T) {
t.Parallel()
var program [12]byte
vlen := binary.PutUvarint(program[:], evalMaxVersion+1)
- testLogicBytes(t, program[:vlen], defaultEvalParams(nil),
+ testLogicBytes(t, program[:vlen], defaultEvalParams(),
"greater than max supported", "greater than max supported")
}
@@ -3128,7 +3153,7 @@ func TestInvalidVersion(t *testing.T) {
t.Parallel()
program, err := hex.DecodeString("ffffffffffffffffffffffff")
require.NoError(t, err)
- testLogicBytes(t, program, defaultEvalParams(nil), "invalid version", "invalid version")
+ testLogicBytes(t, program, defaultEvalParams(), "invalid version", "invalid version")
}
func TestProgramProtoForbidden(t *testing.T) {
@@ -3137,7 +3162,7 @@ func TestProgramProtoForbidden(t *testing.T) {
t.Parallel()
var program [12]byte
vlen := binary.PutUvarint(program[:], evalMaxVersion)
- ep := defaultEvalParams(nil)
+ ep := defaultEvalParams()
ep.Proto = &config.ConsensusParams{
LogicSigVersion: evalMaxVersion - 1,
}
@@ -3162,16 +3187,16 @@ int 1`, v)
require.Equal(t, ops.Program, canonicalProgramBytes)
ops.Program[7] = 3 // clobber the branch offset to be in the middle of the bytecblock
// Since Eval() doesn't know the jump is bad, we reject "by luck"
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "aligned", "REJECT")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "aligned", "REJECT")
// back branches are checked differently, so test misaligned back branch
ops.Program[6] = 0xff // Clobber the two bytes of offset with 0xff 0xff = -1
ops.Program[7] = 0xff // That jumps into the offset itself (pc + 3 -1)
if v < backBranchEnabledVersion {
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "negative branch", "negative branch")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "negative branch", "negative branch")
} else {
// Again, if we were ever to Eval(), we would not know it's wrong. But we reject here "by luck"
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "back branch target", "REJECT")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "back branch target", "REJECT")
}
})
}
@@ -3194,7 +3219,7 @@ int 1`, v)
require.NoError(t, err)
require.Equal(t, ops.Program, canonicalProgramBytes)
ops.Program[7] = 200 // clobber the branch offset to be beyond the end of the program
- testLogicBytes(t, ops.Program, defaultEvalParams(nil),
+ testLogicBytes(t, ops.Program, defaultEvalParams(),
"outside of program", "outside of program")
})
}
@@ -3218,7 +3243,7 @@ int 1`, v)
require.NoError(t, err)
require.Equal(t, ops.Program, canonicalProgramBytes)
ops.Program[6] = 0x70 // clobber hi byte of branch offset
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "outside", "outside")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "outside", "outside")
})
}
branches := []string{
@@ -3238,7 +3263,7 @@ intc_1
require.NoError(t, err)
ops.Program[7] = 0xf0 // clobber the branch offset - highly negative
ops.Program[8] = 0xff // clobber the branch offset
- testLogicBytes(t, ops.Program, defaultEvalParams(nil),
+ testLogicBytes(t, ops.Program, defaultEvalParams(),
"outside of program", "outside of program")
})
}
@@ -3526,10 +3551,10 @@ func evalLoop(b *testing.B, runs int, program []byte) {
for i := 0; i < runs; i++ {
var txn transactions.SignedTxn
txn.Lsig.Logic = program
- pass, err := EvalSignature(0, benchmarkEvalParams(&txn))
+ pass, err := EvalSignature(0, benchmarkEvalParams(txn))
if !pass {
// rerun to trace it. tracing messes up timing too much
- ep := benchmarkEvalParams(&txn)
+ ep := benchmarkEvalParams(txn)
ep.Trace = &strings.Builder{}
pass, err = EvalSignature(0, ep)
b.Log(ep.Trace.String())
@@ -3796,7 +3821,7 @@ func BenchmarkCheckx5(b *testing.B) {
for _, program := range programs {
var txn transactions.SignedTxn
txn.Lsig.Logic = program
- err := CheckSignature(0, defaultEvalParams(&txn))
+ err := CheckSignature(0, defaultEvalParams(txn))
if err != nil {
require.NoError(b, err)
}
@@ -3900,16 +3925,16 @@ pop
txn.Lsig.Logic = ops.Program
txn.Txn.ApplicationArgs = [][]byte{[]byte("test")}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
testLogicBytes(t, ops.Program, ep)
- ep = defaultEvalParamsWithVersion(&txn, 1)
+ ep = defaultEvalParamsWithVersion(1, txn)
testLogicBytes(t, ops.Program, ep,
"greater than protocol supported version 1", "greater than protocol supported version 1")
// hack the version and fail on illegal opcode
ops.Program[0] = 0x1
- ep = defaultEvalParamsWithVersion(&txn, 1)
+ ep = defaultEvalParamsWithVersion(1, txn)
testLogicBytes(t, ops.Program, ep, "illegal opcode 0x36", "illegal opcode 0x36") // txna
}
@@ -3994,7 +4019,6 @@ byte 0x // empty byte constant
func TestArgType(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
var sv stackValue
@@ -4009,14 +4033,14 @@ func TestArgType(t *testing.T) {
func TestApplicationsDisallowOldTeal(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
const source = "int 1"
txn := makeSampleTxn()
txn.Txn.Type = protocol.ApplicationCallTx
txn.Txn.RekeyTo = basics.Address{}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
for v := uint64(0); v < appsEnabledVersion; v++ {
ops := testProg(t, source, v)
@@ -4029,8 +4053,8 @@ func TestApplicationsDisallowOldTeal(t *testing.T) {
func TestAnyRekeyToOrApplicationRaisesMinAvmVersion(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
const source = "int 1"
// Construct a group of two payments, no rekeying
@@ -4069,8 +4093,7 @@ func TestAnyRekeyToOrApplicationRaisesMinAvmVersion(t *testing.T) {
for ci, cse := range cases {
t.Run(fmt.Sprintf("ci=%d", ci), func(t *testing.T) {
- ep := defaultEvalParams(nil)
- ep.TxnGroup = transactions.WrapSignedTxnsWithAD(cse.group)
+ ep := defaultEvalParams(cse.group...)
// Computed MinAvmVersion should be == validFromVersion
calc := ComputeMinAvmVersion(ep.TxnGroup)
@@ -4080,14 +4103,18 @@ func TestAnyRekeyToOrApplicationRaisesMinAvmVersion(t *testing.T) {
expected := fmt.Sprintf("program version must be >= %d", cse.validFromVersion)
for v := uint64(0); v < cse.validFromVersion; v++ {
ops := testProg(t, source, v)
- testAppBytes(t, ops.Program, ep, expected, expected)
+ if ep.supportsAppEval() {
+ testAppBytes(t, ops.Program, ep, expected, expected)
+ }
testLogicBytes(t, ops.Program, ep, expected, expected)
}
// Should succeed for all versions >= validFromVersion
for v := cse.validFromVersion; v <= AssemblerMaxVersion; v++ {
ops := testProg(t, source, v)
- testAppBytes(t, ops.Program, ep)
+ if ep.supportsAppEval() {
+ testAppBytes(t, ops.Program, ep)
+ }
testLogicBytes(t, ops.Program, ep)
}
})
@@ -4133,7 +4160,7 @@ func TestAllowedOpcodesV2(t *testing.T) {
"gtxn": true,
}
- ep := defaultEvalParams(nil)
+ ep := defaultEvalParams()
cnt := 0
for _, spec := range OpSpecs {
@@ -4186,7 +4213,7 @@ func TestAllowedOpcodesV3(t *testing.T) {
"pushbytes": `pushbytes "stringsfail?"`,
}
- ep := defaultEvalParams(nil)
+ ep := defaultEvalParams()
cnt := 0
for _, spec := range OpSpecs {
@@ -4231,7 +4258,7 @@ func TestRekeyFailsOnOldVersion(t *testing.T) {
ops := testProg(t, "int 1", v)
var txn transactions.SignedTxn
txn.Txn.RekeyTo = basics.Address{1, 2, 3, 4}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
e := fmt.Sprintf("program version must be >= %d", rekeyingEnabledVersion)
testLogicBytes(t, ops.Program, ep, e, e)
})
@@ -4272,13 +4299,13 @@ func testEvaluation(t *testing.T, program string, introduced uint64, tester eval
t.Helper()
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
- ep := defaultEvalParamsWithVersion(&txn, lv)
+ ep := defaultEvalParamsWithVersion(lv, txn)
err := CheckSignature(0, ep)
if err != nil {
t.Log(ep.Trace.String())
}
require.NoError(t, err)
- ep = defaultEvalParamsWithVersion(&txn, lv)
+ ep = defaultEvalParamsWithVersion(lv, txn)
pass, err := EvalSignature(0, ep)
ok := tester(t, pass, err)
if !ok {
@@ -4751,9 +4778,11 @@ func TestBytesMath(t *testing.T) {
testAccepts(t, "byte 0x01; byte 0x01; b/; byte 0x01; ==", 4)
testPanics(t, "byte 0x0200; byte b64(); b/; int 1; return", 4)
testPanics(t, "byte 0x01; byte 0x00; b/; int 1; return", 4)
+ testPanics(t, "int 65; bzero; byte 0x01; b/; int 1; return", 4)
testAccepts(t, "byte 0x10; byte 0x07; b%; byte 0x02; ==; return", 4)
testPanics(t, "byte 0x01; byte 0x00; b%; int 1; return", 4)
+ testPanics(t, "int 65; bzero; byte 0x10; b%", 4)
// Even 128 byte outputs are ok
testAccepts(t, fmt.Sprintf("byte 0x%s; byte 0x%s; b*; len; int 128; ==", effs, effs), 4)
@@ -4778,6 +4807,7 @@ func TestBytesCompare(t *testing.T) {
testAccepts(t, "byte 0x10; byte 0x10; b<; !", 4)
testAccepts(t, "byte 0x10; byte 0x10; b<=", 4)
+ testPanics(t, "byte 0x10; int 65; bzero; b<=", 4)
testAccepts(t, "byte 0x10; int 64; bzero; b>", 4)
testPanics(t, "byte 0x10; int 65; bzero; b>", 4)
@@ -4786,6 +4816,7 @@ func TestBytesCompare(t *testing.T) {
testAccepts(t, "byte 0x11; byte 0x10; b>=", 4)
testAccepts(t, "byte 0x11; byte 0x0011; b>=", 4)
+ testPanics(t, "byte 0x10; int 65; bzero; b>=", 4)
testAccepts(t, "byte 0x11; byte 0x11; b==", 4)
testAccepts(t, "byte 0x0011; byte 0x11; b==", 4)
@@ -4796,6 +4827,7 @@ func TestBytesCompare(t *testing.T) {
testAccepts(t, "byte 0x11; byte 0x00; b!=", 4)
testAccepts(t, "byte 0x0011; byte 0x1100; b!=", 4)
testPanics(t, notrack("byte 0x11; int 17; b!="), 4)
+ testPanics(t, "byte 0x10; int 65; bzero; b!=", 4)
}
func TestBytesBits(t *testing.T) {
@@ -4842,9 +4874,9 @@ func TestLog(t *testing.T) {
t.Parallel()
var txn transactions.SignedTxn
txn.Txn.Type = protocol.ApplicationCallTx
- ledger := MakeLedger(nil)
+ ledger := NewLedger(nil)
ledger.NewApp(txn.Txn.Receiver, 0, basics.AppParams{})
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
ep.Proto = makeTestProtoV(LogicVersion)
ep.Ledger = ledger
testCases := []struct {
@@ -4964,6 +4996,7 @@ func TestPcDetails(t *testing.T) {
t.Run(fmt.Sprintf("i=%d", i), func(t *testing.T) {
ops := testProg(t, test.source, LogicVersion)
ep, _, _ := makeSampleEnv()
+ ep.Trace = &strings.Builder{}
pass, cx, err := EvalContract(ops.Program, 0, 888, ep)
require.Error(t, err)
@@ -5196,16 +5229,12 @@ func TestProtocolParseDuplicateErrMsg(t *testing.T) {
func TestOpJSONRef(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- proto := makeTestProtoV(LogicVersion)
- txn := transactions.SignedTxn{
- Txn: transactions.Transaction{
- Type: protocol.ApplicationCallTx,
- },
- }
- ledger := MakeLedger(nil)
+
+ var txn transactions.SignedTxn
+ txn.Txn.Type = protocol.ApplicationCallTx
+ ledger := NewLedger(nil)
ledger.NewApp(txn.Txn.Receiver, 0, basics.AppParams{})
- ep := defaultEvalParams(&txn)
- ep.Proto = proto
+ ep := defaultEvalParams(txn)
ep.Ledger = ledger
testCases := []struct {
source string
@@ -5691,3 +5720,203 @@ int 88
switch done1 done2; done1: ; done2: ;
`, 8)
}
+
+func TestMatch(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // take the 0th label with int cases
+ testAccepts(t, `
+int 99
+int 100
+int 99
+match zero one
+err
+zero: int 1; return
+one: int 0;
+`, 8)
+
+ // take the 0th label with bytes cases
+ testAccepts(t, `
+byte "0"
+byte "1"
+byte "0"
+match zero one
+err
+zero: int 1; return
+one: int 0;
+`, 8)
+
+ // take the 1th label with int cases
+ testRejects(t, `
+int 99
+int 100
+int 100
+match zero one
+err
+zero: int 1; return
+one: int 0;
+`, 8)
+
+ // take the 1th label with bytes cases
+ testRejects(t, `
+byte "0"
+byte "1"
+byte "1"
+match zero one
+err
+zero: int 1; return
+one: int 0;
+`, 8)
+
+ // same, but jumping to end of program
+ testAccepts(t, `
+int 1; int 99; int 100; int 100
+match zero one
+zero: err
+one:
+`, 8)
+
+ // no match
+ testAccepts(t, `
+int 99
+int 100
+int 101
+match zero one
+int 1; return // falls through to here
+zero: int 0; return
+one: int 0; return
+`, 8)
+
+ // jump forward and backward
+ testAccepts(t, `
+int 99
+start:
+int 1
++
+int 100
+int 101
+dig 2
+match start end
+err
+end:
+int 101
+==
+assert
+int 1
+`, 8)
+
+ // 0 labels are allowed, but weird!
+ testAccepts(t, `
+int 0
+match
+int 1
+`, 8)
+
+ testPanics(t, notrack("match; int 1"), 8)
+
+ // make the match the final instruction
+ testAccepts(t, `
+int 1
+int 100
+int 99
+int 100
+match done1 done2; done1: ; done2: ;
+`, 8)
+
+ // make the switch the final instruction, and don't match
+ testAccepts(t, `
+int 1
+int 1
+int 2
+int 88
+match done1 done2; done1: ; done2: ;
+`, 8)
+
+ // allow mixed types for match cases
+ testAccepts(t, `
+int 1
+int 100
+byte "101"
+byte "101"
+match done1 done2; done1: ; done2: ;
+`, 8)
+
+ testAccepts(t, `
+byte "0"
+int 1
+byte "0"
+match zero one
+err
+zero: int 1; return
+one: int 0;
+`, 8)
+
+ testAccepts(t, `
+byte "0"
+int 1
+int 1
+match zero one
+err
+one: int 1; return
+zero: int 0;
+`, 8)
+
+ testAccepts(t, `
+byte "0"
+byte "1"
+int 1
+match zero one
+int 1; return
+zero: int 0;
+one: int 0;
+`, 8)
+}
+
+func TestPushConsts(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ testAccepts(t, `
+pushints 1 2
+int 2
+==
+assert
+int 1
+==
+assert
+int 1
+`, 8)
+
+ testAccepts(t, `
+pushbytess "1" "2"
+byte "2"
+==
+assert
+byte "1"
+==
+assert
+int 1
+`, 8)
+
+ valsStr := make([]string, 256)
+ for i := range valsStr {
+ valsStr[i] = fmt.Sprintf("%d", i)
+ }
+ source := fmt.Sprintf(`pushints %s`, strings.Join(valsStr, " "))
+ testAccepts(t, source+`
+popn 255
+pop
+int 1
+`, 8)
+
+ for i := range valsStr {
+ valsStr[i] = fmt.Sprintf("\"%d\"", i)
+ }
+ source = fmt.Sprintf(`pushbytess %s`, strings.Join(valsStr, " "))
+ testAccepts(t, source+`
+popn 255
+pop
+int 1
+`, 8)
+}
diff --git a/data/transactions/logic/export_test.go b/data/transactions/logic/export_test.go
index 1a1a21ce2..67346482a 100644
--- a/data/transactions/logic/export_test.go
+++ b/data/transactions/logic/export_test.go
@@ -16,6 +16,8 @@
package logic
+import "github.com/algorand/go-algorand/data/basics"
+
// Export for testing only. See
// https://medium.com/@robiplus/golang-trick-export-for-test-aa16cbd7b8cd for a
// nice explanation. tl;dr: Since some of our testing is in logic_test package,
@@ -30,6 +32,18 @@ func (ep *EvalParams) Reset() {
ep.reset()
}
+// Inefficient (hashing), just a testing convenience
+func (l *Ledger) CreateBox(app basics.AppIndex, name string, size uint64) {
+ l.NewBox(app, name, make([]byte, size), app.Address())
+}
+
+// Inefficient (hashing), just a testing convenience
+func (l *Ledger) DelBoxes(app basics.AppIndex, names ...string) {
+ for _, n := range names {
+ l.DelBox(app, n, app.Address())
+ }
+}
+
var MakeSampleEnv = makeSampleEnv
var MakeSampleEnvWithVersion = makeSampleEnvWithVersion
var MakeSampleTxn = makeSampleTxn
diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go
index cb1685d91..0b62a5239 100644
--- a/data/transactions/logic/fields.go
+++ b/data/transactions/logic/fields.go
@@ -1202,9 +1202,32 @@ const (
AcctBalance AcctParamsField = iota
// AcctMinBalance is algos needed for this accounts apps and assets
AcctMinBalance
- //AcctAuthAddr is the rekeyed address if any, else ZeroAddress
+ // AcctAuthAddr is the rekeyed address if any, else ZeroAddress
AcctAuthAddr
+ // AcctTotalNumUint is the count of all uints from created global apps or opted in locals
+ AcctTotalNumUint
+ // AcctTotalNumByteSlice is the count of all byte slices from created global apps or opted in locals
+ AcctTotalNumByteSlice
+
+ // AcctTotalExtraAppPages is the extra code pages across all apps
+ AcctTotalExtraAppPages
+
+ // AcctTotalAppsCreated is the number of apps created by this account
+ AcctTotalAppsCreated
+ // AcctTotalAppsOptedIn is the number of apps opted in by this account
+ AcctTotalAppsOptedIn
+ // AcctTotalAssetsCreated is the number of ASAs created by this account
+ AcctTotalAssetsCreated
+ // AcctTotalAssets is the number of ASAs opted in by this account (always includes AcctTotalAssetsCreated)
+ AcctTotalAssets
+ // AcctTotalBoxes is the number of boxes created by the app this account is associated with
+ AcctTotalBoxes
+ // AcctTotalBoxBytes is the number of bytes in all boxes of this app account
+ AcctTotalBoxBytes
+
+ // AcctTotalAppSchema - consider how to expose
+
invalidAcctParamsField // compile-time constant for number of fields
)
@@ -1235,8 +1258,18 @@ func (fs acctParamsFieldSpec) Note() string {
var acctParamsFieldSpecs = [...]acctParamsFieldSpec{
{AcctBalance, StackUint64, 6, "Account balance in microalgos"},
- {AcctMinBalance, StackUint64, 6, "Minimum required blance for account, in microalgos"},
+ {AcctMinBalance, StackUint64, 6, "Minimum required balance for account, in microalgos"},
{AcctAuthAddr, StackBytes, 6, "Address the account is rekeyed to."},
+
+ {AcctTotalNumUint, StackUint64, 8, "The total number of uint64 values allocated by this account in Global and Local States."},
+ {AcctTotalNumByteSlice, StackUint64, 8, "The total number of byte array values allocated by this account in Global and Local States."},
+ {AcctTotalExtraAppPages, StackUint64, 8, "The number of extra app code pages used by this account."},
+ {AcctTotalAppsCreated, StackUint64, 8, "The number of existing apps created by this account."},
+ {AcctTotalAppsOptedIn, StackUint64, 8, "The number of apps this account is opted into."},
+ {AcctTotalAssetsCreated, StackUint64, 8, "The number of existing ASAs created by this account."},
+ {AcctTotalAssets, StackUint64, 8, "The numbers of ASAs held by this account (including ASAs this account created)."},
+ {AcctTotalBoxes, StackUint64, boxVersion, "The number of existing boxes created by this account's app."},
+ {AcctTotalBoxBytes, StackUint64, boxVersion, "The total number of bytes used by this account's app's box keys and values."},
}
func acctParamsFieldSpecByField(f AcctParamsField) (acctParamsFieldSpec, bool) {
diff --git a/data/transactions/logic/fields_string.go b/data/transactions/logic/fields_string.go
index 6c90a7a67..44531c2bd 100644
--- a/data/transactions/logic/fields_string.go
+++ b/data/transactions/logic/fields_string.go
@@ -183,12 +183,21 @@ func _() {
_ = x[AcctBalance-0]
_ = x[AcctMinBalance-1]
_ = x[AcctAuthAddr-2]
- _ = x[invalidAcctParamsField-3]
+ _ = x[AcctTotalNumUint-3]
+ _ = x[AcctTotalNumByteSlice-4]
+ _ = x[AcctTotalExtraAppPages-5]
+ _ = x[AcctTotalAppsCreated-6]
+ _ = x[AcctTotalAppsOptedIn-7]
+ _ = x[AcctTotalAssetsCreated-8]
+ _ = x[AcctTotalAssets-9]
+ _ = x[AcctTotalBoxes-10]
+ _ = x[AcctTotalBoxBytes-11]
+ _ = x[invalidAcctParamsField-12]
}
-const _AcctParamsField_name = "AcctBalanceAcctMinBalanceAcctAuthAddrinvalidAcctParamsField"
+const _AcctParamsField_name = "AcctBalanceAcctMinBalanceAcctAuthAddrAcctTotalNumUintAcctTotalNumByteSliceAcctTotalExtraAppPagesAcctTotalAppsCreatedAcctTotalAppsOptedInAcctTotalAssetsCreatedAcctTotalAssetsAcctTotalBoxesAcctTotalBoxBytesinvalidAcctParamsField"
-var _AcctParamsField_index = [...]uint8{0, 11, 25, 37, 59}
+var _AcctParamsField_index = [...]uint8{0, 11, 25, 37, 53, 74, 96, 116, 136, 158, 173, 187, 204, 226}
func (i AcctParamsField) String() string {
if i < 0 || i >= AcctParamsField(len(_AcctParamsField_index)-1) {
diff --git a/data/transactions/logic/fields_test.go b/data/transactions/logic/fields_test.go
index 2b5008f5c..5ae042294 100644
--- a/data/transactions/logic/fields_test.go
+++ b/data/transactions/logic/fields_test.go
@@ -41,7 +41,7 @@ func TestGlobalFieldsVersions(t *testing.T) {
}
require.Greater(t, len(fields), 1)
- ledger := MakeLedger(nil)
+ ledger := NewLedger(nil)
for _, field := range fields {
text := fmt.Sprintf("global %s", field.field.String())
// check assembler fails if version before introduction
@@ -59,7 +59,7 @@ func TestGlobalFieldsVersions(t *testing.T) {
if preLogicVersion < appsEnabledVersion {
require.False(t, proto.Application)
}
- ep := defaultEvalParams(nil)
+ ep := defaultEvalParams()
ep.Proto = proto
ep.Ledger = ledger
@@ -101,7 +101,7 @@ func TestTxnFieldVersions(t *testing.T) {
}
txnaVersion := uint64(appsEnabledVersion)
- ledger := MakeLedger(nil)
+ ledger := NewLedger(nil)
txn := makeSampleTxn()
// We'll reject too early if we have a nonzero RekeyTo, because that
// field must be zero for every txn in the group if this is an old
@@ -137,7 +137,7 @@ func TestTxnFieldVersions(t *testing.T) {
if preLogicVersion < appsEnabledVersion {
require.False(t, proto.Application)
}
- ep := defaultEvalParams(nil)
+ ep := defaultEvalParams()
ep.Proto = proto
ep.Ledger = ledger
ep.TxnGroup = transactions.WrapSignedTxnsWithAD(txgroup)
@@ -190,7 +190,7 @@ func TestTxnEffectsAvailable(t *testing.T) {
ep.TxnGroup[1].Lsig.Logic = ops.Program
_, err := EvalSignature(1, ep)
require.Error(t, err)
- ep.Ledger = MakeLedger(nil)
+ ep.Ledger = NewLedger(nil)
_, err = EvalApp(ops.Program, 1, 888, ep)
if v < txnEffectsVersion {
require.Error(t, err, source)
@@ -219,10 +219,18 @@ func TestAssetParamsFieldsVersions(t *testing.T) {
for _, field := range fields {
// Need to use intc so we can "backversion" the
// program and not have it fail because of pushint.
- text := fmt.Sprintf("intcblock 0 1; intc_0; asset_params_get %s; pop; pop; intc_1", field.field.String())
+ text := fmt.Sprintf("intcblock 0 1; intc_0; asset_params_get %s; bnz ok; err; ok: ", field.field.String())
+ switch field.ftype {
+ case StackUint64: // ensure the return type is uint64 by adding
+ text += " intc_1; +"
+ case StackBytes: // ensure the return type is bytes by using len
+ text += " len" // also happens to ensure that we get non empty - the params fields are fixed width
+ }
// check assembler fails if version before introduction
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
- ep, _, _ := makeSampleEnv()
+ ep, txn, ledger := makeSampleEnv()
+ // Create app 55, since txn.ForeignApps[0] == 55
+ ledger.NewAsset(txn.Sender, 55, basics.AssetParams{})
ep.Proto.LogicSigVersion = v
if field.version > v {
testProg(t, text, v, Expect{1, "...was introduced in..."})
@@ -242,7 +250,7 @@ func TestFieldVersions(t *testing.T) {
// This test is weird, it confirms that we don't need to
// bother with a "good" test for AssetHolding and AppParams
// fields. It will fail if we add a field that has a
- // different teal debut version, and then we'll need a test
+ // different debut version, and then we'll need a test
// like TestAssetParamsFieldsVersions that checks the field is
// unavailable before its debut.
@@ -257,3 +265,43 @@ func TestFieldVersions(t *testing.T) {
require.Equal(t, uint64(5), fs.version)
}
}
+
+func TestAcctParamsFieldsVersions(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ var fields []acctParamsFieldSpec
+ for _, fs := range acctParamsFieldSpecs {
+ if fs.version > 6 {
+ fields = append(fields, fs)
+ }
+ }
+ require.Greater(t, len(fields), 0)
+
+ for _, field := range fields {
+ // Need to use intc so we can "backversion" the program and not have it
+ // fail because of pushint.
+ // Use of '+' confirms the type, which is uint64 for all fields
+ text := fmt.Sprintf("intcblock 0 1; intc_0; acct_params_get %s; assert; intc_1; +", field.field.String())
+ // check assembler fails if version before introduction
+ for v := uint64(2); v <= AssemblerMaxVersion; v++ {
+ ep, txn, ledger := makeSampleEnv()
+ ledger.NewAccount(txn.Sender, 200_000)
+ ep.Proto.LogicSigVersion = v
+ if field.version > v {
+ testProg(t, text, v, Expect{1, "...was introduced in..."})
+ ops := testProg(t, text, field.version) // assemble in the future
+ ops.Program[0] = byte(v) // but set version back to before intro
+ if v < 6 {
+ testAppBytes(t, ops.Program, ep, "illegal opcode", "illegal opcode")
+ } else {
+ testAppBytes(t, ops.Program, ep, "invalid acct_params_get field")
+ }
+ } else {
+ testProg(t, text, v)
+ testApp(t, text, ep)
+ }
+ }
+
+ }
+}
diff --git a/data/transactions/logic/frames.go b/data/transactions/logic/frames.go
index e145ac8fc..1acc0c3c2 100644
--- a/data/transactions/logic/frames.go
+++ b/data/transactions/logic/frames.go
@@ -114,12 +114,7 @@ func opDupN(cx *EvalContext) error {
n := int(cx.program[cx.pc+1])
finalLen := len(cx.stack) + n
- if cap(cx.stack) < finalLen {
- // Let's grow all at once, plus a little slack.
- newStack := make([]stackValue, len(cx.stack), finalLen+4)
- copy(newStack, cx.stack)
- cx.stack = newStack
- }
+ cx.ensureStackCap(finalLen)
for i := 0; i < n; i++ {
// There will be enough room that this will not allocate
cx.stack = append(cx.stack, cx.stack[last])
diff --git a/data/transactions/logic/frames_test.go b/data/transactions/logic/frames_test.go
index f1c1780c3..b02714a88 100644
--- a/data/transactions/logic/frames_test.go
+++ b/data/transactions/logic/frames_test.go
@@ -178,7 +178,7 @@ main:
+ // This consumes the top arg. We could complain in assembly if checked stack height against pgm.fp
dup; dup // But the dup;dup restores it, so it _evals_ fine.
retsub
-`, AssemblerMaxVersion)
+`, fpVersion)
}
diff --git a/data/transactions/logic/langspec.json b/data/transactions/logic/langspec.json
index f4742f1b7..f4054ff14 100644
--- a/data/transactions/logic/langspec.json
+++ b/data/transactions/logic/langspec.json
@@ -1,6 +1,6 @@
{
"EvalMaxVersion": 8,
- "LogicSigVersion": 7,
+ "LogicSigVersion": 8,
"Ops": [
{
"Opcode": 0,
@@ -370,7 +370,7 @@
"Size": 0,
"Doc": "prepare block of uint64 constants for use by intc",
"DocExtra": "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script.",
- "ImmediateNote": "{varuint length} [{varuint value}, ...]",
+ "ImmediateNote": "{varuint count} [{varuint value}, ...]",
"Groups": [
"Loading Values"
]
@@ -432,7 +432,7 @@
"Size": 0,
"Doc": "prepare block of byte-array constants for use by bytec",
"DocExtra": "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script.",
- "ImmediateNote": "{varuint length} [({varuint value length} bytes), ...]",
+ "ImmediateNote": "{varuint count} [({varuint value length} bytes), ...]",
"Groups": [
"Loading Values"
]
@@ -1033,7 +1033,7 @@
"Name": "bury",
"Args": ".",
"Size": 2,
- "Doc": "Replace the Nth value from the top of the stack. bury 0 fails.",
+ "Doc": "replace the Nth value from the top of the stack with A. bury 0 fails.",
"ImmediateNote": "{uint8 depth}",
"Groups": [
"Flow Control"
@@ -1043,7 +1043,7 @@
"Opcode": 70,
"Name": "popn",
"Size": 2,
- "Doc": "Remove N values from the top of the stack",
+ "Doc": "remove N values from the top of the stack",
"ImmediateNote": "{uint8 stack depth}",
"Groups": [
"Flow Control"
@@ -1342,7 +1342,7 @@
"Args": ".",
"Returns": "U",
"Size": 1,
- "Doc": "get balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted.",
+ "Doc": "balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted. Changes caused by inner transactions are observable immediately following `itxn_submit`",
"DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.",
"Groups": [
"State Access"
@@ -1531,9 +1531,18 @@
"ArgEnum": [
"AcctBalance",
"AcctMinBalance",
- "AcctAuthAddr"
+ "AcctAuthAddr",
+ "AcctTotalNumUint",
+ "AcctTotalNumByteSlice",
+ "AcctTotalExtraAppPages",
+ "AcctTotalAppsCreated",
+ "AcctTotalAppsOptedIn",
+ "AcctTotalAssetsCreated",
+ "AcctTotalAssets",
+ "AcctTotalBoxes",
+ "AcctTotalBoxBytes"
],
- "ArgEnumTypes": "UUB",
+ "ArgEnumTypes": "UUBUUUUUUUUU",
"Doc": "X is field F from account A. Y is 1 if A owns positive algos, else 0",
"ImmediateNote": "{uint8 account params field index}",
"Groups": [
@@ -1546,7 +1555,7 @@
"Args": ".",
"Returns": "U",
"Size": 1,
- "Doc": "get minimum required balance for account A, in microalgos. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes.",
+ "Doc": "minimum required balance for account A, in microalgos. Required balance is affected by ASA, App, and Box usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes. Changes caused by inner transactions or box usage are observable immediately following the opcode effecting the change.",
"DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.",
"Groups": [
"State Access"
@@ -1577,6 +1586,28 @@
]
},
{
+ "Opcode": 130,
+ "Name": "pushbytess",
+ "Size": 0,
+ "Doc": "push sequences of immediate byte arrays to stack (first byte array being deepest)",
+ "DocExtra": "pushbytess args are not added to the bytecblock during assembly processes",
+ "ImmediateNote": "{varuint count} [({varuint value length} bytes), ...]",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 131,
+ "Name": "pushints",
+ "Size": 0,
+ "Doc": "push sequence of immediate uints to stack in the order they appear (first uint being deepest)",
+ "DocExtra": "pushints args are not added to the intcblock during assembly processes",
+ "ImmediateNote": "{varuint count} [{varuint value}, ...]",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
"Opcode": 132,
"Name": "ed25519verify_bare",
"Args": "BBB",
@@ -1635,7 +1666,7 @@
"Name": "frame_bury",
"Args": ".",
"Size": 2,
- "Doc": "Replace the Nth (signed) value from the frame pointer in the stack",
+ "Doc": "replace the Nth (signed) value from the frame pointer in the stack with A",
"ImmediateNote": "{int8 frame slot}",
"Groups": [
"Flow Control"
@@ -1653,6 +1684,17 @@
]
},
{
+ "Opcode": 142,
+ "Name": "match",
+ "Size": 0,
+ "Doc": "given match cases from A[1] to A[N], branch to the Ith label where A[I] = B. Continue to the following instruction if no matches are found.",
+ "DocExtra": "`match` consumes N+1 values from the stack. Let the top stack value be B. The following N values represent an ordered list of match cases/constants (A), where the first value (A[0]) is the deepest in the stack. The immediate arguments are an ordered list of N labels (T). `match` will branch to target T[I], where A[I] = B. If there are no matches then execution continues on to the next instruction.",
+ "ImmediateNote": "{uint8 branch count} [{int16 branch offset, big-endian}, ...]",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
"Opcode": 144,
"Name": "shl",
"Args": "UU",
@@ -2243,6 +2285,84 @@
]
},
{
+ "Opcode": 185,
+ "Name": "box_create",
+ "Args": "BU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "create a box named A, of length B. Fail if A is empty or B exceeds 32,768. Returns 0 if A already existed, else 1",
+ "DocExtra": "Newly created boxes are filled with 0 bytes. `box_create` will fail if the referenced box already exists with a different size. Otherwise, existing boxes are unchanged by `box_create`.",
+ "Groups": [
+ "Box Access"
+ ]
+ },
+ {
+ "Opcode": 186,
+ "Name": "box_extract",
+ "Args": "BUU",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "read C bytes from box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.",
+ "Groups": [
+ "Box Access"
+ ]
+ },
+ {
+ "Opcode": 187,
+ "Name": "box_replace",
+ "Args": "BUB",
+ "Size": 1,
+ "Doc": "write byte-array C into box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.",
+ "Groups": [
+ "Box Access"
+ ]
+ },
+ {
+ "Opcode": 188,
+ "Name": "box_del",
+ "Args": "B",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "delete box named A if it exists. Return 1 if A existed, 0 otherwise",
+ "Groups": [
+ "Box Access"
+ ]
+ },
+ {
+ "Opcode": 189,
+ "Name": "box_len",
+ "Args": "B",
+ "Returns": "UU",
+ "Size": 1,
+ "Doc": "X is the length of box A if A exists, else 0. Y is 1 if A exists, else 0.",
+ "Groups": [
+ "Box Access"
+ ]
+ },
+ {
+ "Opcode": 190,
+ "Name": "box_get",
+ "Args": "B",
+ "Returns": "BU",
+ "Size": 1,
+ "Doc": "X is the contents of box A if A exists, else ''. Y is 1 if A exists, else 0.",
+ "DocExtra": "For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`",
+ "Groups": [
+ "Box Access"
+ ]
+ },
+ {
+ "Opcode": 191,
+ "Name": "box_put",
+ "Args": "BB",
+ "Size": 1,
+ "Doc": "replaces the contents of box A with byte-array B. Fails if A exists and len(B) != len(box A). Creates A if it does not exist",
+ "DocExtra": "For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`",
+ "Groups": [
+ "Box Access"
+ ]
+ },
+ {
"Opcode": 192,
"Name": "txnas",
"Args": "U",
diff --git a/data/transactions/logic/ledger_test.go b/data/transactions/logic/ledger_test.go
index 9f427b0e9..570967305 100644
--- a/data/transactions/logic/ledger_test.go
+++ b/data/transactions/logic/ledger_test.go
@@ -16,6 +16,20 @@
package logic
+/* This Ledger implements LedgerForLogic for unit tests in the logic package. It
+ does *not* carry the protocol around, so it does *not* enforce the various
+ limits imposed there. This helps ensure that the logic package itself
+ enforces those limits, rather than rely on the ledger package. (Which should
+ also do so, to be defensive.)
+
+ This Ledger is not clever enough to have a good mechanism for making changes
+ and rolling them back if the program that makes them fails. It just has a
+ Reset() method that throws away all changes made by programs. Generally,
+ it's probably best to call Reset() after any error test, though you can keep
+ testing if you take into account that changes made before the failure will
+ take effect.
+*/
+
import (
"errors"
"fmt"
@@ -39,15 +53,14 @@ type balanceRecord struct {
mods map[basics.AppIndex]map[string]basics.ValueDelta
}
-func makeBalanceRecord(addr basics.Address, balance uint64) balanceRecord {
- br := balanceRecord{
+func newBalanceRecord(addr basics.Address, balance uint64) balanceRecord {
+ return balanceRecord{
addr: addr,
balance: balance,
locals: make(map[basics.AppIndex]basics.TealKeyValue),
holdings: make(map[basics.AssetIndex]basics.AssetHolding),
mods: make(map[basics.AppIndex]map[string]basics.ValueDelta),
}
- return br
}
// In our test ledger, we don't store the creatables with their
@@ -55,6 +68,9 @@ func makeBalanceRecord(addr basics.Address, balance uint64) balanceRecord {
type appParams struct {
basics.AppParams
Creator basics.Address
+
+ boxes map[string][]byte // will never contain a nil slice
+ boxMods map[string][]byte // nil slice indicates a deletion
}
type asaParams struct {
@@ -71,8 +87,8 @@ type Ledger struct {
rnd basics.Round
}
-// MakeLedger constructs a Ledger with the given balances.
-func MakeLedger(balances map[basics.Address]uint64) *Ledger {
+// NewLedger constructs a Ledger with the given balances.
+func NewLedger(balances map[basics.Address]uint64) *Ledger {
l := new(Ledger)
l.balances = make(map[basics.Address]balanceRecord)
for addr, balance := range balances {
@@ -91,11 +107,15 @@ func (l *Ledger) Reset() {
br.mods = make(map[basics.AppIndex]map[string]basics.ValueDelta)
l.balances[addr] = br
}
+ for id, app := range l.applications {
+ app.boxMods = nil
+ l.applications[id] = app
+ }
}
// NewAccount adds a new account with a given balance to the Ledger.
func (l *Ledger) NewAccount(addr basics.Address, balance uint64) {
- l.balances[addr] = makeBalanceRecord(addr, balance)
+ l.balances[addr] = newBalanceRecord(addr, balance)
}
// NewApp add a new AVM app to the Ledger. In most uses, it only sets up the id
@@ -120,7 +140,7 @@ func (l *Ledger) NewAsset(creator basics.Address, assetID basics.AssetIndex, par
}
br, ok := l.balances[creator]
if !ok {
- br = makeBalanceRecord(creator, 0)
+ br = newBalanceRecord(creator, 0)
}
br.holdings[assetID] = basics.AssetHolding{Amount: params.Total, Frozen: params.DefaultFrozen}
l.balances[creator] = br
@@ -147,7 +167,7 @@ func (l *Ledger) Counter() uint64 {
func (l *Ledger) NewHolding(addr basics.Address, assetID uint64, amount uint64, frozen bool) {
br, ok := l.balances[addr]
if !ok {
- br = makeBalanceRecord(addr, 0)
+ br = newBalanceRecord(addr, 0)
}
br.holdings[basics.AssetIndex(assetID)] = basics.AssetHolding{Amount: amount, Frozen: frozen}
l.balances[addr] = br
@@ -156,7 +176,7 @@ func (l *Ledger) NewHolding(addr basics.Address, assetID uint64, amount uint64,
// NewLocals essentially "opts in" an address to an app id.
func (l *Ledger) NewLocals(addr basics.Address, appID uint64) {
if _, ok := l.balances[addr]; !ok {
- l.balances[addr] = makeBalanceRecord(addr, 0)
+ l.balances[addr] = newBalanceRecord(addr, 0)
}
l.balances[addr].locals[basics.AppIndex(appID)] = basics.TealKeyValue{}
}
@@ -189,14 +209,9 @@ func (l *Ledger) Rekey(addr basics.Address, auth basics.Address) {
}
}
-// Round gives the current Round of the test ledger, which is random but consistent
-func (l *Ledger) Round() basics.Round {
- return l.round()
-}
-
// PrevTimestamp gives an int64, chosen randomly. It should
// probably increase monotonically, but no tests care yet.
-func (l *Ledger) LatestTimestamp() int64 {
+func (l *Ledger) PrevTimestamp() int64 {
return int64(rand.Uint32() + 1)
}
@@ -234,12 +249,40 @@ func (l *Ledger) AccountData(addr basics.Address) (ledgercore.AccountData, error
schemaTotal := basics.StateSchema{}
pagesTotal := uint32(0)
+ boxesTotal := 0
+ boxBytesTotal := 0
+
apps := make(map[basics.AppIndex]basics.AppParams)
for a, p := range l.applications {
if p.Creator == addr {
apps[a] = p.AppParams
schemaTotal = schemaTotal.AddSchema(p.GlobalStateSchema)
- pagesTotal = p.ExtraProgramPages
+ pagesTotal += p.ExtraProgramPages
+ }
+ if a.Address() == addr {
+ // We found the app that corresponds to this app account. Get box info from there.
+ boxesTotal = len(p.boxes)
+ for k, v := range p.boxes {
+ boxBytesTotal += len(k) + len(v)
+ }
+ for k, v := range p.boxMods {
+ base, ok := p.boxes[k]
+ if ok {
+ if v == nil {
+ // deleted, so remove from totals
+ boxesTotal--
+ boxBytesTotal -= len(k) + len(base)
+ continue
+ }
+ if len(v) != len(base) {
+ panic(fmt.Sprintf("mismatch %v %v", v, base))
+ }
+ continue
+ }
+ // fresh box in mods, count it
+ boxesTotal++
+ boxBytesTotal += len(k) + len(v)
+ }
}
}
@@ -259,6 +302,9 @@ func (l *Ledger) AccountData(addr basics.Address) (ledgercore.AccountData, error
TotalAppLocalStates: uint64(len(locals)),
TotalAssetParams: uint64(len(assets)),
TotalAssets: uint64(len(br.holdings)),
+
+ TotalBoxes: uint64(boxesTotal),
+ TotalBoxBytes: uint64(boxBytesTotal),
},
}, nil
}
@@ -352,6 +398,90 @@ func (l *Ledger) DelGlobal(appIdx basics.AppIndex, key string) error {
return nil
}
+// NewBox makes a new box, through the boxMods mechanism. It can be Reset()
+func (l *Ledger) NewBox(appIdx basics.AppIndex, key string, value []byte, appAddr basics.Address) error {
+ if appIdx.Address() != appAddr {
+ panic(fmt.Sprintf("%d %v %v", appIdx, appIdx.Address(), appAddr))
+ }
+ params, ok := l.applications[appIdx]
+ if !ok {
+ return fmt.Errorf("no such app %d", appIdx)
+ }
+ if params.boxMods == nil {
+ params.boxMods = make(map[string][]byte)
+ }
+ if current, ok := params.boxMods[key]; ok {
+ if current != nil {
+ return fmt.Errorf("attempt to recreate %s", key)
+ }
+ } else if _, ok := params.boxes[key]; ok {
+ return fmt.Errorf("attempt to recreate %s", key)
+ }
+ params.boxMods[key] = value
+ l.applications[appIdx] = params
+ return nil
+}
+
+func (l *Ledger) GetBox(appIdx basics.AppIndex, key string) ([]byte, bool, error) {
+ params, ok := l.applications[appIdx]
+ if !ok {
+ return nil, false, nil
+ }
+ if params.boxMods != nil {
+ if ps, ok := params.boxMods[key]; ok {
+ if ps == nil { // deletion in mod
+ return nil, false, nil
+ }
+ return ps, true, nil
+ }
+ }
+ if params.boxes == nil {
+ return nil, false, nil
+ }
+ box, ok := params.boxes[key]
+ return box, ok, nil
+}
+
+// SetBox set a box value through the boxMods mechanism. It can be Reset()
+func (l *Ledger) SetBox(appIdx basics.AppIndex, key string, value []byte) error {
+ current, ok, err := l.GetBox(appIdx, key)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return fmt.Errorf("no such box %d", appIdx)
+ }
+ params := l.applications[appIdx] // assured, based on above
+ if params.boxMods == nil {
+ params.boxMods = make(map[string][]byte)
+ }
+ if len(current) != len(value) {
+ return fmt.Errorf("wrong box size %#v %d != %d", key, len(current), len(value))
+ }
+ params.boxMods[key] = value
+ return nil
+}
+
+// DelBox deletes a value through boxMods mechanism
+func (l *Ledger) DelBox(appIdx basics.AppIndex, key string, appAddr basics.Address) (bool, error) {
+ if appIdx.Address() != appAddr {
+ panic(fmt.Sprintf("%d %v %v", appIdx, appIdx.Address(), appAddr))
+ }
+ _, ok, err := l.GetBox(appIdx, key)
+ if err != nil {
+ return false, err
+ }
+ if !ok {
+ return false, nil
+ }
+ params := l.applications[appIdx] // assured, based on above
+ if params.boxMods == nil {
+ params.boxMods = make(map[string][]byte)
+ }
+ params.boxMods[key] = nil
+ return true, nil
+}
+
// GetLocal returns the current value bound to a local key, taking
// into account mods caused by earlier executions.
func (l *Ledger) GetLocal(addr basics.Address, appIdx basics.AppIndex, key string, accountIdx uint64) (basics.TealValue, bool, error) {
@@ -480,11 +610,11 @@ func (l *Ledger) AppParams(appID basics.AppIndex) (basics.AppParams, basics.Addr
func (l *Ledger) move(from basics.Address, to basics.Address, amount uint64) error {
fbr, ok := l.balances[from]
if !ok {
- fbr = makeBalanceRecord(from, 0)
+ fbr = newBalanceRecord(from, 0)
}
tbr, ok := l.balances[to]
if !ok {
- tbr = makeBalanceRecord(to, 0)
+ tbr = newBalanceRecord(to, 0)
}
if fbr.balance < amount {
return fmt.Errorf("insufficient balance")
@@ -546,7 +676,7 @@ func (l *Ledger) axfer(from basics.Address, xfer transactions.AssetTransferTxnFi
fbr, ok := l.balances[from]
if !ok {
- fbr = makeBalanceRecord(from, 0)
+ fbr = newBalanceRecord(from, 0)
}
fholding, ok := fbr.holdings[aid]
if !ok {
@@ -567,7 +697,7 @@ func (l *Ledger) axfer(from basics.Address, xfer transactions.AssetTransferTxnFi
}
tbr, ok := l.balances[to]
if !ok {
- tbr = makeBalanceRecord(to, 0)
+ tbr = newBalanceRecord(to, 0)
}
tholding, ok := tbr.holdings[aid]
if !ok && amount > 0 {
@@ -595,7 +725,7 @@ func (l *Ledger) axfer(from basics.Address, xfer transactions.AssetTransferTxnFi
if !close.IsZero() && fholding.Amount > 0 {
cbr, ok := l.balances[close]
if !ok {
- cbr = makeBalanceRecord(close, 0)
+ cbr = newBalanceRecord(close, 0)
}
cholding, ok := cbr.holdings[aid]
if !ok {
@@ -768,11 +898,6 @@ func (l *Ledger) Perform(gi int, ep *EvalParams) error {
}
}
-// Get() through allocated() implement cowForLogicLedger, so we should
-// be able to make logicLedger with this inside. That let's us to
-// write tests and then poke around and see how the balance table
-// inside is affected.
-
// Get returns the AccountData of an address. This test ledger does
// not handle rewards, so the pending rewards flag is ignored.
func (l *Ledger) Get(addr basics.Address, withPendingRewards bool) (basics.AccountData, error) {
@@ -822,7 +947,7 @@ func (l *Ledger) DelKey(addr basics.Address, aidx basics.AppIndex, global bool,
return nil
}
-func (l *Ledger) round() basics.Round {
+func (l *Ledger) Round() basics.Round {
if l.rnd == basics.Round(0) {
// Something big enough to shake out bugs from width
l.rnd = basics.Round(uint64(math.MaxUint32) + 5)
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index 1efab93be..38dde2e08 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -24,7 +24,7 @@ import (
)
// LogicVersion defines default assembler and max eval versions
-const LogicVersion = 8
+const LogicVersion = 9
// rekeyingEnabledVersion is the version of TEAL where RekeyTo functionality
// was enabled. This is important to remember so that old TEAL accounts cannot
@@ -71,6 +71,9 @@ const fpVersion = 8 // changes for frame pointers and simpler function d
// their version, and fixup TestAssemble() in assembler_test.go.
const pairingVersion = 9 // bn256 opcodes. will add bls12-381, and unify the available opcodes.
+// Unlimited Global Storage opcodes
+const boxVersion = 8 // box_*
+
type linearCost struct {
baseCost int
chunkCost int
@@ -448,13 +451,13 @@ var OpSpecs = []OpSpec{
{0x1e, "addw", opAddw, proto("ii:ii"), 2, detDefault()},
{0x1f, "divmodw", opDivModw, proto("iiii:iiii"), 4, costly(20)},
- {0x20, "intcblock", opIntConstBlock, proto(":"), 1, constants(asmIntCBlock, checkIntConstBlock, "uint ...", immInts)},
+ {0x20, "intcblock", opIntConstBlock, proto(":"), 1, constants(asmIntCBlock, checkIntImmArgs, "uint ...", immInts)},
{0x21, "intc", opIntConstLoad, proto(":i"), 1, immediates("i").assembler(asmIntC)},
{0x22, "intc_0", opIntConst0, proto(":i"), 1, detDefault()},
{0x23, "intc_1", opIntConst1, proto(":i"), 1, detDefault()},
{0x24, "intc_2", opIntConst2, proto(":i"), 1, detDefault()},
{0x25, "intc_3", opIntConst3, proto(":i"), 1, detDefault()},
- {0x26, "bytecblock", opByteConstBlock, proto(":"), 1, constants(asmByteCBlock, checkByteConstBlock, "bytes ...", immBytess)},
+ {0x26, "bytecblock", opByteConstBlock, proto(":"), 1, constants(asmByteCBlock, checkByteImmArgs, "bytes ...", immBytess)},
{0x27, "bytec", opByteConstLoad, proto(":b"), 1, immediates("i").assembler(asmByteC)},
{0x28, "bytec_0", opByteConst0, proto(":b"), 1, detDefault()},
{0x29, "bytec_1", opByteConst1, proto(":b"), 1, detDefault()},
@@ -552,6 +555,8 @@ var OpSpecs = []OpSpec{
// Immediate bytes and ints. Smaller code size for single use of constant.
{0x80, "pushbytes", opPushBytes, proto(":b"), 3, constants(asmPushBytes, opPushBytes, "bytes", immBytes)},
{0x81, "pushint", opPushInt, proto(":i"), 3, constants(asmPushInt, opPushInt, "uint", immInt)},
+ {0x82, "pushbytess", opPushBytess, proto(":", "", "[N items]"), 8, constants(asmPushBytess, checkByteImmArgs, "bytes ...", immBytess).typed(typePushBytess).trust()},
+ {0x83, "pushints", opPushInts, proto(":", "", "[N items]"), 8, constants(asmPushInts, checkIntImmArgs, "uint ...", immInts).typed(typePushInts).trust()},
{0x84, "ed25519verify_bare", opEd25519VerifyBare, proto("bbb:i"), 7, costly(1900)},
@@ -563,7 +568,7 @@ var OpSpecs = []OpSpec{
{0x8b, "frame_dig", opFrameDig, proto(":a"), fpVersion, immKinded(immInt8, "i").typed(typeFrameDig)},
{0x8c, "frame_bury", opFrameBury, proto("a:"), fpVersion, immKinded(immInt8, "i").typed(typeFrameBury)},
{0x8d, "switch", opSwitch, proto("i:"), 8, detSwitch()},
- // 0x8e will likely be a switch on pairs of values/targets, called `match`
+ {0x8e, "match", opMatch, proto(":", "[A1, A2, ..., AN], B", ""), 8, detSwitch().trust()},
// More math
{0x90, "shl", opShiftLeft, proto("ii:i"), 4, detDefault()},
@@ -612,6 +617,15 @@ var OpSpecs = []OpSpec{
{0xb7, "gitxn", opGitxn, proto(":a"), 6, immediates("t", "f").field("f", &TxnFields).only(modeApp).assembler(asmGitxn)},
{0xb8, "gitxna", opGitxna, proto(":a"), 6, immediates("t", "f", "i").field("f", &TxnArrayFields).only(modeApp)},
+ // Unlimited Global Storage - Boxes
+ {0xb9, "box_create", opBoxCreate, proto("bi:i"), boxVersion, only(modeApp)},
+ {0xba, "box_extract", opBoxExtract, proto("bii:b"), boxVersion, only(modeApp)},
+ {0xbb, "box_replace", opBoxReplace, proto("bib:"), boxVersion, only(modeApp)},
+ {0xbc, "box_del", opBoxDel, proto("b:i"), boxVersion, only(modeApp)},
+ {0xbd, "box_len", opBoxLen, proto("b:ii"), boxVersion, only(modeApp)},
+ {0xbe, "box_get", opBoxGet, proto("b:bi"), boxVersion, only(modeApp)},
+ {0xbf, "box_put", opBoxPut, proto("bb:"), boxVersion, only(modeApp)},
+
// Dynamic indexing
{0xc0, "txnas", opTxnas, proto("i:a"), 5, field("f", &TxnArrayFields)},
{0xc1, "gtxnas", opGtxnas, proto("i:a"), 5, immediates("t", "f").field("f", &TxnArrayFields)},
diff --git a/data/transactions/logic/pairing.go b/data/transactions/logic/pairing.go
index cb43efeb5..25dfea40a 100644
--- a/data/transactions/logic/pairing.go
+++ b/data/transactions/logic/pairing.go
@@ -110,7 +110,6 @@ func opBn256Pairing(cx *EvalContext) error {
return errors.New("pairing failed")
}
cx.stack = cx.stack[:last]
- cx.stack[prev].Uint = boolToUint(ok)
- cx.stack[prev].Bytes = nil
+ cx.stack[prev] = boolToSV(ok)
return nil
}
diff --git a/data/transactions/logic/parsing.go b/data/transactions/logic/parsing.go
new file mode 100644
index 000000000..7a7429221
--- /dev/null
+++ b/data/transactions/logic/parsing.go
@@ -0,0 +1,105 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic
+
+import (
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/algorand/avm-abi/abi"
+ "github.com/algorand/go-algorand/data/basics"
+)
+
// AppCallBytes represents an encoding and a value of an app call argument.
type AppCallBytes struct {
	Encoding string `codec:"encoding"`
	Value    string `codec:"value"`
}

// NewAppCallBytes parses an argument of the form "encoding:value" to AppCallBytes.
func NewAppCallBytes(arg string) (AppCallBytes, error) {
	// Split on the first colon only; the value may itself contain colons.
	sep := strings.Index(arg, ":")
	if sep < 0 {
		return AppCallBytes{}, fmt.Errorf("all arguments and box names should be of the form 'encoding:value'")
	}
	return AppCallBytes{Encoding: arg[:sep], Value: arg[sep+1:]}, nil
}
+
+// Raw converts an AppCallBytes arg to a byte array.
+func (arg AppCallBytes) Raw() (rawValue []byte, parseErr error) {
+ switch arg.Encoding {
+ case "str", "string":
+ rawValue = []byte(arg.Value)
+ case "int", "integer":
+ num, err := strconv.ParseUint(arg.Value, 10, 64)
+ if err != nil {
+ parseErr = fmt.Errorf("Could not parse uint64 from string (%s): %v", arg.Value, err)
+ return
+ }
+ ibytes := make([]byte, 8)
+ binary.BigEndian.PutUint64(ibytes, num)
+ rawValue = ibytes
+ case "addr", "address":
+ addr, err := basics.UnmarshalChecksumAddress(arg.Value)
+ if err != nil {
+ parseErr = fmt.Errorf("Could not unmarshal checksummed address from string (%s): %v", arg.Value, err)
+ return
+ }
+ rawValue = addr[:]
+ case "b32", "base32", "byte base32":
+ data, err := base32.StdEncoding.DecodeString(arg.Value)
+ if err != nil {
+ parseErr = fmt.Errorf("Could not decode base32-encoded string (%s): %v", arg.Value, err)
+ return
+ }
+ rawValue = data
+ case "b64", "base64", "byte base64":
+ data, err := base64.StdEncoding.DecodeString(arg.Value)
+ if err != nil {
+ parseErr = fmt.Errorf("Could not decode base64-encoded string (%s): %v", arg.Value, err)
+ return
+ }
+ rawValue = data
+ case "abi":
+ typeAndValue := strings.SplitN(arg.Value, ":", 2)
+ if len(typeAndValue) != 2 {
+ parseErr = fmt.Errorf("Could not decode abi string (%s): should split abi-type and abi-value with colon", arg.Value)
+ return
+ }
+ abiType, err := abi.TypeOf(typeAndValue[0])
+ if err != nil {
+ parseErr = fmt.Errorf("Could not decode abi type string (%s): %v", typeAndValue[0], err)
+ return
+ }
+ value, err := abiType.UnmarshalFromJSON([]byte(typeAndValue[1]))
+ if err != nil {
+ parseErr = fmt.Errorf("Could not decode abi value string (%s):%v ", typeAndValue[1], err)
+ return
+ }
+ return abiType.Encode(value)
+ default:
+ parseErr = fmt.Errorf("Unknown encoding: %s", arg.Encoding)
+ }
+ return
+}
diff --git a/data/transactions/logic/parsing_test.go b/data/transactions/logic/parsing_test.go
new file mode 100644
index 000000000..5bc3113b8
--- /dev/null
+++ b/data/transactions/logic/parsing_test.go
@@ -0,0 +1,139 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic
+
+import (
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "testing"
+
+ "github.com/algorand/avm-abi/abi"
+ "github.com/algorand/go-algorand/data/basics"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewAppCallBytes(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ t.Run("errors", func(t *testing.T) {
+ _, err := NewAppCallBytes("hello")
+ require.Error(t, err)
+
+ for _, v := range []string{":x", "int:-1"} {
+ acb, err := NewAppCallBytes(v)
+ _, err = acb.Raw()
+ require.Error(t, err)
+ }
+ })
+
+ for _, v := range []string{"hello", "1:2"} {
+ for _, e := range []string{"str", "string"} {
+ t.Run(fmt.Sprintf("encoding=%v,value=%v", e, v), func(t *testing.T) {
+ acb, err := NewAppCallBytes(fmt.Sprintf("%v:%v", e, v))
+ require.NoError(t, err)
+ r, err := acb.Raw()
+ require.NoError(t, err)
+ require.Equal(t, v, string(r))
+ })
+ }
+
+ for _, e := range []string{"b32", "base32", "byte base32"} {
+ ve := base32.StdEncoding.EncodeToString([]byte(v))
+ t.Run(fmt.Sprintf("encoding=%v,value=%v", e, ve), func(t *testing.T) {
+ acb, err := NewAppCallBytes(fmt.Sprintf("%v:%v", e, ve))
+ require.NoError(t, err)
+ r, err := acb.Raw()
+ require.NoError(t, err)
+ require.Equal(t, ve, base32.StdEncoding.EncodeToString(r))
+ })
+ }
+
+ for _, e := range []string{"b64", "base64", "byte base64"} {
+ ve := base64.StdEncoding.EncodeToString([]byte(v))
+ t.Run(fmt.Sprintf("encoding=%v,value=%v", e, ve), func(t *testing.T) {
+ acb, err := NewAppCallBytes(fmt.Sprintf("%v:%v", e, ve))
+ require.NoError(t, err)
+ r, err := acb.Raw()
+ require.NoError(t, err)
+ require.Equal(t, ve, base64.StdEncoding.EncodeToString(r))
+ })
+ }
+ }
+
+ for _, v := range []uint64{1, 0, math.MaxUint64} {
+ for _, e := range []string{"int", "integer"} {
+ t.Run(fmt.Sprintf("encoding=%v,value=%v", e, v), func(t *testing.T) {
+ acb, err := NewAppCallBytes(fmt.Sprintf("%v:%v", e, v))
+ require.NoError(t, err)
+ r, err := acb.Raw()
+ require.NoError(t, err)
+ require.Equal(t, v, binary.BigEndian.Uint64(r))
+ })
+ }
+ }
+
+ for _, v := range []string{"737777777777777777777777777777777777777777777777777UFEJ2CI"} {
+ for _, e := range []string{"addr", "address"} {
+ t.Run(fmt.Sprintf("encoding=%v,value=%v", e, v), func(t *testing.T) {
+ acb, err := NewAppCallBytes(fmt.Sprintf("%v:%v", e, v))
+ require.NoError(t, err)
+ r, err := acb.Raw()
+ require.NoError(t, err)
+ addr, err := basics.UnmarshalChecksumAddress(v)
+ require.NoError(t, err)
+ expectedBytes := []byte{}
+ expectedBytes = addr[:]
+ require.Equal(t, expectedBytes, r)
+ })
+ }
+ }
+
+ type abiCase struct {
+ abiType, rawValue string
+ }
+ for _, v := range []abiCase{
+ {
+ `(uint64,string,bool[])`,
+ `[399,"should pass",[true,false,false,true]]`,
+ }} {
+ for _, e := range []string{"abi"} {
+ t.Run(fmt.Sprintf("encoding=%v,value=%v", e, v), func(t *testing.T) {
+ acb, err := NewAppCallBytes(fmt.Sprintf(
+ "%v:%v:%v", e, v.abiType, v.rawValue))
+ require.NoError(t, err)
+ r, err := acb.Raw()
+ require.NoError(t, err)
+ require.NotEmpty(t, r)
+
+ // Confirm round-trip works.
+ abiType, err := abi.TypeOf(v.abiType)
+ require.NoError(t, err)
+ d, err := abiType.Decode(r)
+ require.NoError(t, err)
+ vv, err := abiType.Encode(d)
+ require.NoError(t, err)
+ require.Equal(t, r, vv)
+ })
+ }
+ }
+}
diff --git a/data/transactions/logic/teal.tmLanguage.json b/data/transactions/logic/teal.tmLanguage.json
index 7a299a962..59dc14368 100644
--- a/data/transactions/logic/teal.tmLanguage.json
+++ b/data/transactions/logic/teal.tmLanguage.json
@@ -64,15 +64,15 @@
},
{
"name": "keyword.control.teal",
- "match": "^(assert|b|bnz|bury|bz|callsub|cover|dig|dup|dup2|dupn|err|frame_bury|frame_dig|pop|popn|proto|retsub|return|select|swap|switch|uncover)\\b"
+ "match": "^(assert|b|bnz|bury|bz|callsub|cover|dig|dup|dup2|dupn|err|frame_bury|frame_dig|match|pop|popn|proto|retsub|return|select|swap|switch|uncover)\\b"
},
{
"name": "keyword.other.teal",
- "match": "^(int|byte|addr|arg|arg_0|arg_1|arg_2|arg_3|args|bytec|bytec_0|bytec_1|bytec_2|bytec_3|bytecblock|bzero|gaid|gaids|gload|gloads|gloadss|global|gtxn|gtxna|gtxnas|gtxns|gtxnsa|gtxnsas|intc|intc_0|intc_1|intc_2|intc_3|intcblock|load|loads|pushbytes|pushint|store|stores|txn|txna|txnas)\\b"
+ "match": "^(int|byte|addr|arg|arg_0|arg_1|arg_2|arg_3|args|bytec|bytec_0|bytec_1|bytec_2|bytec_3|bytecblock|bzero|gaid|gaids|gload|gloads|gloadss|global|gtxn|gtxna|gtxnas|gtxns|gtxnsa|gtxnsas|intc|intc_0|intc_1|intc_2|intc_3|intcblock|load|loads|pushbytes|pushbytess|pushint|pushints|store|stores|txn|txna|txnas)\\b"
},
{
"name": "keyword.other.unit.teal",
- "match": "^(acct_params_get|app_global_del|app_global_get|app_global_get_ex|app_global_put|app_local_del|app_local_get|app_local_get_ex|app_local_put|app_opted_in|app_params_get|asset_holding_get|asset_params_get|balance|block|log|min_balance)\\b"
+ "match": "^(box_create|box_del|box_extract|box_get|box_len|box_put|box_replace|acct_params_get|app_global_del|app_global_get|app_global_get_ex|app_global_put|app_local_del|app_local_get|app_local_get_ex|app_local_put|app_opted_in|app_params_get|asset_holding_get|asset_params_get|balance|block|log|min_balance)\\b"
},
{
"name": "keyword.operator.teal",
@@ -112,7 +112,7 @@
},
{
"name": "variable.parameter.teal",
- "match": "\\b(unknown|pay|keyreg|acfg|axfer|afrz|appl|NoOp|OptIn|CloseOut|ClearState|UpdateApplication|DeleteApplication|Secp256k1|Secp256r1|Sender|Fee|FirstValid|FirstValidTime|LastValid|Note|Lease|Receiver|Amount|CloseRemainderTo|VotePK|SelectionPK|VoteFirst|VoteLast|VoteKeyDilution|Type|TypeEnum|XferAsset|AssetAmount|AssetSender|AssetReceiver|AssetCloseTo|GroupIndex|TxID|ApplicationID|OnCompletion|NumAppArgs|NumAccounts|ApprovalProgram|ClearStateProgram|RekeyTo|ConfigAsset|ConfigAssetTotal|ConfigAssetDecimals|ConfigAssetDefaultFrozen|ConfigAssetUnitName|ConfigAssetName|ConfigAssetURL|ConfigAssetMetadataHash|ConfigAssetManager|ConfigAssetReserve|ConfigAssetFreeze|ConfigAssetClawback|FreezeAsset|FreezeAssetAccount|FreezeAssetFrozen|NumAssets|NumApplications|GlobalNumUint|GlobalNumByteSlice|LocalNumUint|LocalNumByteSlice|ExtraProgramPages|Nonparticipation|NumLogs|CreatedAssetID|CreatedApplicationID|LastLog|StateProofPK|NumApprovalProgramPages|NumClearStateProgramPages|MinTxnFee|MinBalance|MaxTxnLife|ZeroAddress|GroupSize|LogicSigVersion|Round|LatestTimestamp|CurrentApplicationID|CreatorAddress|CurrentApplicationAddress|GroupID|OpcodeBudget|CallerApplicationID|CallerApplicationAddress|ApplicationArgs|Accounts|Assets|Applications|Logs|ApprovalProgramPages|ClearStateProgramPages|URLEncoding|StdEncoding|JSONString|JSONUint64|JSONObject|AssetBalance|AssetFrozen|AssetTotal|AssetDecimals|AssetDefaultFrozen|AssetUnitName|AssetName|AssetURL|AssetMetadataHash|AssetManager|AssetReserve|AssetFreeze|AssetClawback|AssetCreator|AppApprovalProgram|AppClearStateProgram|AppGlobalNumUint|AppGlobalNumByteSlice|AppLocalNumUint|AppLocalNumByteSlice|AppExtraProgramPages|AppCreator|AppAddress|AcctBalance|AcctMinBalance|AcctAuthAddr|VrfAlgorand|BlkSeed|BlkTimestamp)\\b"
+ "match": "\\b(unknown|pay|keyreg|acfg|axfer|afrz|appl|NoOp|OptIn|CloseOut|ClearState|UpdateApplication|DeleteApplication|Secp256k1|Secp256r1|Sender|Fee|FirstValid|FirstValidTime|LastValid|Note|Lease|Receiver|Amount|CloseRemainderTo|VotePK|SelectionPK|VoteFirst|VoteLast|VoteKeyDilution|Type|TypeEnum|XferAsset|AssetAmount|AssetSender|AssetReceiver|AssetCloseTo|GroupIndex|TxID|ApplicationID|OnCompletion|NumAppArgs|NumAccounts|ApprovalProgram|ClearStateProgram|RekeyTo|ConfigAsset|ConfigAssetTotal|ConfigAssetDecimals|ConfigAssetDefaultFrozen|ConfigAssetUnitName|ConfigAssetName|ConfigAssetURL|ConfigAssetMetadataHash|ConfigAssetManager|ConfigAssetReserve|ConfigAssetFreeze|ConfigAssetClawback|FreezeAsset|FreezeAssetAccount|FreezeAssetFrozen|NumAssets|NumApplications|GlobalNumUint|GlobalNumByteSlice|LocalNumUint|LocalNumByteSlice|ExtraProgramPages|Nonparticipation|NumLogs|CreatedAssetID|CreatedApplicationID|LastLog|StateProofPK|NumApprovalProgramPages|NumClearStateProgramPages|MinTxnFee|MinBalance|MaxTxnLife|ZeroAddress|GroupSize|LogicSigVersion|Round|LatestTimestamp|CurrentApplicationID|CreatorAddress|CurrentApplicationAddress|GroupID|OpcodeBudget|CallerApplicationID|CallerApplicationAddress|ApplicationArgs|Accounts|Assets|Applications|Logs|ApprovalProgramPages|ClearStateProgramPages|URLEncoding|StdEncoding|JSONString|JSONUint64|JSONObject|AssetBalance|AssetFrozen|AssetTotal|AssetDecimals|AssetDefaultFrozen|AssetUnitName|AssetName|AssetURL|AssetMetadataHash|AssetManager|AssetReserve|AssetFreeze|AssetClawback|AssetCreator|AppApprovalProgram|AppClearStateProgram|AppGlobalNumUint|AppGlobalNumByteSlice|AppLocalNumUint|AppLocalNumByteSlice|AppExtraProgramPages|AppCreator|AppAddress|AcctBalance|AcctMinBalance|AcctAuthAddr|AcctTotalNumUint|AcctTotalNumByteSlice|AcctTotalExtraAppPages|AcctTotalAppsCreated|AcctTotalAppsOptedIn|AcctTotalAssetsCreated|AcctTotalAssets|AcctTotalBoxes|AcctTotalBoxBytes|VrfAlgorand|BlkSeed|BlkTimestamp)\\b"
}
]
},
diff --git a/data/transactions/msgp_gen.go b/data/transactions/msgp_gen.go
index 779a74d59..bcb87f64e 100644
--- a/data/transactions/msgp_gen.go
+++ b/data/transactions/msgp_gen.go
@@ -53,6 +53,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// BoxRef
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// EvalDelta
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -178,56 +186,60 @@ import (
func (z *ApplicationCallTxnFields) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0005Len := uint32(11)
- var zb0005Mask uint16 /* 12 bits */
+ zb0006Len := uint32(12)
+ var zb0006Mask uint16 /* 13 bits */
if len((*z).ApplicationArgs) == 0 {
- zb0005Len--
- zb0005Mask |= 0x2
+ zb0006Len--
+ zb0006Mask |= 0x2
}
if (*z).OnCompletion == 0 {
- zb0005Len--
- zb0005Mask |= 0x4
+ zb0006Len--
+ zb0006Mask |= 0x4
}
if len((*z).ApprovalProgram) == 0 {
- zb0005Len--
- zb0005Mask |= 0x8
+ zb0006Len--
+ zb0006Mask |= 0x8
}
if len((*z).ForeignAssets) == 0 {
- zb0005Len--
- zb0005Mask |= 0x10
+ zb0006Len--
+ zb0006Mask |= 0x10
}
if len((*z).Accounts) == 0 {
- zb0005Len--
- zb0005Mask |= 0x20
+ zb0006Len--
+ zb0006Mask |= 0x20
+ }
+ if len((*z).Boxes) == 0 {
+ zb0006Len--
+ zb0006Mask |= 0x40
}
if (*z).ExtraProgramPages == 0 {
- zb0005Len--
- zb0005Mask |= 0x40
+ zb0006Len--
+ zb0006Mask |= 0x80
}
if len((*z).ForeignApps) == 0 {
- zb0005Len--
- zb0005Mask |= 0x80
+ zb0006Len--
+ zb0006Mask |= 0x100
}
if (*z).GlobalStateSchema.MsgIsZero() {
- zb0005Len--
- zb0005Mask |= 0x100
+ zb0006Len--
+ zb0006Mask |= 0x200
}
if (*z).ApplicationID.MsgIsZero() {
- zb0005Len--
- zb0005Mask |= 0x200
+ zb0006Len--
+ zb0006Mask |= 0x400
}
if (*z).LocalStateSchema.MsgIsZero() {
- zb0005Len--
- zb0005Mask |= 0x400
+ zb0006Len--
+ zb0006Mask |= 0x800
}
if len((*z).ClearStateProgram) == 0 {
- zb0005Len--
- zb0005Mask |= 0x800
+ zb0006Len--
+ zb0006Mask |= 0x1000
}
- // variable map header, size zb0005Len
- o = append(o, 0x80|uint8(zb0005Len))
- if zb0005Len != 0 {
- if (zb0005Mask & 0x2) == 0 { // if not empty
+ // variable map header, size zb0006Len
+ o = append(o, 0x80|uint8(zb0006Len))
+ if zb0006Len != 0 {
+ if (zb0006Mask & 0x2) == 0 { // if not empty
// string "apaa"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x61)
if (*z).ApplicationArgs == nil {
@@ -239,17 +251,17 @@ func (z *ApplicationCallTxnFields) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendBytes(o, (*z).ApplicationArgs[zb0001])
}
}
- if (zb0005Mask & 0x4) == 0 { // if not empty
+ if (zb0006Mask & 0x4) == 0 { // if not empty
// string "apan"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x6e)
o = msgp.AppendUint64(o, uint64((*z).OnCompletion))
}
- if (zb0005Mask & 0x8) == 0 { // if not empty
+ if (zb0006Mask & 0x8) == 0 { // if not empty
// string "apap"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x70)
o = msgp.AppendBytes(o, (*z).ApprovalProgram)
}
- if (zb0005Mask & 0x10) == 0 { // if not empty
+ if (zb0006Mask & 0x10) == 0 { // if not empty
// string "apas"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x73)
if (*z).ForeignAssets == nil {
@@ -257,11 +269,11 @@ func (z *ApplicationCallTxnFields) MarshalMsg(b []byte) (o []byte) {
} else {
o = msgp.AppendArrayHeader(o, uint32(len((*z).ForeignAssets)))
}
- for zb0004 := range (*z).ForeignAssets {
- o = (*z).ForeignAssets[zb0004].MarshalMsg(o)
+ for zb0005 := range (*z).ForeignAssets {
+ o = (*z).ForeignAssets[zb0005].MarshalMsg(o)
}
}
- if (zb0005Mask & 0x20) == 0 { // if not empty
+ if (zb0006Mask & 0x20) == 0 { // if not empty
// string "apat"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x74)
if (*z).Accounts == nil {
@@ -273,12 +285,46 @@ func (z *ApplicationCallTxnFields) MarshalMsg(b []byte) (o []byte) {
o = (*z).Accounts[zb0002].MarshalMsg(o)
}
}
- if (zb0005Mask & 0x40) == 0 { // if not empty
+ if (zb0006Mask & 0x40) == 0 { // if not empty
+ // string "apbx"
+ o = append(o, 0xa4, 0x61, 0x70, 0x62, 0x78)
+ if (*z).Boxes == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Boxes)))
+ }
+ for zb0004 := range (*z).Boxes {
+ // omitempty: check for empty values
+ zb0007Len := uint32(2)
+ var zb0007Mask uint8 /* 3 bits */
+ if (*z).Boxes[zb0004].Index == 0 {
+ zb0007Len--
+ zb0007Mask |= 0x2
+ }
+ if len((*z).Boxes[zb0004].Name) == 0 {
+ zb0007Len--
+ zb0007Mask |= 0x4
+ }
+ // variable map header, size zb0007Len
+ o = append(o, 0x80|uint8(zb0007Len))
+ if (zb0007Mask & 0x2) == 0 { // if not empty
+ // string "i"
+ o = append(o, 0xa1, 0x69)
+ o = msgp.AppendUint64(o, (*z).Boxes[zb0004].Index)
+ }
+ if (zb0007Mask & 0x4) == 0 { // if not empty
+ // string "n"
+ o = append(o, 0xa1, 0x6e)
+ o = msgp.AppendBytes(o, (*z).Boxes[zb0004].Name)
+ }
+ }
+ }
+ if (zb0006Mask & 0x80) == 0 { // if not empty
// string "apep"
o = append(o, 0xa4, 0x61, 0x70, 0x65, 0x70)
o = msgp.AppendUint32(o, (*z).ExtraProgramPages)
}
- if (zb0005Mask & 0x80) == 0 { // if not empty
+ if (zb0006Mask & 0x100) == 0 { // if not empty
// string "apfa"
o = append(o, 0xa4, 0x61, 0x70, 0x66, 0x61)
if (*z).ForeignApps == nil {
@@ -290,22 +336,22 @@ func (z *ApplicationCallTxnFields) MarshalMsg(b []byte) (o []byte) {
o = (*z).ForeignApps[zb0003].MarshalMsg(o)
}
}
- if (zb0005Mask & 0x100) == 0 { // if not empty
+ if (zb0006Mask & 0x200) == 0 { // if not empty
// string "apgs"
o = append(o, 0xa4, 0x61, 0x70, 0x67, 0x73)
o = (*z).GlobalStateSchema.MarshalMsg(o)
}
- if (zb0005Mask & 0x200) == 0 { // if not empty
+ if (zb0006Mask & 0x400) == 0 { // if not empty
// string "apid"
o = append(o, 0xa4, 0x61, 0x70, 0x69, 0x64)
o = (*z).ApplicationID.MarshalMsg(o)
}
- if (zb0005Mask & 0x400) == 0 { // if not empty
+ if (zb0006Mask & 0x800) == 0 { // if not empty
// string "apls"
o = append(o, 0xa4, 0x61, 0x70, 0x6c, 0x73)
o = (*z).LocalStateSchema.MarshalMsg(o)
}
- if (zb0005Mask & 0x800) == 0 { // if not empty
+ if (zb0006Mask & 0x1000) == 0 { // if not empty
// string "apsu"
o = append(o, 0xa4, 0x61, 0x70, 0x73, 0x75)
o = msgp.AppendBytes(o, (*z).ClearStateProgram)
@@ -323,55 +369,55 @@ func (_ *ApplicationCallTxnFields) CanMarshalMsg(z interface{}) bool {
func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0005 > 0 {
- zb0005--
+ if zb0006 > 0 {
+ zb0006--
bts, err = (*z).ApplicationID.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ApplicationID")
return
}
}
- if zb0005 > 0 {
- zb0005--
+ if zb0006 > 0 {
+ zb0006--
{
- var zb0007 uint64
- zb0007, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0008 uint64
+ zb0008, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OnCompletion")
return
}
- (*z).OnCompletion = OnCompletion(zb0007)
+ (*z).OnCompletion = OnCompletion(zb0008)
}
}
- if zb0005 > 0 {
- zb0005--
- var zb0008 int
- var zb0009 bool
- zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0006 > 0 {
+ zb0006--
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
return
}
- if zb0008 > encodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(encodedMaxApplicationArgs))
+ if zb0009 > encodedMaxApplicationArgs {
+ err = msgp.ErrOverflow(uint64(zb0009), uint64(encodedMaxApplicationArgs))
err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
return
}
- if zb0009 {
+ if zb0010 {
(*z).ApplicationArgs = nil
- } else if (*z).ApplicationArgs != nil && cap((*z).ApplicationArgs) >= zb0008 {
- (*z).ApplicationArgs = ((*z).ApplicationArgs)[:zb0008]
+ } else if (*z).ApplicationArgs != nil && cap((*z).ApplicationArgs) >= zb0009 {
+ (*z).ApplicationArgs = ((*z).ApplicationArgs)[:zb0009]
} else {
- (*z).ApplicationArgs = make([][]byte, zb0008)
+ (*z).ApplicationArgs = make([][]byte, zb0009)
}
for zb0001 := range (*z).ApplicationArgs {
(*z).ApplicationArgs[zb0001], bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationArgs[zb0001])
@@ -381,26 +427,26 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
}
}
- if zb0005 > 0 {
- zb0005--
- var zb0010 int
- var zb0011 bool
- zb0010, zb0011, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0006 > 0 {
+ zb0006--
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Accounts")
return
}
- if zb0010 > encodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0010), uint64(encodedMaxAccounts))
+ if zb0011 > encodedMaxAccounts {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(encodedMaxAccounts))
err = msgp.WrapError(err, "struct-from-array", "Accounts")
return
}
- if zb0011 {
+ if zb0012 {
(*z).Accounts = nil
- } else if (*z).Accounts != nil && cap((*z).Accounts) >= zb0010 {
- (*z).Accounts = ((*z).Accounts)[:zb0010]
+ } else if (*z).Accounts != nil && cap((*z).Accounts) >= zb0011 {
+ (*z).Accounts = ((*z).Accounts)[:zb0011]
} else {
- (*z).Accounts = make([]basics.Address, zb0010)
+ (*z).Accounts = make([]basics.Address, zb0011)
}
for zb0002 := range (*z).Accounts {
bts, err = (*z).Accounts[zb0002].UnmarshalMsg(bts)
@@ -410,26 +456,26 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
}
}
- if zb0005 > 0 {
- zb0005--
- var zb0012 int
- var zb0013 bool
- zb0012, zb0013, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0006 > 0 {
+ zb0006--
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
return
}
- if zb0012 > encodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0012), uint64(encodedMaxForeignApps))
+ if zb0013 > encodedMaxForeignApps {
+ err = msgp.ErrOverflow(uint64(zb0013), uint64(encodedMaxForeignApps))
err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
return
}
- if zb0013 {
+ if zb0014 {
(*z).ForeignApps = nil
- } else if (*z).ForeignApps != nil && cap((*z).ForeignApps) >= zb0012 {
- (*z).ForeignApps = ((*z).ForeignApps)[:zb0012]
+ } else if (*z).ForeignApps != nil && cap((*z).ForeignApps) >= zb0013 {
+ (*z).ForeignApps = ((*z).ForeignApps)[:zb0013]
} else {
- (*z).ForeignApps = make([]basics.AppIndex, zb0012)
+ (*z).ForeignApps = make([]basics.AppIndex, zb0013)
}
for zb0003 := range (*z).ForeignApps {
bts, err = (*z).ForeignApps[zb0003].UnmarshalMsg(bts)
@@ -439,61 +485,154 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
}
}
- if zb0005 > 0 {
- zb0005--
- var zb0014 int
- var zb0015 bool
- zb0014, zb0015, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0006 > 0 {
+ zb0006--
+ var zb0015 int
+ var zb0016 bool
+ zb0015, zb0016, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes")
+ return
+ }
+ if zb0015 > encodedMaxBoxes {
+ err = msgp.ErrOverflow(uint64(zb0015), uint64(encodedMaxBoxes))
+ err = msgp.WrapError(err, "struct-from-array", "Boxes")
+ return
+ }
+ if zb0016 {
+ (*z).Boxes = nil
+ } else if (*z).Boxes != nil && cap((*z).Boxes) >= zb0015 {
+ (*z).Boxes = ((*z).Boxes)[:zb0015]
+ } else {
+ (*z).Boxes = make([]BoxRef, zb0015)
+ }
+ for zb0004 := range (*z).Boxes {
+ var zb0017 int
+ var zb0018 bool
+ zb0017, zb0018, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0017, zb0018, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004)
+ return
+ }
+ if zb0017 > 0 {
+ zb0017--
+ (*z).Boxes[zb0004].Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004, "struct-from-array", "Index")
+ return
+ }
+ }
+ if zb0017 > 0 {
+ zb0017--
+ (*z).Boxes[zb0004].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Boxes[zb0004].Name)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004, "struct-from-array", "Name")
+ return
+ }
+ }
+ if zb0017 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0017)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004)
+ return
+ }
+ if zb0018 {
+ (*z).Boxes[zb0004] = BoxRef{}
+ }
+ for zb0017 > 0 {
+ zb0017--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004)
+ return
+ }
+ switch string(field) {
+ case "i":
+ (*z).Boxes[zb0004].Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004, "Index")
+ return
+ }
+ case "n":
+ (*z).Boxes[zb0004].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Boxes[zb0004].Name)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004, "Name")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004)
+ return
+ }
+ }
+ }
+ }
+ }
+ }
+ if zb0006 > 0 {
+ zb0006--
+ var zb0019 int
+ var zb0020 bool
+ zb0019, zb0020, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
return
}
- if zb0014 > encodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0014), uint64(encodedMaxForeignAssets))
+ if zb0019 > encodedMaxForeignAssets {
+ err = msgp.ErrOverflow(uint64(zb0019), uint64(encodedMaxForeignAssets))
err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
return
}
- if zb0015 {
+ if zb0020 {
(*z).ForeignAssets = nil
- } else if (*z).ForeignAssets != nil && cap((*z).ForeignAssets) >= zb0014 {
- (*z).ForeignAssets = ((*z).ForeignAssets)[:zb0014]
+ } else if (*z).ForeignAssets != nil && cap((*z).ForeignAssets) >= zb0019 {
+ (*z).ForeignAssets = ((*z).ForeignAssets)[:zb0019]
} else {
- (*z).ForeignAssets = make([]basics.AssetIndex, zb0014)
+ (*z).ForeignAssets = make([]basics.AssetIndex, zb0019)
}
- for zb0004 := range (*z).ForeignAssets {
- bts, err = (*z).ForeignAssets[zb0004].UnmarshalMsg(bts)
+ for zb0005 := range (*z).ForeignAssets {
+ bts, err = (*z).ForeignAssets[zb0005].UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0004)
+ err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0005)
return
}
}
}
- if zb0005 > 0 {
- zb0005--
+ if zb0006 > 0 {
+ zb0006--
bts, err = (*z).LocalStateSchema.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "LocalStateSchema")
return
}
}
- if zb0005 > 0 {
- zb0005--
+ if zb0006 > 0 {
+ zb0006--
bts, err = (*z).GlobalStateSchema.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GlobalStateSchema")
return
}
}
- if zb0005 > 0 {
- zb0005--
- var zb0016 int
- zb0016, err = msgp.ReadBytesBytesHeader(bts)
+ if zb0006 > 0 {
+ zb0006--
+ var zb0021 int
+ zb0021, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram")
return
}
- if zb0016 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0016), uint64(config.MaxAvailableAppProgramLen))
+ if zb0021 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0021), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApprovalProgram)
@@ -502,16 +641,16 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
return
}
}
- if zb0005 > 0 {
- zb0005--
- var zb0017 int
- zb0017, err = msgp.ReadBytesBytesHeader(bts)
+ if zb0006 > 0 {
+ zb0006--
+ var zb0022 int
+ zb0022, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram")
return
}
- if zb0017 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0017), uint64(config.MaxAvailableAppProgramLen))
+ if zb0022 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0022), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ClearStateProgram)
@@ -520,16 +659,16 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
return
}
}
- if zb0005 > 0 {
- zb0005--
+ if zb0006 > 0 {
+ zb0006--
(*z).ExtraProgramPages, bts, err = msgp.ReadUint32Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ExtraProgramPages")
return
}
}
- if zb0005 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0005)
+ if zb0006 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0006)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -540,11 +679,11 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
err = msgp.WrapError(err)
return
}
- if zb0006 {
+ if zb0007 {
(*z) = ApplicationCallTxnFields{}
}
- for zb0005 > 0 {
- zb0005--
+ for zb0006 > 0 {
+ zb0006--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -559,33 +698,33 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
case "apan":
{
- var zb0018 uint64
- zb0018, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0023 uint64
+ zb0023, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OnCompletion")
return
}
- (*z).OnCompletion = OnCompletion(zb0018)
+ (*z).OnCompletion = OnCompletion(zb0023)
}
case "apaa":
- var zb0019 int
- var zb0020 bool
- zb0019, zb0020, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0024 int
+ var zb0025 bool
+ zb0024, zb0025, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ApplicationArgs")
return
}
- if zb0019 > encodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0019), uint64(encodedMaxApplicationArgs))
+ if zb0024 > encodedMaxApplicationArgs {
+ err = msgp.ErrOverflow(uint64(zb0024), uint64(encodedMaxApplicationArgs))
err = msgp.WrapError(err, "ApplicationArgs")
return
}
- if zb0020 {
+ if zb0025 {
(*z).ApplicationArgs = nil
- } else if (*z).ApplicationArgs != nil && cap((*z).ApplicationArgs) >= zb0019 {
- (*z).ApplicationArgs = ((*z).ApplicationArgs)[:zb0019]
+ } else if (*z).ApplicationArgs != nil && cap((*z).ApplicationArgs) >= zb0024 {
+ (*z).ApplicationArgs = ((*z).ApplicationArgs)[:zb0024]
} else {
- (*z).ApplicationArgs = make([][]byte, zb0019)
+ (*z).ApplicationArgs = make([][]byte, zb0024)
}
for zb0001 := range (*z).ApplicationArgs {
(*z).ApplicationArgs[zb0001], bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationArgs[zb0001])
@@ -595,24 +734,24 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
}
case "apat":
- var zb0021 int
- var zb0022 bool
- zb0021, zb0022, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0026 int
+ var zb0027 bool
+ zb0026, zb0027, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Accounts")
return
}
- if zb0021 > encodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0021), uint64(encodedMaxAccounts))
+ if zb0026 > encodedMaxAccounts {
+ err = msgp.ErrOverflow(uint64(zb0026), uint64(encodedMaxAccounts))
err = msgp.WrapError(err, "Accounts")
return
}
- if zb0022 {
+ if zb0027 {
(*z).Accounts = nil
- } else if (*z).Accounts != nil && cap((*z).Accounts) >= zb0021 {
- (*z).Accounts = ((*z).Accounts)[:zb0021]
+ } else if (*z).Accounts != nil && cap((*z).Accounts) >= zb0026 {
+ (*z).Accounts = ((*z).Accounts)[:zb0026]
} else {
- (*z).Accounts = make([]basics.Address, zb0021)
+ (*z).Accounts = make([]basics.Address, zb0026)
}
for zb0002 := range (*z).Accounts {
bts, err = (*z).Accounts[zb0002].UnmarshalMsg(bts)
@@ -622,24 +761,24 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
}
case "apfa":
- var zb0023 int
- var zb0024 bool
- zb0023, zb0024, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0028 int
+ var zb0029 bool
+ zb0028, zb0029, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ForeignApps")
return
}
- if zb0023 > encodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0023), uint64(encodedMaxForeignApps))
+ if zb0028 > encodedMaxForeignApps {
+ err = msgp.ErrOverflow(uint64(zb0028), uint64(encodedMaxForeignApps))
err = msgp.WrapError(err, "ForeignApps")
return
}
- if zb0024 {
+ if zb0029 {
(*z).ForeignApps = nil
- } else if (*z).ForeignApps != nil && cap((*z).ForeignApps) >= zb0023 {
- (*z).ForeignApps = ((*z).ForeignApps)[:zb0023]
+ } else if (*z).ForeignApps != nil && cap((*z).ForeignApps) >= zb0028 {
+ (*z).ForeignApps = ((*z).ForeignApps)[:zb0028]
} else {
- (*z).ForeignApps = make([]basics.AppIndex, zb0023)
+ (*z).ForeignApps = make([]basics.AppIndex, zb0028)
}
for zb0003 := range (*z).ForeignApps {
bts, err = (*z).ForeignApps[zb0003].UnmarshalMsg(bts)
@@ -648,30 +787,121 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
return
}
}
+ case "apbx":
+ var zb0030 int
+ var zb0031 bool
+ zb0030, zb0031, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes")
+ return
+ }
+ if zb0030 > encodedMaxBoxes {
+ err = msgp.ErrOverflow(uint64(zb0030), uint64(encodedMaxBoxes))
+ err = msgp.WrapError(err, "Boxes")
+ return
+ }
+ if zb0031 {
+ (*z).Boxes = nil
+ } else if (*z).Boxes != nil && cap((*z).Boxes) >= zb0030 {
+ (*z).Boxes = ((*z).Boxes)[:zb0030]
+ } else {
+ (*z).Boxes = make([]BoxRef, zb0030)
+ }
+ for zb0004 := range (*z).Boxes {
+ var zb0032 int
+ var zb0033 bool
+ zb0032, zb0033, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0032, zb0033, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004)
+ return
+ }
+ if zb0032 > 0 {
+ zb0032--
+ (*z).Boxes[zb0004].Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004, "struct-from-array", "Index")
+ return
+ }
+ }
+ if zb0032 > 0 {
+ zb0032--
+ (*z).Boxes[zb0004].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Boxes[zb0004].Name)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004, "struct-from-array", "Name")
+ return
+ }
+ }
+ if zb0032 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0032)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004)
+ return
+ }
+ if zb0033 {
+ (*z).Boxes[zb0004] = BoxRef{}
+ }
+ for zb0032 > 0 {
+ zb0032--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004)
+ return
+ }
+ switch string(field) {
+ case "i":
+ (*z).Boxes[zb0004].Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004, "Index")
+ return
+ }
+ case "n":
+ (*z).Boxes[zb0004].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Boxes[zb0004].Name)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004, "Name")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004)
+ return
+ }
+ }
+ }
+ }
+ }
case "apas":
- var zb0025 int
- var zb0026 bool
- zb0025, zb0026, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0034 int
+ var zb0035 bool
+ zb0034, zb0035, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ForeignAssets")
return
}
- if zb0025 > encodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0025), uint64(encodedMaxForeignAssets))
+ if zb0034 > encodedMaxForeignAssets {
+ err = msgp.ErrOverflow(uint64(zb0034), uint64(encodedMaxForeignAssets))
err = msgp.WrapError(err, "ForeignAssets")
return
}
- if zb0026 {
+ if zb0035 {
(*z).ForeignAssets = nil
- } else if (*z).ForeignAssets != nil && cap((*z).ForeignAssets) >= zb0025 {
- (*z).ForeignAssets = ((*z).ForeignAssets)[:zb0025]
+ } else if (*z).ForeignAssets != nil && cap((*z).ForeignAssets) >= zb0034 {
+ (*z).ForeignAssets = ((*z).ForeignAssets)[:zb0034]
} else {
- (*z).ForeignAssets = make([]basics.AssetIndex, zb0025)
+ (*z).ForeignAssets = make([]basics.AssetIndex, zb0034)
}
- for zb0004 := range (*z).ForeignAssets {
- bts, err = (*z).ForeignAssets[zb0004].UnmarshalMsg(bts)
+ for zb0005 := range (*z).ForeignAssets {
+ bts, err = (*z).ForeignAssets[zb0005].UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "ForeignAssets", zb0004)
+ err = msgp.WrapError(err, "ForeignAssets", zb0005)
return
}
}
@@ -688,14 +918,14 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
return
}
case "apap":
- var zb0027 int
- zb0027, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0036 int
+ zb0036, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "ApprovalProgram")
return
}
- if zb0027 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0027), uint64(config.MaxAvailableAppProgramLen))
+ if zb0036 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0036), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApprovalProgram)
@@ -704,14 +934,14 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
return
}
case "apsu":
- var zb0028 int
- zb0028, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0037 int
+ zb0037, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "ClearStateProgram")
return
}
- if zb0028 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0028), uint64(config.MaxAvailableAppProgramLen))
+ if zb0037 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0037), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ClearStateProgram)
@@ -758,8 +988,12 @@ func (z *ApplicationCallTxnFields) Msgsize() (s int) {
s += (*z).ForeignApps[zb0003].Msgsize()
}
s += 5 + msgp.ArrayHeaderSize
- for zb0004 := range (*z).ForeignAssets {
- s += (*z).ForeignAssets[zb0004].Msgsize()
+ for zb0004 := range (*z).Boxes {
+ s += 1 + 2 + msgp.Uint64Size + 2 + msgp.BytesPrefixSize + len((*z).Boxes[zb0004].Name)
+ }
+ s += 5 + msgp.ArrayHeaderSize
+ for zb0005 := range (*z).ForeignAssets {
+ s += (*z).ForeignAssets[zb0005].Msgsize()
}
s += 5 + (*z).LocalStateSchema.Msgsize() + 5 + (*z).GlobalStateSchema.Msgsize() + 5 + msgp.BytesPrefixSize + len((*z).ApprovalProgram) + 5 + msgp.BytesPrefixSize + len((*z).ClearStateProgram) + 5 + msgp.Uint32Size
return
@@ -767,7 +1001,7 @@ func (z *ApplicationCallTxnFields) Msgsize() (s int) {
// MsgIsZero returns whether this is a zero value
func (z *ApplicationCallTxnFields) MsgIsZero() bool {
- return ((*z).ApplicationID.MsgIsZero()) && ((*z).OnCompletion == 0) && (len((*z).ApplicationArgs) == 0) && (len((*z).Accounts) == 0) && (len((*z).ForeignApps) == 0) && (len((*z).ForeignAssets) == 0) && ((*z).LocalStateSchema.MsgIsZero()) && ((*z).GlobalStateSchema.MsgIsZero()) && (len((*z).ApprovalProgram) == 0) && (len((*z).ClearStateProgram) == 0) && ((*z).ExtraProgramPages == 0)
+ return ((*z).ApplicationID.MsgIsZero()) && ((*z).OnCompletion == 0) && (len((*z).ApplicationArgs) == 0) && (len((*z).Accounts) == 0) && (len((*z).ForeignApps) == 0) && (len((*z).Boxes) == 0) && (len((*z).ForeignAssets) == 0) && ((*z).LocalStateSchema.MsgIsZero()) && ((*z).GlobalStateSchema.MsgIsZero()) && (len((*z).ApprovalProgram) == 0) && (len((*z).ClearStateProgram) == 0) && ((*z).ExtraProgramPages == 0)
}
// MarshalMsg implements msgp.Marshaler
@@ -1517,6 +1751,135 @@ func (z *AssetTransferTxnFields) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *BoxRef) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(2)
+ var zb0001Mask uint8 /* 3 bits */
+ if (*z).Index == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if len((*z).Name) == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "i"
+ o = append(o, 0xa1, 0x69)
+ o = msgp.AppendUint64(o, (*z).Index)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "n"
+ o = append(o, 0xa1, 0x6e)
+ o = msgp.AppendBytes(o, (*z).Name)
+ }
+ }
+ return
+}
+
+func (_ *BoxRef) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*BoxRef)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *BoxRef) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Index")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Name)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Name")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = BoxRef{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "i":
+ (*z).Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Index")
+ return
+ }
+ case "n":
+ (*z).Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Name)
+ if err != nil {
+ err = msgp.WrapError(err, "Name")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *BoxRef) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*BoxRef)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *BoxRef) Msgsize() (s int) {
+ s = 1 + 2 + msgp.Uint64Size + 2 + msgp.BytesPrefixSize + len((*z).Name)
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *BoxRef) MsgIsZero() bool {
+ return ((*z).Index == 0) && (len((*z).Name) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *EvalDelta) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -4200,212 +4563,216 @@ func (z *StateProofTxnFields) MsgIsZero() bool {
func (z *Transaction) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0006Len := uint32(45)
- var zb0006Mask uint64 /* 54 bits */
+ zb0007Len := uint32(46)
+ var zb0007Mask uint64 /* 55 bits */
if (*z).AssetTransferTxnFields.AssetAmount == 0 {
- zb0006Len--
- zb0006Mask |= 0x200
+ zb0007Len--
+ zb0007Mask |= 0x200
}
if (*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x400
+ zb0007Len--
+ zb0007Mask |= 0x400
}
if (*z).AssetFreezeTxnFields.AssetFrozen == false {
- zb0006Len--
- zb0006Mask |= 0x800
+ zb0007Len--
+ zb0007Mask |= 0x800
}
if (*z).PaymentTxnFields.Amount.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x1000
+ zb0007Len--
+ zb0007Mask |= 0x1000
}
if len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0 {
- zb0006Len--
- zb0006Mask |= 0x2000
+ zb0007Len--
+ zb0007Mask |= 0x2000
}
if (*z).ApplicationCallTxnFields.OnCompletion == 0 {
- zb0006Len--
- zb0006Mask |= 0x4000
+ zb0007Len--
+ zb0007Mask |= 0x4000
}
if len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0 {
- zb0006Len--
- zb0006Mask |= 0x8000
+ zb0007Len--
+ zb0007Mask |= 0x8000
}
if (*z).AssetConfigTxnFields.AssetParams.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x10000
+ zb0007Len--
+ zb0007Mask |= 0x10000
}
if len((*z).ApplicationCallTxnFields.ForeignAssets) == 0 {
- zb0006Len--
- zb0006Mask |= 0x20000
+ zb0007Len--
+ zb0007Mask |= 0x20000
}
if len((*z).ApplicationCallTxnFields.Accounts) == 0 {
- zb0006Len--
- zb0006Mask |= 0x40000
+ zb0007Len--
+ zb0007Mask |= 0x40000
+ }
+ if len((*z).ApplicationCallTxnFields.Boxes) == 0 {
+ zb0007Len--
+ zb0007Mask |= 0x80000
}
if (*z).ApplicationCallTxnFields.ExtraProgramPages == 0 {
- zb0006Len--
- zb0006Mask |= 0x80000
+ zb0007Len--
+ zb0007Mask |= 0x100000
}
if len((*z).ApplicationCallTxnFields.ForeignApps) == 0 {
- zb0006Len--
- zb0006Mask |= 0x100000
+ zb0007Len--
+ zb0007Mask |= 0x200000
}
if (*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x200000
+ zb0007Len--
+ zb0007Mask |= 0x400000
}
if (*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x400000
+ zb0007Len--
+ zb0007Mask |= 0x800000
}
if (*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x800000
+ zb0007Len--
+ zb0007Mask |= 0x1000000
}
if len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0 {
- zb0006Len--
- zb0006Mask |= 0x1000000
+ zb0007Len--
+ zb0007Mask |= 0x2000000
}
if (*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x2000000
+ zb0007Len--
+ zb0007Mask |= 0x4000000
}
if (*z).AssetTransferTxnFields.AssetSender.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x4000000
+ zb0007Len--
+ zb0007Mask |= 0x8000000
}
if (*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x8000000
+ zb0007Len--
+ zb0007Mask |= 0x10000000
}
if (*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x10000000
+ zb0007Len--
+ zb0007Mask |= 0x20000000
}
if (*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x20000000
+ zb0007Len--
+ zb0007Mask |= 0x40000000
}
if (*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x40000000
+ zb0007Len--
+ zb0007Mask |= 0x80000000
}
if (*z).Header.Fee.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x80000000
+ zb0007Len--
+ zb0007Mask |= 0x100000000
}
if (*z).Header.FirstValid.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x100000000
+ zb0007Len--
+ zb0007Mask |= 0x200000000
}
if (*z).Header.GenesisID == "" {
- zb0006Len--
- zb0006Mask |= 0x200000000
+ zb0007Len--
+ zb0007Mask |= 0x400000000
}
if (*z).Header.GenesisHash.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x400000000
+ zb0007Len--
+ zb0007Mask |= 0x800000000
}
if (*z).Header.Group.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x800000000
+ zb0007Len--
+ zb0007Mask |= 0x1000000000
}
if (*z).Header.LastValid.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x1000000000
+ zb0007Len--
+ zb0007Mask |= 0x2000000000
}
if (*z).Header.Lease == ([32]byte{}) {
- zb0006Len--
- zb0006Mask |= 0x2000000000
+ zb0007Len--
+ zb0007Mask |= 0x4000000000
}
if (*z).KeyregTxnFields.Nonparticipation == false {
- zb0006Len--
- zb0006Mask |= 0x4000000000
+ zb0007Len--
+ zb0007Mask |= 0x8000000000
}
if len((*z).Header.Note) == 0 {
- zb0006Len--
- zb0006Mask |= 0x8000000000
+ zb0007Len--
+ zb0007Mask |= 0x10000000000
}
if (*z).PaymentTxnFields.Receiver.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x10000000000
+ zb0007Len--
+ zb0007Mask |= 0x20000000000
}
if (*z).Header.RekeyTo.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x20000000000
+ zb0007Len--
+ zb0007Mask |= 0x40000000000
}
if (*z).KeyregTxnFields.SelectionPK.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x40000000000
+ zb0007Len--
+ zb0007Mask |= 0x80000000000
}
if (*z).Header.Sender.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x80000000000
+ zb0007Len--
+ zb0007Mask |= 0x100000000000
}
if (*z).StateProofTxnFields.StateProof.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x100000000000
+ zb0007Len--
+ zb0007Mask |= 0x200000000000
}
if (*z).StateProofTxnFields.Message.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x200000000000
+ zb0007Len--
+ zb0007Mask |= 0x400000000000
}
if (*z).KeyregTxnFields.StateProofPK.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x400000000000
+ zb0007Len--
+ zb0007Mask |= 0x800000000000
}
if (*z).StateProofTxnFields.StateProofType.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x800000000000
+ zb0007Len--
+ zb0007Mask |= 0x1000000000000
}
if (*z).Type.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x1000000000000
+ zb0007Len--
+ zb0007Mask |= 0x2000000000000
}
if (*z).KeyregTxnFields.VoteFirst.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x2000000000000
+ zb0007Len--
+ zb0007Mask |= 0x4000000000000
}
if (*z).KeyregTxnFields.VoteKeyDilution == 0 {
- zb0006Len--
- zb0006Mask |= 0x4000000000000
+ zb0007Len--
+ zb0007Mask |= 0x8000000000000
}
if (*z).KeyregTxnFields.VotePK.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x8000000000000
+ zb0007Len--
+ zb0007Mask |= 0x10000000000000
}
if (*z).KeyregTxnFields.VoteLast.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x10000000000000
+ zb0007Len--
+ zb0007Mask |= 0x20000000000000
}
if (*z).AssetTransferTxnFields.XferAsset.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x20000000000000
+ zb0007Len--
+ zb0007Mask |= 0x40000000000000
}
- // variable map header, size zb0006Len
- o = msgp.AppendMapHeader(o, zb0006Len)
- if zb0006Len != 0 {
- if (zb0006Mask & 0x200) == 0 { // if not empty
+ // variable map header, size zb0007Len
+ o = msgp.AppendMapHeader(o, zb0007Len)
+ if zb0007Len != 0 {
+ if (zb0007Mask & 0x200) == 0 { // if not empty
// string "aamt"
o = append(o, 0xa4, 0x61, 0x61, 0x6d, 0x74)
o = msgp.AppendUint64(o, (*z).AssetTransferTxnFields.AssetAmount)
}
- if (zb0006Mask & 0x400) == 0 { // if not empty
+ if (zb0007Mask & 0x400) == 0 { // if not empty
// string "aclose"
o = append(o, 0xa6, 0x61, 0x63, 0x6c, 0x6f, 0x73, 0x65)
o = (*z).AssetTransferTxnFields.AssetCloseTo.MarshalMsg(o)
}
- if (zb0006Mask & 0x800) == 0 { // if not empty
+ if (zb0007Mask & 0x800) == 0 { // if not empty
// string "afrz"
o = append(o, 0xa4, 0x61, 0x66, 0x72, 0x7a)
o = msgp.AppendBool(o, (*z).AssetFreezeTxnFields.AssetFrozen)
}
- if (zb0006Mask & 0x1000) == 0 { // if not empty
+ if (zb0007Mask & 0x1000) == 0 { // if not empty
// string "amt"
o = append(o, 0xa3, 0x61, 0x6d, 0x74)
o = (*z).PaymentTxnFields.Amount.MarshalMsg(o)
}
- if (zb0006Mask & 0x2000) == 0 { // if not empty
+ if (zb0007Mask & 0x2000) == 0 { // if not empty
// string "apaa"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x61)
if (*z).ApplicationCallTxnFields.ApplicationArgs == nil {
@@ -4417,22 +4784,22 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendBytes(o, (*z).ApplicationCallTxnFields.ApplicationArgs[zb0002])
}
}
- if (zb0006Mask & 0x4000) == 0 { // if not empty
+ if (zb0007Mask & 0x4000) == 0 { // if not empty
// string "apan"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x6e)
o = msgp.AppendUint64(o, uint64((*z).ApplicationCallTxnFields.OnCompletion))
}
- if (zb0006Mask & 0x8000) == 0 { // if not empty
+ if (zb0007Mask & 0x8000) == 0 { // if not empty
// string "apap"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x70)
o = msgp.AppendBytes(o, (*z).ApplicationCallTxnFields.ApprovalProgram)
}
- if (zb0006Mask & 0x10000) == 0 { // if not empty
+ if (zb0007Mask & 0x10000) == 0 { // if not empty
// string "apar"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x72)
o = (*z).AssetConfigTxnFields.AssetParams.MarshalMsg(o)
}
- if (zb0006Mask & 0x20000) == 0 { // if not empty
+ if (zb0007Mask & 0x20000) == 0 { // if not empty
// string "apas"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x73)
if (*z).ApplicationCallTxnFields.ForeignAssets == nil {
@@ -4440,11 +4807,11 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte) {
} else {
o = msgp.AppendArrayHeader(o, uint32(len((*z).ApplicationCallTxnFields.ForeignAssets)))
}
- for zb0005 := range (*z).ApplicationCallTxnFields.ForeignAssets {
- o = (*z).ApplicationCallTxnFields.ForeignAssets[zb0005].MarshalMsg(o)
+ for zb0006 := range (*z).ApplicationCallTxnFields.ForeignAssets {
+ o = (*z).ApplicationCallTxnFields.ForeignAssets[zb0006].MarshalMsg(o)
}
}
- if (zb0006Mask & 0x40000) == 0 { // if not empty
+ if (zb0007Mask & 0x40000) == 0 { // if not empty
// string "apat"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x74)
if (*z).ApplicationCallTxnFields.Accounts == nil {
@@ -4456,12 +4823,46 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte) {
o = (*z).ApplicationCallTxnFields.Accounts[zb0003].MarshalMsg(o)
}
}
- if (zb0006Mask & 0x80000) == 0 { // if not empty
+ if (zb0007Mask & 0x80000) == 0 { // if not empty
+ // string "apbx"
+ o = append(o, 0xa4, 0x61, 0x70, 0x62, 0x78)
+ if (*z).ApplicationCallTxnFields.Boxes == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).ApplicationCallTxnFields.Boxes)))
+ }
+ for zb0005 := range (*z).ApplicationCallTxnFields.Boxes {
+ // omitempty: check for empty values
+ zb0008Len := uint32(2)
+ var zb0008Mask uint8 /* 3 bits */
+ if (*z).ApplicationCallTxnFields.Boxes[zb0005].Index == 0 {
+ zb0008Len--
+ zb0008Mask |= 0x2
+ }
+ if len((*z).ApplicationCallTxnFields.Boxes[zb0005].Name) == 0 {
+ zb0008Len--
+ zb0008Mask |= 0x4
+ }
+ // variable map header, size zb0008Len
+ o = append(o, 0x80|uint8(zb0008Len))
+ if (zb0008Mask & 0x2) == 0 { // if not empty
+ // string "i"
+ o = append(o, 0xa1, 0x69)
+ o = msgp.AppendUint64(o, (*z).ApplicationCallTxnFields.Boxes[zb0005].Index)
+ }
+ if (zb0008Mask & 0x4) == 0 { // if not empty
+ // string "n"
+ o = append(o, 0xa1, 0x6e)
+ o = msgp.AppendBytes(o, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name)
+ }
+ }
+ }
+ if (zb0007Mask & 0x100000) == 0 { // if not empty
// string "apep"
o = append(o, 0xa4, 0x61, 0x70, 0x65, 0x70)
o = msgp.AppendUint32(o, (*z).ApplicationCallTxnFields.ExtraProgramPages)
}
- if (zb0006Mask & 0x100000) == 0 { // if not empty
+ if (zb0007Mask & 0x200000) == 0 { // if not empty
// string "apfa"
o = append(o, 0xa4, 0x61, 0x70, 0x66, 0x61)
if (*z).ApplicationCallTxnFields.ForeignApps == nil {
@@ -4473,167 +4874,167 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte) {
o = (*z).ApplicationCallTxnFields.ForeignApps[zb0004].MarshalMsg(o)
}
}
- if (zb0006Mask & 0x200000) == 0 { // if not empty
+ if (zb0007Mask & 0x400000) == 0 { // if not empty
// string "apgs"
o = append(o, 0xa4, 0x61, 0x70, 0x67, 0x73)
o = (*z).ApplicationCallTxnFields.GlobalStateSchema.MarshalMsg(o)
}
- if (zb0006Mask & 0x400000) == 0 { // if not empty
+ if (zb0007Mask & 0x800000) == 0 { // if not empty
// string "apid"
o = append(o, 0xa4, 0x61, 0x70, 0x69, 0x64)
o = (*z).ApplicationCallTxnFields.ApplicationID.MarshalMsg(o)
}
- if (zb0006Mask & 0x800000) == 0 { // if not empty
+ if (zb0007Mask & 0x1000000) == 0 { // if not empty
// string "apls"
o = append(o, 0xa4, 0x61, 0x70, 0x6c, 0x73)
o = (*z).ApplicationCallTxnFields.LocalStateSchema.MarshalMsg(o)
}
- if (zb0006Mask & 0x1000000) == 0 { // if not empty
+ if (zb0007Mask & 0x2000000) == 0 { // if not empty
// string "apsu"
o = append(o, 0xa4, 0x61, 0x70, 0x73, 0x75)
o = msgp.AppendBytes(o, (*z).ApplicationCallTxnFields.ClearStateProgram)
}
- if (zb0006Mask & 0x2000000) == 0 { // if not empty
+ if (zb0007Mask & 0x4000000) == 0 { // if not empty
// string "arcv"
o = append(o, 0xa4, 0x61, 0x72, 0x63, 0x76)
o = (*z).AssetTransferTxnFields.AssetReceiver.MarshalMsg(o)
}
- if (zb0006Mask & 0x4000000) == 0 { // if not empty
+ if (zb0007Mask & 0x8000000) == 0 { // if not empty
// string "asnd"
o = append(o, 0xa4, 0x61, 0x73, 0x6e, 0x64)
o = (*z).AssetTransferTxnFields.AssetSender.MarshalMsg(o)
}
- if (zb0006Mask & 0x8000000) == 0 { // if not empty
+ if (zb0007Mask & 0x10000000) == 0 { // if not empty
// string "caid"
o = append(o, 0xa4, 0x63, 0x61, 0x69, 0x64)
o = (*z).AssetConfigTxnFields.ConfigAsset.MarshalMsg(o)
}
- if (zb0006Mask & 0x10000000) == 0 { // if not empty
+ if (zb0007Mask & 0x20000000) == 0 { // if not empty
// string "close"
o = append(o, 0xa5, 0x63, 0x6c, 0x6f, 0x73, 0x65)
o = (*z).PaymentTxnFields.CloseRemainderTo.MarshalMsg(o)
}
- if (zb0006Mask & 0x20000000) == 0 { // if not empty
+ if (zb0007Mask & 0x40000000) == 0 { // if not empty
// string "fadd"
o = append(o, 0xa4, 0x66, 0x61, 0x64, 0x64)
o = (*z).AssetFreezeTxnFields.FreezeAccount.MarshalMsg(o)
}
- if (zb0006Mask & 0x40000000) == 0 { // if not empty
+ if (zb0007Mask & 0x80000000) == 0 { // if not empty
// string "faid"
o = append(o, 0xa4, 0x66, 0x61, 0x69, 0x64)
o = (*z).AssetFreezeTxnFields.FreezeAsset.MarshalMsg(o)
}
- if (zb0006Mask & 0x80000000) == 0 { // if not empty
+ if (zb0007Mask & 0x100000000) == 0 { // if not empty
// string "fee"
o = append(o, 0xa3, 0x66, 0x65, 0x65)
o = (*z).Header.Fee.MarshalMsg(o)
}
- if (zb0006Mask & 0x100000000) == 0 { // if not empty
+ if (zb0007Mask & 0x200000000) == 0 { // if not empty
// string "fv"
o = append(o, 0xa2, 0x66, 0x76)
o = (*z).Header.FirstValid.MarshalMsg(o)
}
- if (zb0006Mask & 0x200000000) == 0 { // if not empty
+ if (zb0007Mask & 0x400000000) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).Header.GenesisID)
}
- if (zb0006Mask & 0x400000000) == 0 { // if not empty
+ if (zb0007Mask & 0x800000000) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).Header.GenesisHash.MarshalMsg(o)
}
- if (zb0006Mask & 0x800000000) == 0 { // if not empty
+ if (zb0007Mask & 0x1000000000) == 0 { // if not empty
// string "grp"
o = append(o, 0xa3, 0x67, 0x72, 0x70)
o = (*z).Header.Group.MarshalMsg(o)
}
- if (zb0006Mask & 0x1000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x2000000000) == 0 { // if not empty
// string "lv"
o = append(o, 0xa2, 0x6c, 0x76)
o = (*z).Header.LastValid.MarshalMsg(o)
}
- if (zb0006Mask & 0x2000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x4000000000) == 0 { // if not empty
// string "lx"
o = append(o, 0xa2, 0x6c, 0x78)
o = msgp.AppendBytes(o, ((*z).Header.Lease)[:])
}
- if (zb0006Mask & 0x4000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x8000000000) == 0 { // if not empty
// string "nonpart"
o = append(o, 0xa7, 0x6e, 0x6f, 0x6e, 0x70, 0x61, 0x72, 0x74)
o = msgp.AppendBool(o, (*z).KeyregTxnFields.Nonparticipation)
}
- if (zb0006Mask & 0x8000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x10000000000) == 0 { // if not empty
// string "note"
o = append(o, 0xa4, 0x6e, 0x6f, 0x74, 0x65)
o = msgp.AppendBytes(o, (*z).Header.Note)
}
- if (zb0006Mask & 0x10000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x20000000000) == 0 { // if not empty
// string "rcv"
o = append(o, 0xa3, 0x72, 0x63, 0x76)
o = (*z).PaymentTxnFields.Receiver.MarshalMsg(o)
}
- if (zb0006Mask & 0x20000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x40000000000) == 0 { // if not empty
// string "rekey"
o = append(o, 0xa5, 0x72, 0x65, 0x6b, 0x65, 0x79)
o = (*z).Header.RekeyTo.MarshalMsg(o)
}
- if (zb0006Mask & 0x40000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x80000000000) == 0 { // if not empty
// string "selkey"
o = append(o, 0xa6, 0x73, 0x65, 0x6c, 0x6b, 0x65, 0x79)
o = (*z).KeyregTxnFields.SelectionPK.MarshalMsg(o)
}
- if (zb0006Mask & 0x80000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x100000000000) == 0 { // if not empty
// string "snd"
o = append(o, 0xa3, 0x73, 0x6e, 0x64)
o = (*z).Header.Sender.MarshalMsg(o)
}
- if (zb0006Mask & 0x100000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x200000000000) == 0 { // if not empty
// string "sp"
o = append(o, 0xa2, 0x73, 0x70)
o = (*z).StateProofTxnFields.StateProof.MarshalMsg(o)
}
- if (zb0006Mask & 0x200000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x400000000000) == 0 { // if not empty
// string "spmsg"
o = append(o, 0xa5, 0x73, 0x70, 0x6d, 0x73, 0x67)
o = (*z).StateProofTxnFields.Message.MarshalMsg(o)
}
- if (zb0006Mask & 0x400000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x800000000000) == 0 { // if not empty
// string "sprfkey"
o = append(o, 0xa7, 0x73, 0x70, 0x72, 0x66, 0x6b, 0x65, 0x79)
o = (*z).KeyregTxnFields.StateProofPK.MarshalMsg(o)
}
- if (zb0006Mask & 0x800000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x1000000000000) == 0 { // if not empty
// string "sptype"
o = append(o, 0xa6, 0x73, 0x70, 0x74, 0x79, 0x70, 0x65)
o = (*z).StateProofTxnFields.StateProofType.MarshalMsg(o)
}
- if (zb0006Mask & 0x1000000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x2000000000000) == 0 { // if not empty
// string "type"
o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65)
o = (*z).Type.MarshalMsg(o)
}
- if (zb0006Mask & 0x2000000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x4000000000000) == 0 { // if not empty
// string "votefst"
o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x66, 0x73, 0x74)
o = (*z).KeyregTxnFields.VoteFirst.MarshalMsg(o)
}
- if (zb0006Mask & 0x4000000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x8000000000000) == 0 { // if not empty
// string "votekd"
o = append(o, 0xa6, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x64)
o = msgp.AppendUint64(o, (*z).KeyregTxnFields.VoteKeyDilution)
}
- if (zb0006Mask & 0x8000000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x10000000000000) == 0 { // if not empty
// string "votekey"
o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x65, 0x79)
o = (*z).KeyregTxnFields.VotePK.MarshalMsg(o)
}
- if (zb0006Mask & 0x10000000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x20000000000000) == 0 { // if not empty
// string "votelst"
o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6c, 0x73, 0x74)
o = (*z).KeyregTxnFields.VoteLast.MarshalMsg(o)
}
- if (zb0006Mask & 0x20000000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x40000000000000) == 0 { // if not empty
// string "xaid"
o = append(o, 0xa4, 0x78, 0x61, 0x69, 0x64)
o = (*z).AssetTransferTxnFields.XferAsset.MarshalMsg(o)
@@ -4651,65 +5052,65 @@ func (_ *Transaction) CanMarshalMsg(z interface{}) bool {
func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0006 int
- var zb0007 bool
- zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).Type.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Type")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).Header.Sender.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Sender")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).Header.Fee.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Fee")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).Header.FirstValid.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "FirstValid")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).Header.LastValid.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "LastValid")
return
}
}
- if zb0006 > 0 {
- zb0006--
- var zb0008 int
- zb0008, err = msgp.ReadBytesBytesHeader(bts)
+ if zb0007 > 0 {
+ zb0007--
+ var zb0009 int
+ zb0009, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Note")
return
}
- if zb0008 > config.MaxTxnNoteBytes {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxTxnNoteBytes))
+ if zb0009 > config.MaxTxnNoteBytes {
+ err = msgp.ErrOverflow(uint64(zb0009), uint64(config.MaxTxnNoteBytes))
return
}
(*z).Header.Note, bts, err = msgp.ReadBytesBytes(bts, (*z).Header.Note)
@@ -4718,246 +5119,246 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
(*z).Header.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).Header.GenesisHash.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisHash")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).Header.Group.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Group")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = msgp.ReadExactBytes(bts, ((*z).Header.Lease)[:])
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Lease")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).Header.RekeyTo.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RekeyTo")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).KeyregTxnFields.VotePK.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "VotePK")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).KeyregTxnFields.SelectionPK.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "SelectionPK")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).KeyregTxnFields.StateProofPK.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProofPK")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).KeyregTxnFields.VoteFirst.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "VoteFirst")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).KeyregTxnFields.VoteLast.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "VoteLast")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
(*z).KeyregTxnFields.VoteKeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
(*z).KeyregTxnFields.Nonparticipation, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Nonparticipation")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).PaymentTxnFields.Receiver.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Receiver")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).PaymentTxnFields.Amount.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Amount")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).PaymentTxnFields.CloseRemainderTo.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CloseRemainderTo")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).AssetConfigTxnFields.ConfigAsset.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ConfigAsset")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).AssetConfigTxnFields.AssetParams.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "AssetParams")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).AssetTransferTxnFields.XferAsset.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "XferAsset")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
(*z).AssetTransferTxnFields.AssetAmount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "AssetAmount")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).AssetTransferTxnFields.AssetSender.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "AssetSender")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).AssetTransferTxnFields.AssetReceiver.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "AssetReceiver")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).AssetTransferTxnFields.AssetCloseTo.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "AssetCloseTo")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).AssetFreezeTxnFields.FreezeAccount.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "FreezeAccount")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).AssetFreezeTxnFields.FreezeAsset.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "FreezeAsset")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
(*z).AssetFreezeTxnFields.AssetFrozen, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "AssetFrozen")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).ApplicationCallTxnFields.ApplicationID.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ApplicationID")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
{
- var zb0009 uint64
- zb0009, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0010 uint64
+ zb0010, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OnCompletion")
return
}
- (*z).ApplicationCallTxnFields.OnCompletion = OnCompletion(zb0009)
+ (*z).ApplicationCallTxnFields.OnCompletion = OnCompletion(zb0010)
}
}
- if zb0006 > 0 {
- zb0006--
- var zb0010 int
- var zb0011 bool
- zb0010, zb0011, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0007 > 0 {
+ zb0007--
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
return
}
- if zb0010 > encodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0010), uint64(encodedMaxApplicationArgs))
+ if zb0011 > encodedMaxApplicationArgs {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(encodedMaxApplicationArgs))
err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
return
}
- if zb0011 {
+ if zb0012 {
(*z).ApplicationCallTxnFields.ApplicationArgs = nil
- } else if (*z).ApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).ApplicationCallTxnFields.ApplicationArgs) >= zb0010 {
- (*z).ApplicationCallTxnFields.ApplicationArgs = ((*z).ApplicationCallTxnFields.ApplicationArgs)[:zb0010]
+ } else if (*z).ApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).ApplicationCallTxnFields.ApplicationArgs) >= zb0011 {
+ (*z).ApplicationCallTxnFields.ApplicationArgs = ((*z).ApplicationCallTxnFields.ApplicationArgs)[:zb0011]
} else {
- (*z).ApplicationCallTxnFields.ApplicationArgs = make([][]byte, zb0010)
+ (*z).ApplicationCallTxnFields.ApplicationArgs = make([][]byte, zb0011)
}
for zb0002 := range (*z).ApplicationCallTxnFields.ApplicationArgs {
(*z).ApplicationCallTxnFields.ApplicationArgs[zb0002], bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ApplicationArgs[zb0002])
@@ -4967,26 +5368,26 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
}
- if zb0006 > 0 {
- zb0006--
- var zb0012 int
- var zb0013 bool
- zb0012, zb0013, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0007 > 0 {
+ zb0007--
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Accounts")
return
}
- if zb0012 > encodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0012), uint64(encodedMaxAccounts))
+ if zb0013 > encodedMaxAccounts {
+ err = msgp.ErrOverflow(uint64(zb0013), uint64(encodedMaxAccounts))
err = msgp.WrapError(err, "struct-from-array", "Accounts")
return
}
- if zb0013 {
+ if zb0014 {
(*z).ApplicationCallTxnFields.Accounts = nil
- } else if (*z).ApplicationCallTxnFields.Accounts != nil && cap((*z).ApplicationCallTxnFields.Accounts) >= zb0012 {
- (*z).ApplicationCallTxnFields.Accounts = ((*z).ApplicationCallTxnFields.Accounts)[:zb0012]
+ } else if (*z).ApplicationCallTxnFields.Accounts != nil && cap((*z).ApplicationCallTxnFields.Accounts) >= zb0013 {
+ (*z).ApplicationCallTxnFields.Accounts = ((*z).ApplicationCallTxnFields.Accounts)[:zb0013]
} else {
- (*z).ApplicationCallTxnFields.Accounts = make([]basics.Address, zb0012)
+ (*z).ApplicationCallTxnFields.Accounts = make([]basics.Address, zb0013)
}
for zb0003 := range (*z).ApplicationCallTxnFields.Accounts {
bts, err = (*z).ApplicationCallTxnFields.Accounts[zb0003].UnmarshalMsg(bts)
@@ -4996,26 +5397,26 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
}
- if zb0006 > 0 {
- zb0006--
- var zb0014 int
- var zb0015 bool
- zb0014, zb0015, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0007 > 0 {
+ zb0007--
+ var zb0015 int
+ var zb0016 bool
+ zb0015, zb0016, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
return
}
- if zb0014 > encodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0014), uint64(encodedMaxForeignApps))
+ if zb0015 > encodedMaxForeignApps {
+ err = msgp.ErrOverflow(uint64(zb0015), uint64(encodedMaxForeignApps))
err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
return
}
- if zb0015 {
+ if zb0016 {
(*z).ApplicationCallTxnFields.ForeignApps = nil
- } else if (*z).ApplicationCallTxnFields.ForeignApps != nil && cap((*z).ApplicationCallTxnFields.ForeignApps) >= zb0014 {
- (*z).ApplicationCallTxnFields.ForeignApps = ((*z).ApplicationCallTxnFields.ForeignApps)[:zb0014]
+ } else if (*z).ApplicationCallTxnFields.ForeignApps != nil && cap((*z).ApplicationCallTxnFields.ForeignApps) >= zb0015 {
+ (*z).ApplicationCallTxnFields.ForeignApps = ((*z).ApplicationCallTxnFields.ForeignApps)[:zb0015]
} else {
- (*z).ApplicationCallTxnFields.ForeignApps = make([]basics.AppIndex, zb0014)
+ (*z).ApplicationCallTxnFields.ForeignApps = make([]basics.AppIndex, zb0015)
}
for zb0004 := range (*z).ApplicationCallTxnFields.ForeignApps {
bts, err = (*z).ApplicationCallTxnFields.ForeignApps[zb0004].UnmarshalMsg(bts)
@@ -5025,61 +5426,154 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
}
- if zb0006 > 0 {
- zb0006--
- var zb0016 int
- var zb0017 bool
- zb0016, zb0017, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0007 > 0 {
+ zb0007--
+ var zb0017 int
+ var zb0018 bool
+ zb0017, zb0018, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes")
+ return
+ }
+ if zb0017 > encodedMaxBoxes {
+ err = msgp.ErrOverflow(uint64(zb0017), uint64(encodedMaxBoxes))
+ err = msgp.WrapError(err, "struct-from-array", "Boxes")
+ return
+ }
+ if zb0018 {
+ (*z).ApplicationCallTxnFields.Boxes = nil
+ } else if (*z).ApplicationCallTxnFields.Boxes != nil && cap((*z).ApplicationCallTxnFields.Boxes) >= zb0017 {
+ (*z).ApplicationCallTxnFields.Boxes = ((*z).ApplicationCallTxnFields.Boxes)[:zb0017]
+ } else {
+ (*z).ApplicationCallTxnFields.Boxes = make([]BoxRef, zb0017)
+ }
+ for zb0005 := range (*z).ApplicationCallTxnFields.Boxes {
+ var zb0019 int
+ var zb0020 bool
+ zb0019, zb0020, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0019, zb0020, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005)
+ return
+ }
+ if zb0019 > 0 {
+ zb0019--
+ (*z).ApplicationCallTxnFields.Boxes[zb0005].Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "struct-from-array", "Index")
+ return
+ }
+ }
+ if zb0019 > 0 {
+ zb0019--
+ (*z).ApplicationCallTxnFields.Boxes[zb0005].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "struct-from-array", "Name")
+ return
+ }
+ }
+ if zb0019 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0019)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005)
+ return
+ }
+ if zb0020 {
+ (*z).ApplicationCallTxnFields.Boxes[zb0005] = BoxRef{}
+ }
+ for zb0019 > 0 {
+ zb0019--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005)
+ return
+ }
+ switch string(field) {
+ case "i":
+ (*z).ApplicationCallTxnFields.Boxes[zb0005].Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "Index")
+ return
+ }
+ case "n":
+ (*z).ApplicationCallTxnFields.Boxes[zb0005].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "Name")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005)
+ return
+ }
+ }
+ }
+ }
+ }
+ }
+ if zb0007 > 0 {
+ zb0007--
+ var zb0021 int
+ var zb0022 bool
+ zb0021, zb0022, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
return
}
- if zb0016 > encodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0016), uint64(encodedMaxForeignAssets))
+ if zb0021 > encodedMaxForeignAssets {
+ err = msgp.ErrOverflow(uint64(zb0021), uint64(encodedMaxForeignAssets))
err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
return
}
- if zb0017 {
+ if zb0022 {
(*z).ApplicationCallTxnFields.ForeignAssets = nil
- } else if (*z).ApplicationCallTxnFields.ForeignAssets != nil && cap((*z).ApplicationCallTxnFields.ForeignAssets) >= zb0016 {
- (*z).ApplicationCallTxnFields.ForeignAssets = ((*z).ApplicationCallTxnFields.ForeignAssets)[:zb0016]
+ } else if (*z).ApplicationCallTxnFields.ForeignAssets != nil && cap((*z).ApplicationCallTxnFields.ForeignAssets) >= zb0021 {
+ (*z).ApplicationCallTxnFields.ForeignAssets = ((*z).ApplicationCallTxnFields.ForeignAssets)[:zb0021]
} else {
- (*z).ApplicationCallTxnFields.ForeignAssets = make([]basics.AssetIndex, zb0016)
+ (*z).ApplicationCallTxnFields.ForeignAssets = make([]basics.AssetIndex, zb0021)
}
- for zb0005 := range (*z).ApplicationCallTxnFields.ForeignAssets {
- bts, err = (*z).ApplicationCallTxnFields.ForeignAssets[zb0005].UnmarshalMsg(bts)
+ for zb0006 := range (*z).ApplicationCallTxnFields.ForeignAssets {
+ bts, err = (*z).ApplicationCallTxnFields.ForeignAssets[zb0006].UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0005)
+ err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0006)
return
}
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).ApplicationCallTxnFields.LocalStateSchema.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "LocalStateSchema")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).ApplicationCallTxnFields.GlobalStateSchema.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GlobalStateSchema")
return
}
}
- if zb0006 > 0 {
- zb0006--
- var zb0018 int
- zb0018, err = msgp.ReadBytesBytesHeader(bts)
+ if zb0007 > 0 {
+ zb0007--
+ var zb0023 int
+ zb0023, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram")
return
}
- if zb0018 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0018), uint64(config.MaxAvailableAppProgramLen))
+ if zb0023 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0023), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApplicationCallTxnFields.ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ApprovalProgram)
@@ -5088,16 +5582,16 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
}
- if zb0006 > 0 {
- zb0006--
- var zb0019 int
- zb0019, err = msgp.ReadBytesBytesHeader(bts)
+ if zb0007 > 0 {
+ zb0007--
+ var zb0024 int
+ zb0024, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram")
return
}
- if zb0019 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0019), uint64(config.MaxAvailableAppProgramLen))
+ if zb0024 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0024), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApplicationCallTxnFields.ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ClearStateProgram)
@@ -5106,40 +5600,40 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
(*z).ApplicationCallTxnFields.ExtraProgramPages, bts, err = msgp.ReadUint32Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ExtraProgramPages")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).StateProofTxnFields.StateProofType.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProofType")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).StateProofTxnFields.StateProof.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProof")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).StateProofTxnFields.Message.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Message")
return
}
}
- if zb0006 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0006)
+ if zb0007 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0007)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -5150,11 +5644,11 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err)
return
}
- if zb0007 {
+ if zb0008 {
(*z) = Transaction{}
}
- for zb0006 > 0 {
- zb0006--
+ for zb0007 > 0 {
+ zb0007--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -5192,14 +5686,14 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "note":
- var zb0020 int
- zb0020, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0025 int
+ zb0025, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "Note")
return
}
- if zb0020 > config.MaxTxnNoteBytes {
- err = msgp.ErrOverflow(uint64(zb0020), uint64(config.MaxTxnNoteBytes))
+ if zb0025 > config.MaxTxnNoteBytes {
+ err = msgp.ErrOverflow(uint64(zb0025), uint64(config.MaxTxnNoteBytes))
return
}
(*z).Header.Note, bts, err = msgp.ReadBytesBytes(bts, (*z).Header.Note)
@@ -5365,33 +5859,33 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
case "apan":
{
- var zb0021 uint64
- zb0021, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0026 uint64
+ zb0026, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OnCompletion")
return
}
- (*z).ApplicationCallTxnFields.OnCompletion = OnCompletion(zb0021)
+ (*z).ApplicationCallTxnFields.OnCompletion = OnCompletion(zb0026)
}
case "apaa":
- var zb0022 int
- var zb0023 bool
- zb0022, zb0023, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0027 int
+ var zb0028 bool
+ zb0027, zb0028, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ApplicationArgs")
return
}
- if zb0022 > encodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0022), uint64(encodedMaxApplicationArgs))
+ if zb0027 > encodedMaxApplicationArgs {
+ err = msgp.ErrOverflow(uint64(zb0027), uint64(encodedMaxApplicationArgs))
err = msgp.WrapError(err, "ApplicationArgs")
return
}
- if zb0023 {
+ if zb0028 {
(*z).ApplicationCallTxnFields.ApplicationArgs = nil
- } else if (*z).ApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).ApplicationCallTxnFields.ApplicationArgs) >= zb0022 {
- (*z).ApplicationCallTxnFields.ApplicationArgs = ((*z).ApplicationCallTxnFields.ApplicationArgs)[:zb0022]
+ } else if (*z).ApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).ApplicationCallTxnFields.ApplicationArgs) >= zb0027 {
+ (*z).ApplicationCallTxnFields.ApplicationArgs = ((*z).ApplicationCallTxnFields.ApplicationArgs)[:zb0027]
} else {
- (*z).ApplicationCallTxnFields.ApplicationArgs = make([][]byte, zb0022)
+ (*z).ApplicationCallTxnFields.ApplicationArgs = make([][]byte, zb0027)
}
for zb0002 := range (*z).ApplicationCallTxnFields.ApplicationArgs {
(*z).ApplicationCallTxnFields.ApplicationArgs[zb0002], bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ApplicationArgs[zb0002])
@@ -5401,24 +5895,24 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
case "apat":
- var zb0024 int
- var zb0025 bool
- zb0024, zb0025, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0029 int
+ var zb0030 bool
+ zb0029, zb0030, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Accounts")
return
}
- if zb0024 > encodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0024), uint64(encodedMaxAccounts))
+ if zb0029 > encodedMaxAccounts {
+ err = msgp.ErrOverflow(uint64(zb0029), uint64(encodedMaxAccounts))
err = msgp.WrapError(err, "Accounts")
return
}
- if zb0025 {
+ if zb0030 {
(*z).ApplicationCallTxnFields.Accounts = nil
- } else if (*z).ApplicationCallTxnFields.Accounts != nil && cap((*z).ApplicationCallTxnFields.Accounts) >= zb0024 {
- (*z).ApplicationCallTxnFields.Accounts = ((*z).ApplicationCallTxnFields.Accounts)[:zb0024]
+ } else if (*z).ApplicationCallTxnFields.Accounts != nil && cap((*z).ApplicationCallTxnFields.Accounts) >= zb0029 {
+ (*z).ApplicationCallTxnFields.Accounts = ((*z).ApplicationCallTxnFields.Accounts)[:zb0029]
} else {
- (*z).ApplicationCallTxnFields.Accounts = make([]basics.Address, zb0024)
+ (*z).ApplicationCallTxnFields.Accounts = make([]basics.Address, zb0029)
}
for zb0003 := range (*z).ApplicationCallTxnFields.Accounts {
bts, err = (*z).ApplicationCallTxnFields.Accounts[zb0003].UnmarshalMsg(bts)
@@ -5428,24 +5922,24 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
case "apfa":
- var zb0026 int
- var zb0027 bool
- zb0026, zb0027, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0031 int
+ var zb0032 bool
+ zb0031, zb0032, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ForeignApps")
return
}
- if zb0026 > encodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0026), uint64(encodedMaxForeignApps))
+ if zb0031 > encodedMaxForeignApps {
+ err = msgp.ErrOverflow(uint64(zb0031), uint64(encodedMaxForeignApps))
err = msgp.WrapError(err, "ForeignApps")
return
}
- if zb0027 {
+ if zb0032 {
(*z).ApplicationCallTxnFields.ForeignApps = nil
- } else if (*z).ApplicationCallTxnFields.ForeignApps != nil && cap((*z).ApplicationCallTxnFields.ForeignApps) >= zb0026 {
- (*z).ApplicationCallTxnFields.ForeignApps = ((*z).ApplicationCallTxnFields.ForeignApps)[:zb0026]
+ } else if (*z).ApplicationCallTxnFields.ForeignApps != nil && cap((*z).ApplicationCallTxnFields.ForeignApps) >= zb0031 {
+ (*z).ApplicationCallTxnFields.ForeignApps = ((*z).ApplicationCallTxnFields.ForeignApps)[:zb0031]
} else {
- (*z).ApplicationCallTxnFields.ForeignApps = make([]basics.AppIndex, zb0026)
+ (*z).ApplicationCallTxnFields.ForeignApps = make([]basics.AppIndex, zb0031)
}
for zb0004 := range (*z).ApplicationCallTxnFields.ForeignApps {
bts, err = (*z).ApplicationCallTxnFields.ForeignApps[zb0004].UnmarshalMsg(bts)
@@ -5454,30 +5948,121 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
}
+ case "apbx":
+ var zb0033 int
+ var zb0034 bool
+ zb0033, zb0034, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes")
+ return
+ }
+ if zb0033 > encodedMaxBoxes {
+ err = msgp.ErrOverflow(uint64(zb0033), uint64(encodedMaxBoxes))
+ err = msgp.WrapError(err, "Boxes")
+ return
+ }
+ if zb0034 {
+ (*z).ApplicationCallTxnFields.Boxes = nil
+ } else if (*z).ApplicationCallTxnFields.Boxes != nil && cap((*z).ApplicationCallTxnFields.Boxes) >= zb0033 {
+ (*z).ApplicationCallTxnFields.Boxes = ((*z).ApplicationCallTxnFields.Boxes)[:zb0033]
+ } else {
+ (*z).ApplicationCallTxnFields.Boxes = make([]BoxRef, zb0033)
+ }
+ for zb0005 := range (*z).ApplicationCallTxnFields.Boxes {
+ var zb0035 int
+ var zb0036 bool
+ zb0035, zb0036, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0035, zb0036, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005)
+ return
+ }
+ if zb0035 > 0 {
+ zb0035--
+ (*z).ApplicationCallTxnFields.Boxes[zb0005].Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005, "struct-from-array", "Index")
+ return
+ }
+ }
+ if zb0035 > 0 {
+ zb0035--
+ (*z).ApplicationCallTxnFields.Boxes[zb0005].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005, "struct-from-array", "Name")
+ return
+ }
+ }
+ if zb0035 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0035)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005)
+ return
+ }
+ if zb0036 {
+ (*z).ApplicationCallTxnFields.Boxes[zb0005] = BoxRef{}
+ }
+ for zb0035 > 0 {
+ zb0035--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005)
+ return
+ }
+ switch string(field) {
+ case "i":
+ (*z).ApplicationCallTxnFields.Boxes[zb0005].Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005, "Index")
+ return
+ }
+ case "n":
+ (*z).ApplicationCallTxnFields.Boxes[zb0005].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005, "Name")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005)
+ return
+ }
+ }
+ }
+ }
+ }
case "apas":
- var zb0028 int
- var zb0029 bool
- zb0028, zb0029, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0037 int
+ var zb0038 bool
+ zb0037, zb0038, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ForeignAssets")
return
}
- if zb0028 > encodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0028), uint64(encodedMaxForeignAssets))
+ if zb0037 > encodedMaxForeignAssets {
+ err = msgp.ErrOverflow(uint64(zb0037), uint64(encodedMaxForeignAssets))
err = msgp.WrapError(err, "ForeignAssets")
return
}
- if zb0029 {
+ if zb0038 {
(*z).ApplicationCallTxnFields.ForeignAssets = nil
- } else if (*z).ApplicationCallTxnFields.ForeignAssets != nil && cap((*z).ApplicationCallTxnFields.ForeignAssets) >= zb0028 {
- (*z).ApplicationCallTxnFields.ForeignAssets = ((*z).ApplicationCallTxnFields.ForeignAssets)[:zb0028]
+ } else if (*z).ApplicationCallTxnFields.ForeignAssets != nil && cap((*z).ApplicationCallTxnFields.ForeignAssets) >= zb0037 {
+ (*z).ApplicationCallTxnFields.ForeignAssets = ((*z).ApplicationCallTxnFields.ForeignAssets)[:zb0037]
} else {
- (*z).ApplicationCallTxnFields.ForeignAssets = make([]basics.AssetIndex, zb0028)
+ (*z).ApplicationCallTxnFields.ForeignAssets = make([]basics.AssetIndex, zb0037)
}
- for zb0005 := range (*z).ApplicationCallTxnFields.ForeignAssets {
- bts, err = (*z).ApplicationCallTxnFields.ForeignAssets[zb0005].UnmarshalMsg(bts)
+ for zb0006 := range (*z).ApplicationCallTxnFields.ForeignAssets {
+ bts, err = (*z).ApplicationCallTxnFields.ForeignAssets[zb0006].UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "ForeignAssets", zb0005)
+ err = msgp.WrapError(err, "ForeignAssets", zb0006)
return
}
}
@@ -5494,14 +6079,14 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "apap":
- var zb0030 int
- zb0030, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0039 int
+ zb0039, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "ApprovalProgram")
return
}
- if zb0030 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0030), uint64(config.MaxAvailableAppProgramLen))
+ if zb0039 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0039), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApplicationCallTxnFields.ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ApprovalProgram)
@@ -5510,14 +6095,14 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "apsu":
- var zb0031 int
- zb0031, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0040 int
+ zb0040, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "ClearStateProgram")
return
}
- if zb0031 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0031), uint64(config.MaxAvailableAppProgramLen))
+ if zb0040 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0040), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApplicationCallTxnFields.ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ClearStateProgram)
@@ -5582,8 +6167,12 @@ func (z *Transaction) Msgsize() (s int) {
s += (*z).ApplicationCallTxnFields.ForeignApps[zb0004].Msgsize()
}
s += 5 + msgp.ArrayHeaderSize
- for zb0005 := range (*z).ApplicationCallTxnFields.ForeignAssets {
- s += (*z).ApplicationCallTxnFields.ForeignAssets[zb0005].Msgsize()
+ for zb0005 := range (*z).ApplicationCallTxnFields.Boxes {
+ s += 1 + 2 + msgp.Uint64Size + 2 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.Boxes[zb0005].Name)
+ }
+ s += 5 + msgp.ArrayHeaderSize
+ for zb0006 := range (*z).ApplicationCallTxnFields.ForeignAssets {
+ s += (*z).ApplicationCallTxnFields.ForeignAssets[zb0006].Msgsize()
}
s += 5 + (*z).ApplicationCallTxnFields.LocalStateSchema.Msgsize() + 5 + (*z).ApplicationCallTxnFields.GlobalStateSchema.Msgsize() + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ApprovalProgram) + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ClearStateProgram) + 5 + msgp.Uint32Size + 7 + (*z).StateProofTxnFields.StateProofType.Msgsize() + 3 + (*z).StateProofTxnFields.StateProof.Msgsize() + 6 + (*z).StateProofTxnFields.Message.Msgsize()
return
@@ -5591,7 +6180,7 @@ func (z *Transaction) Msgsize() (s int) {
// MsgIsZero returns whether this is a zero value
func (z *Transaction) MsgIsZero() bool {
- return ((*z).Type.MsgIsZero()) && ((*z).Header.Sender.MsgIsZero()) && ((*z).Header.Fee.MsgIsZero()) && ((*z).Header.FirstValid.MsgIsZero()) && ((*z).Header.LastValid.MsgIsZero()) && (len((*z).Header.Note) == 0) && ((*z).Header.GenesisID == "") && ((*z).Header.GenesisHash.MsgIsZero()) && ((*z).Header.Group.MsgIsZero()) && ((*z).Header.Lease == ([32]byte{})) && ((*z).Header.RekeyTo.MsgIsZero()) && ((*z).KeyregTxnFields.VotePK.MsgIsZero()) && ((*z).KeyregTxnFields.SelectionPK.MsgIsZero()) && ((*z).KeyregTxnFields.StateProofPK.MsgIsZero()) && ((*z).KeyregTxnFields.VoteFirst.MsgIsZero()) && ((*z).KeyregTxnFields.VoteLast.MsgIsZero()) && ((*z).KeyregTxnFields.VoteKeyDilution == 0) && ((*z).KeyregTxnFields.Nonparticipation == false) && ((*z).PaymentTxnFields.Receiver.MsgIsZero()) && ((*z).PaymentTxnFields.Amount.MsgIsZero()) && ((*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero()) && ((*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero()) && ((*z).AssetConfigTxnFields.AssetParams.MsgIsZero()) && ((*z).AssetTransferTxnFields.XferAsset.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetAmount == 0) && ((*z).AssetTransferTxnFields.AssetSender.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero()) && ((*z).AssetFreezeTxnFields.AssetFrozen == false) && ((*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero()) && ((*z).ApplicationCallTxnFields.OnCompletion == 0) && (len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).ApplicationCallTxnFields.Accounts) == 0) && (len((*z).ApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).ApplicationCallTxnFields.ForeignAssets) == 0) && ((*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero()) && ((*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero()) && (len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0) && 
(len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0) && ((*z).ApplicationCallTxnFields.ExtraProgramPages == 0) && ((*z).StateProofTxnFields.StateProofType.MsgIsZero()) && ((*z).StateProofTxnFields.StateProof.MsgIsZero()) && ((*z).StateProofTxnFields.Message.MsgIsZero())
+ return ((*z).Type.MsgIsZero()) && ((*z).Header.Sender.MsgIsZero()) && ((*z).Header.Fee.MsgIsZero()) && ((*z).Header.FirstValid.MsgIsZero()) && ((*z).Header.LastValid.MsgIsZero()) && (len((*z).Header.Note) == 0) && ((*z).Header.GenesisID == "") && ((*z).Header.GenesisHash.MsgIsZero()) && ((*z).Header.Group.MsgIsZero()) && ((*z).Header.Lease == ([32]byte{})) && ((*z).Header.RekeyTo.MsgIsZero()) && ((*z).KeyregTxnFields.VotePK.MsgIsZero()) && ((*z).KeyregTxnFields.SelectionPK.MsgIsZero()) && ((*z).KeyregTxnFields.StateProofPK.MsgIsZero()) && ((*z).KeyregTxnFields.VoteFirst.MsgIsZero()) && ((*z).KeyregTxnFields.VoteLast.MsgIsZero()) && ((*z).KeyregTxnFields.VoteKeyDilution == 0) && ((*z).KeyregTxnFields.Nonparticipation == false) && ((*z).PaymentTxnFields.Receiver.MsgIsZero()) && ((*z).PaymentTxnFields.Amount.MsgIsZero()) && ((*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero()) && ((*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero()) && ((*z).AssetConfigTxnFields.AssetParams.MsgIsZero()) && ((*z).AssetTransferTxnFields.XferAsset.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetAmount == 0) && ((*z).AssetTransferTxnFields.AssetSender.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero()) && ((*z).AssetFreezeTxnFields.AssetFrozen == false) && ((*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero()) && ((*z).ApplicationCallTxnFields.OnCompletion == 0) && (len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).ApplicationCallTxnFields.Accounts) == 0) && (len((*z).ApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).ApplicationCallTxnFields.Boxes) == 0) && (len((*z).ApplicationCallTxnFields.ForeignAssets) == 0) && ((*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero()) && ((*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero()) && 
(len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0) && (len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0) && ((*z).ApplicationCallTxnFields.ExtraProgramPages == 0) && ((*z).StateProofTxnFields.StateProofType.MsgIsZero()) && ((*z).StateProofTxnFields.StateProof.MsgIsZero()) && ((*z).StateProofTxnFields.Message.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
diff --git a/data/transactions/msgp_gen_test.go b/data/transactions/msgp_gen_test.go
index ef4764d45..0ce6b29c3 100644
--- a/data/transactions/msgp_gen_test.go
+++ b/data/transactions/msgp_gen_test.go
@@ -314,6 +314,66 @@ func BenchmarkUnmarshalAssetTransferTxnFields(b *testing.B) {
}
}
+func TestMarshalUnmarshalBoxRef(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := BoxRef{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingBoxRef(t *testing.T) {
+ protocol.RunEncodingTest(t, &BoxRef{})
+}
+
+func BenchmarkMarshalMsgBoxRef(b *testing.B) {
+ v := BoxRef{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgBoxRef(b *testing.B) {
+ v := BoxRef{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalBoxRef(b *testing.B) {
+ v := BoxRef{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalEvalDelta(t *testing.T) {
partitiontest.PartitionTest(t)
v := EvalDelta{}
diff --git a/data/transactions/teal_test.go b/data/transactions/teal_test.go
index 990036895..e5920d0f1 100644
--- a/data/transactions/teal_test.go
+++ b/data/transactions/teal_test.go
@@ -192,7 +192,7 @@ func TestEvalDeltaEqual(t *testing.T) {
// TestUnchangedAllocBounds ensure that the allocbounds on EvalDelta have not
// changed. If they change, EvalDelta.checkAllocBounds must be changed, or at
// least reconsidered, as well. We must give plenty of thought to whether a new
-// allocound, used by new versions, is compatible with old code. If the change
+// allocbound, used by new versions, is compatible with old code. If the change
// can only show up in new protocol versions, it should be ok. But if we change
// a bound, it will go into effect immediately, not with Protocol upgrade. So we
// must be extremely careful that old protocol versions can not emit messages
diff --git a/data/transactions/transaction.go b/data/transactions/transaction.go
index 6198a0663..a22abd7ff 100644
--- a/data/transactions/transaction.go
+++ b/data/transactions/transaction.go
@@ -377,12 +377,8 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa
// Ensure requested action is valid
switch tx.OnCompletion {
- case NoOpOC:
- case OptInOC:
- case CloseOutOC:
- case ClearStateOC:
- case UpdateApplicationOC:
- case DeleteApplicationOC:
+ case NoOpOC, OptInOC, CloseOutOC, ClearStateOC, UpdateApplicationOC, DeleteApplicationOC:
+ /* ok */
default:
return fmt.Errorf("invalid application OnCompletion")
}
@@ -448,8 +444,12 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa
return fmt.Errorf("tx.ForeignAssets too long, max number of foreign assets is %d", proto.MaxAppTxnForeignAssets)
}
+ if len(tx.Boxes) > proto.MaxAppBoxReferences {
+ return fmt.Errorf("tx.Boxes too long, max number of box references is %d", proto.MaxAppBoxReferences)
+ }
+
// Limit the sum of all types of references that bring in account records
- if len(tx.Accounts)+len(tx.ForeignApps)+len(tx.ForeignAssets) > proto.MaxAppTotalTxnReferences {
+ if len(tx.Accounts)+len(tx.ForeignApps)+len(tx.ForeignAssets)+len(tx.Boxes) > proto.MaxAppTotalTxnReferences {
return fmt.Errorf("tx references exceed MaxAppTotalTxnReferences = %d", proto.MaxAppTotalTxnReferences)
}
@@ -470,6 +470,13 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa
return fmt.Errorf("app programs too long. max total len %d bytes", pages*proto.MaxAppTotalProgramLen)
}
+ for i, br := range tx.Boxes {
+ // recall 0 is the current app so indexes are shifted, thus test is for greater than, not gte.
+ if br.Index > uint64(len(tx.ForeignApps)) {
+ return fmt.Errorf("tx.Boxes[%d].Index is %d. Exceeds len(tx.ForeignApps)", i, br.Index)
+ }
+ }
+
if tx.LocalStateSchema.NumEntries() > proto.MaxLocalSchemaEntries {
return fmt.Errorf("tx.LocalStateSchema too large, max number of keys is %d", proto.MaxLocalSchemaEntries)
}
diff --git a/data/transactions/transaction_test.go b/data/transactions/transaction_test.go
index bac43c22c..43467d3fd 100644
--- a/data/transactions/transaction_test.go
+++ b/data/transactions/transaction_test.go
@@ -273,6 +273,7 @@ func TestWellFormedErrors(t *testing.T) {
futureProto := config.Consensus[protocol.ConsensusFuture]
protoV27 := config.Consensus[protocol.ConsensusV27]
protoV28 := config.Consensus[protocol.ConsensusV28]
+ protoV32 := config.Consensus[protocol.ConsensusV32]
addr1, err := basics.UnmarshalChecksumAddress("NDQCJNNY5WWWFLP4GFZ7MEF2QJSMZYK6OWIV2AQ7OMAVLEFCGGRHFPKJJA")
require.NoError(t, err)
v5 := []byte{0x05}
@@ -284,7 +285,6 @@ func TestWellFormedErrors(t *testing.T) {
}
usecases := []struct {
tx Transaction
- spec SpecialAddresses
proto config.ConsensusParams
expectedError error
}{
@@ -296,7 +296,6 @@ func TestWellFormedErrors(t *testing.T) {
Fee: basics.MicroAlgos{Raw: 100},
},
},
- spec: specialAddr,
proto: protoV27,
expectedError: makeMinFeeErrorf("transaction had fee %d, which is less than the minimum %d", 100, curProto.MinTxnFee),
},
@@ -308,7 +307,6 @@ func TestWellFormedErrors(t *testing.T) {
Fee: basics.MicroAlgos{Raw: 100},
},
},
- spec: specialAddr,
proto: curProto,
},
{
@@ -321,7 +319,6 @@ func TestWellFormedErrors(t *testing.T) {
FirstValid: 105,
},
},
- spec: specialAddr,
proto: curProto,
expectedError: fmt.Errorf("transaction invalid range (%d--%d)", 105, 100),
},
@@ -339,7 +336,6 @@ func TestWellFormedErrors(t *testing.T) {
ExtraProgramPages: 1,
},
},
- spec: specialAddr,
proto: protoV27,
expectedError: fmt.Errorf("tx.ExtraProgramPages exceeds MaxExtraAppProgramPages = %d", protoV27.MaxExtraAppProgramPages),
},
@@ -353,7 +349,6 @@ func TestWellFormedErrors(t *testing.T) {
ClearStateProgram: []byte("Xjunk"),
},
},
- spec: specialAddr,
proto: protoV27,
expectedError: fmt.Errorf("approval program too long. max len 1024 bytes"),
},
@@ -367,7 +362,6 @@ func TestWellFormedErrors(t *testing.T) {
ClearStateProgram: []byte("Xjunk"),
},
},
- spec: specialAddr,
proto: futureProto,
},
{
@@ -380,7 +374,6 @@ func TestWellFormedErrors(t *testing.T) {
ClearStateProgram: []byte(strings.Repeat("X", 1025)),
},
},
- spec: specialAddr,
proto: futureProto,
expectedError: fmt.Errorf("app programs too long. max total len 2048 bytes"),
},
@@ -395,7 +388,6 @@ func TestWellFormedErrors(t *testing.T) {
ExtraProgramPages: 1,
},
},
- spec: specialAddr,
proto: futureProto,
},
{
@@ -410,7 +402,6 @@ func TestWellFormedErrors(t *testing.T) {
ExtraProgramPages: 1,
},
},
- spec: specialAddr,
proto: futureProto,
expectedError: fmt.Errorf("tx.ExtraProgramPages is immutable"),
},
@@ -428,7 +419,6 @@ func TestWellFormedErrors(t *testing.T) {
ExtraProgramPages: 4,
},
},
- spec: specialAddr,
proto: futureProto,
expectedError: fmt.Errorf("tx.ExtraProgramPages exceeds MaxExtraAppProgramPages = %d", futureProto.MaxExtraAppProgramPages),
},
@@ -441,7 +431,6 @@ func TestWellFormedErrors(t *testing.T) {
ForeignApps: []basics.AppIndex{10, 11},
},
},
- spec: specialAddr,
proto: protoV27,
},
{
@@ -453,7 +442,6 @@ func TestWellFormedErrors(t *testing.T) {
ForeignApps: []basics.AppIndex{10, 11, 12},
},
},
- spec: specialAddr,
proto: protoV27,
expectedError: fmt.Errorf("tx.ForeignApps too long, max number of foreign apps is 2"),
},
@@ -466,7 +454,6 @@ func TestWellFormedErrors(t *testing.T) {
ForeignApps: []basics.AppIndex{10, 11, 12, 13, 14, 15, 16, 17},
},
},
- spec: specialAddr,
proto: futureProto,
},
{
@@ -478,7 +465,6 @@ func TestWellFormedErrors(t *testing.T) {
ForeignAssets: []basics.AssetIndex{14, 15, 16, 17, 18, 19, 20, 21, 22},
},
},
- spec: specialAddr,
proto: futureProto,
expectedError: fmt.Errorf("tx.ForeignAssets too long, max number of foreign assets is 8"),
},
@@ -493,7 +479,6 @@ func TestWellFormedErrors(t *testing.T) {
ForeignAssets: []basics.AssetIndex{14, 15, 16, 17},
},
},
- spec: specialAddr,
proto: futureProto,
expectedError: fmt.Errorf("tx references exceed MaxAppTotalTxnReferences = 8"),
},
@@ -509,7 +494,6 @@ func TestWellFormedErrors(t *testing.T) {
OnCompletion: UpdateApplicationOC,
},
},
- spec: specialAddr,
proto: protoV28,
expectedError: fmt.Errorf("app programs too long. max total len %d bytes", curProto.MaxAppProgramLen),
},
@@ -525,7 +509,6 @@ func TestWellFormedErrors(t *testing.T) {
OnCompletion: UpdateApplicationOC,
},
},
- spec: specialAddr,
proto: futureProto,
},
{
@@ -543,13 +526,49 @@ func TestWellFormedErrors(t *testing.T) {
OnCompletion: UpdateApplicationOC,
},
},
- spec: specialAddr,
proto: protoV28,
expectedError: fmt.Errorf("tx.ExtraProgramPages is immutable"),
},
+ {
+ tx: Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: okHeader,
+ ApplicationCallTxnFields: ApplicationCallTxnFields{
+ ApplicationID: 1,
+ Boxes: []BoxRef{{Index: 1, Name: []byte("junk")}},
+ },
+ },
+ proto: futureProto,
+ expectedError: fmt.Errorf("tx.Boxes[0].Index is 1. Exceeds len(tx.ForeignApps)"),
+ },
+ {
+ tx: Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: okHeader,
+ ApplicationCallTxnFields: ApplicationCallTxnFields{
+ ApplicationID: 1,
+ Boxes: []BoxRef{{Index: 1, Name: []byte("junk")}},
+ ForeignApps: []basics.AppIndex{1},
+ },
+ },
+ proto: futureProto,
+ },
+ {
+ tx: Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: okHeader,
+ ApplicationCallTxnFields: ApplicationCallTxnFields{
+ ApplicationID: 1,
+ Boxes: []BoxRef{{Index: 1, Name: []byte("junk")}},
+ ForeignApps: []basics.AppIndex{1},
+ },
+ },
+ proto: protoV32,
+ expectedError: fmt.Errorf("tx.Boxes too long, max number of box references is 0"),
+ },
}
for _, usecase := range usecases {
- err := usecase.tx.WellFormed(usecase.spec, usecase.proto)
+ err := usecase.tx.WellFormed(specialAddr, usecase.proto)
require.Equal(t, usecase.expectedError, err)
}
}
diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go
index 4d0bd2717..115bd9536 100644
--- a/data/transactions/verify/txn.go
+++ b/data/transactions/verify/txn.go
@@ -69,6 +69,52 @@ type GroupContext struct {
ledger logic.LedgerForSignature
}
+var errTxGroupInvalidFee = errors.New("txgroup fee requirement overflow")
+var errTxnSigHasNoSig = errors.New("signedtxn has no sig")
+var errTxnSigNotWellFormed = errors.New("signedtxn should only have one of Sig or Msig or LogicSig")
+var errRekeyingNotSupported = errors.New("nonempty AuthAddr but rekeying is not supported")
+var errUnknownSignature = errors.New("has one mystery sig. WAT?")
+
+// TxGroupErrorReason is reason code for ErrTxGroupError
+type TxGroupErrorReason int
+
+const (
+ // TxGroupErrorReasonGeneric is a generic (not tracked) reason code
+ TxGroupErrorReasonGeneric TxGroupErrorReason = iota
+ // TxGroupErrorReasonNotWellFormed is txn.WellFormed failure
+ TxGroupErrorReasonNotWellFormed
+ // TxGroupErrorReasonInvalidFee is invalid fee pooling in transaction group
+ TxGroupErrorReasonInvalidFee
+ // TxGroupErrorReasonHasNoSig is for transaction without any signature
+ TxGroupErrorReasonHasNoSig
+ // TxGroupErrorReasonSigNotWellFormed defines signature format errors
+ TxGroupErrorReasonSigNotWellFormed
+ // TxGroupErrorReasonMsigNotWellFormed defines multisig format errors
+ TxGroupErrorReasonMsigNotWellFormed
+ // TxGroupErrorReasonLogicSigFailed defines logic sig validation errors
+ TxGroupErrorReasonLogicSigFailed
+
+ // TxGroupErrorReasonNumValues is number of enum values
+ TxGroupErrorReasonNumValues
+)
+
+// ErrTxGroupError is an error from txn pre-validation (well form-ness, signature format, etc).
+// It can be unwrapped into underlying error, as well as has a specific failure reason code.
+type ErrTxGroupError struct {
+ err error
+ Reason TxGroupErrorReason
+}
+
+// Error returns an error message from the underlying error
+func (e *ErrTxGroupError) Error() string {
+ return e.err.Error()
+}
+
+// Unwrap returns an underlying error
+func (e *ErrTxGroupError) Unwrap() error {
+ return e.err
+}
+
// PrepareGroupContext prepares a verification group parameter object for a given transaction
// group.
func PrepareGroupContext(group []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader, ledger logic.LedgerForSignature) (*GroupContext, error) {
@@ -101,14 +147,14 @@ func (g *GroupContext) Equal(other *GroupContext) bool {
// txnBatchPrep verifies a SignedTxn having no obviously inconsistent data.
// Block-assembly time checks of LogicSig and accounting rules may still block the txn.
-// it is the caller responsibility to call batchVerifier.Verify()
-func txnBatchPrep(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext, verifier *crypto.BatchVerifier) error {
+// It is the caller responsibility to call batchVerifier.Verify().
+func txnBatchPrep(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext, verifier *crypto.BatchVerifier) *ErrTxGroupError {
if !groupCtx.consensusParams.SupportRekeying && (s.AuthAddr != basics.Address{}) {
- return errors.New("nonempty AuthAddr but rekeying is not supported")
+ return &ErrTxGroupError{err: errRekeyingNotSupported, Reason: TxGroupErrorReasonGeneric}
}
if err := s.Txn.WellFormed(groupCtx.specAddrs, groupCtx.consensusParams); err != nil {
- return err
+ return &ErrTxGroupError{err: err, Reason: TxGroupErrorReasonNotWellFormed}
}
return stxnCoreChecks(s, txnIdx, groupCtx, verifier)
@@ -135,8 +181,8 @@ func TxnGroup(stxs []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader,
// txnGroupBatchPrep verifies a []SignedTxn having no obviously inconsistent data.
// it is the caller responsibility to call batchVerifier.Verify()
-func txnGroupBatchPrep(stxs []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader, ledger logic.LedgerForSignature, verifier *crypto.BatchVerifier) (groupCtx *GroupContext, err error) {
- groupCtx, err = PrepareGroupContext(stxs, contextHdr, ledger)
+func txnGroupBatchPrep(stxs []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader, ledger logic.LedgerForSignature, verifier *crypto.BatchVerifier) (*GroupContext, error) {
+ groupCtx, err := PrepareGroupContext(stxs, contextHdr, ledger)
if err != nil {
return nil, err
}
@@ -144,10 +190,11 @@ func txnGroupBatchPrep(stxs []transactions.SignedTxn, contextHdr bookkeeping.Blo
minFeeCount := uint64(0)
feesPaid := uint64(0)
for i, stxn := range stxs {
- err = txnBatchPrep(&stxn, i, groupCtx, verifier)
- if err != nil {
- err = fmt.Errorf("transaction %+v invalid : %w", stxn, err)
- return
+ prepErr := txnBatchPrep(&stxn, i, groupCtx, verifier)
+ if prepErr != nil {
+ // re-wrap the error with more details
+ prepErr.err = fmt.Errorf("transaction %+v invalid : %w", stxn, prepErr.err)
+ return nil, prepErr
}
if stxn.Txn.Type != protocol.StateProofTx {
minFeeCount++
@@ -156,22 +203,27 @@ func txnGroupBatchPrep(stxs []transactions.SignedTxn, contextHdr bookkeeping.Blo
}
feeNeeded, overflow := basics.OMul(groupCtx.consensusParams.MinTxnFee, minFeeCount)
if overflow {
- err = fmt.Errorf("txgroup fee requirement overflow")
- return
+ err = &ErrTxGroupError{err: errTxGroupInvalidFee, Reason: TxGroupErrorReasonInvalidFee}
+ return nil, err
}
// feesPaid may have saturated. That's ok. Since we know
// feeNeeded did not overflow, simple comparison tells us
// feesPaid was enough.
if feesPaid < feeNeeded {
- err = fmt.Errorf("txgroup had %d in fees, which is less than the minimum %d * %d",
- feesPaid, minFeeCount, groupCtx.consensusParams.MinTxnFee)
- return
+ err = &ErrTxGroupError{
+ err: fmt.Errorf(
+ "txgroup had %d in fees, which is less than the minimum %d * %d",
+ feesPaid, minFeeCount, groupCtx.consensusParams.MinTxnFee),
+ Reason: TxGroupErrorReasonInvalidFee,
+ }
+ return nil, err
}
- return
+ return groupCtx, nil
}
-func stxnCoreChecks(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext, batchVerifier *crypto.BatchVerifier) error {
+// stxnCoreChecks runs signatures validity checks and enqueues signature into batchVerifier for verification.
+func stxnCoreChecks(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext, batchVerifier *crypto.BatchVerifier) *ErrTxGroupError {
numSigs := 0
hasSig := false
hasMsig := false
@@ -196,11 +248,10 @@ func stxnCoreChecks(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContex
if s.Txn.Sender == transactions.StateProofSender && s.Txn.Type == protocol.StateProofTx {
return nil
}
-
- return errors.New("signedtxn has no sig")
+ return &ErrTxGroupError{err: errTxnSigHasNoSig, Reason: TxGroupErrorReasonHasNoSig}
}
if numSigs > 1 {
- return errors.New("signedtxn should only have one of Sig or Msig or LogicSig")
+ return &ErrTxGroupError{err: errTxnSigNotWellFormed, Reason: TxGroupErrorReasonSigNotWellFormed}
}
if hasSig {
@@ -209,14 +260,17 @@ func stxnCoreChecks(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContex
}
if hasMsig {
if err := crypto.MultisigBatchPrep(s.Txn, crypto.Digest(s.Authorizer()), s.Msig, batchVerifier); err != nil {
- return fmt.Errorf("multisig validation failed: %w", err)
+ return &ErrTxGroupError{err: fmt.Errorf("multisig validation failed: %w", err), Reason: TxGroupErrorReasonMsigNotWellFormed}
}
return nil
}
if hasLogicSig {
- return logicSigVerify(s, txnIdx, groupCtx)
+ if err := logicSigVerify(s, txnIdx, groupCtx); err != nil {
+ return &ErrTxGroupError{err: err, Reason: TxGroupErrorReasonLogicSigFailed}
+ }
+ return nil
}
- return errors.New("has one mystery sig. WAT?")
+ return &ErrTxGroupError{err: errUnknownSignature, Reason: TxGroupErrorReasonGeneric}
}
// LogicSigSanityCheck checks that the signature is valid and that the program is basically well formed.
@@ -254,7 +308,7 @@ func logicSigSanityCheckBatchPrep(txn *transactions.SignedTxn, groupIndex int, g
}
if groupIndex < 0 {
- return errors.New("Negative groupIndex")
+ return errors.New("negative groupIndex")
}
txngroup := transactions.WrapSignedTxnsWithAD(groupCtx.signedGroupTxns)
ep := logic.EvalParams{
@@ -310,7 +364,7 @@ func logicSigVerify(txn *transactions.SignedTxn, groupIndex int, groupCtx *Group
}
if groupIndex < 0 {
- return errors.New("Negative groupIndex")
+ return errors.New("negative groupIndex")
}
ep := logic.EvalParams{
Proto: &groupCtx.consensusParams,
diff --git a/data/txHandler.go b/data/txHandler.go
index cd4c25c8e..8580e220f 100644
--- a/data/txHandler.go
+++ b/data/txHandler.go
@@ -19,9 +19,11 @@ package data
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"sync"
+ "time"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
@@ -45,6 +47,20 @@ var txBacklogSize = config.Consensus[protocol.ConsensusCurrentVersion].MaxTxnByt
var transactionMessagesHandled = metrics.MakeCounter(metrics.TransactionMessagesHandled)
var transactionMessagesDroppedFromBacklog = metrics.MakeCounter(metrics.TransactionMessagesDroppedFromBacklog)
var transactionMessagesDroppedFromPool = metrics.MakeCounter(metrics.TransactionMessagesDroppedFromPool)
+var transactionMessagesAlreadyCommitted = metrics.MakeCounter(metrics.TransactionMessagesAlreadyCommitted)
+var transactionMessagesTxGroupInvalidFee = metrics.MakeCounter(metrics.TransactionMessagesTxGroupInvalidFee)
+var transactionMessagesTxnNotWellFormed = metrics.MakeCounter(metrics.TransactionMessagesTxnNotWellFormed)
+var transactionMessagesTxnSigNotWellFormed = metrics.MakeCounter(metrics.TransactionMessagesTxnSigNotWellFormed)
+var transactionMessagesTxnMsigNotWellFormed = metrics.MakeCounter(metrics.TransactionMessagesTxnMsigNotWellFormed)
+var transactionMessagesTxnLogicSig = metrics.MakeCounter(metrics.TransactionMessagesTxnLogicSig)
+var transactionMessagesTxnSigVerificationFailed = metrics.MakeCounter(metrics.TransactionMessagesTxnSigVerificationFailed)
+var transactionMessagesBacklogErr = metrics.MakeCounter(metrics.TransactionMessagesBacklogErr)
+var transactionMessagesRemember = metrics.MakeCounter(metrics.TransactionMessagesRemember)
+var transactionMessagesBacklogSizeGauge = metrics.MakeGauge(metrics.TransactionMessagesBacklogSize)
+
+var transactionGroupTxSyncHandled = metrics.MakeCounter(metrics.TransactionGroupTxSyncHandled)
+var transactionGroupTxSyncRemember = metrics.MakeCounter(metrics.TransactionGroupTxSyncRemember)
+var transactionGroupTxSyncAlreadyCommitted = metrics.MakeCounter(metrics.TransactionGroupTxSyncAlreadyCommitted)
// The txBacklogMsg structure used to track a single incoming transaction from the gossip network,
type txBacklogMsg struct {
@@ -91,18 +107,18 @@ func MakeTxHandler(txPool *pools.TransactionPool, ledger *Ledger, net network.Go
postVerificationQueue: make(chan *txBacklogMsg, txBacklogSize),
net: net,
}
-
- handler.ctx, handler.ctxCancel = context.WithCancel(context.Background())
return handler
}
// Start enables the processing of incoming messages at the transaction handler
func (handler *TxHandler) Start() {
+ handler.ctx, handler.ctxCancel = context.WithCancel(context.Background())
handler.net.RegisterHandlers([]network.TaggedMessageHandler{
{Tag: protocol.TxnTag, MessageHandler: network.HandlerFunc(handler.processIncomingTxn)},
})
- handler.backlogWg.Add(1)
+ handler.backlogWg.Add(2)
go handler.backlogWorker()
+ go handler.backlogGaugeThread()
}
// Stop suspends the processing of incoming messages at the transaction handler
@@ -113,15 +129,31 @@ func (handler *TxHandler) Stop() {
func reencode(stxns []transactions.SignedTxn) []byte {
var result [][]byte
- for _, stxn := range stxns {
- result = append(result, protocol.Encode(&stxn))
+ for i := range stxns {
+ result = append(result, protocol.Encode(&stxns[i]))
}
return bytes.Join(result, nil)
}
+func (handler *TxHandler) backlogGaugeThread() {
+ defer handler.backlogWg.Done()
+ ticker := time.NewTicker(5 * time.Second)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ transactionMessagesBacklogSizeGauge.Set(float64(len(handler.backlogQueue)))
+ case <-handler.ctx.Done():
+ return
+ }
+ }
+}
+
// backlogWorker is the worker go routine that process the incoming messages from the postVerificationQueue and backlogQueue channels
// and dispatches them further.
func (handler *TxHandler) backlogWorker() {
+ // Note: TestIncomingTxHandle and TestIncomingTxGroupHandle emulate this function.
+ // Changes to the behavior in this function should be reflected in the test.
defer handler.backlogWg.Done()
for {
// prioritize the postVerificationQueue
@@ -130,7 +162,7 @@ func (handler *TxHandler) backlogWorker() {
if !ok {
return
}
- handler.postprocessCheckedTxn(wi)
+ handler.postProcessCheckedTxn(wi)
// restart the loop so that we could empty out the post verification queue.
continue
@@ -144,6 +176,7 @@ func (handler *TxHandler) backlogWorker() {
return
}
if handler.checkAlreadyCommitted(wi) {
+ transactionMessagesAlreadyCommitted.Inc(nil)
continue
}
@@ -154,7 +187,7 @@ func (handler *TxHandler) backlogWorker() {
if !ok {
return
}
- handler.postprocessCheckedTxn(wi)
+ handler.postProcessCheckedTxn(wi)
case <-handler.ctx.Done():
return
@@ -162,9 +195,39 @@ func (handler *TxHandler) backlogWorker() {
}
}
-func (handler *TxHandler) postprocessCheckedTxn(wi *txBacklogMsg) {
+func (handler *TxHandler) postProcessReportErrors(err error) {
+ if errors.Is(err, crypto.ErrBatchVerificationFailed) {
+ transactionMessagesTxnSigVerificationFailed.Inc(nil)
+ return
+ }
+
+ var txGroupErr *verify.ErrTxGroupError
+ if errors.As(err, &txGroupErr) {
+ switch txGroupErr.Reason {
+ case verify.TxGroupErrorReasonNotWellFormed:
+ transactionMessagesTxnNotWellFormed.Inc(nil)
+ case verify.TxGroupErrorReasonInvalidFee:
+ transactionMessagesTxGroupInvalidFee.Inc(nil)
+ case verify.TxGroupErrorReasonHasNoSig:
+ fallthrough
+ case verify.TxGroupErrorReasonSigNotWellFormed:
+ transactionMessagesTxnSigNotWellFormed.Inc(nil)
+ case verify.TxGroupErrorReasonMsigNotWellFormed:
+ transactionMessagesTxnMsigNotWellFormed.Inc(nil)
+ case verify.TxGroupErrorReasonLogicSigFailed:
+ transactionMessagesTxnLogicSig.Inc(nil)
+ default:
+ transactionMessagesBacklogErr.Inc(nil)
+ }
+ } else {
+ transactionMessagesBacklogErr.Inc(nil)
+ }
+}
+
+func (handler *TxHandler) postProcessCheckedTxn(wi *txBacklogMsg) {
if wi.verificationErr != nil {
// disconnect from peer.
+ handler.postProcessReportErrors(wi.verificationErr)
logging.Base().Warnf("Received a malformed tx group %v: %v", wi.unverifiedTxGroup, wi.verificationErr)
handler.net.Disconnect(wi.rawmsg.Sender)
return
@@ -183,6 +246,8 @@ func (handler *TxHandler) postprocessCheckedTxn(wi *txBacklogMsg) {
return
}
+ transactionMessagesRemember.Inc(nil)
+
// if we remembered without any error ( i.e. txpool wasn't full ), then we should pin these transactions.
err = handler.ledger.VerifiedTransactionCache().Pin(verifiedTxGroup)
if err != nil {
@@ -201,7 +266,7 @@ func (handler *TxHandler) asyncVerifySignature(arg interface{}) interface{} {
latest := handler.ledger.Latest()
latestHdr, err := handler.ledger.BlockHdr(latest)
if err != nil {
- tx.verificationErr = fmt.Errorf("Could not get header for previous block %d: %w", latest, err)
+ tx.verificationErr = fmt.Errorf("could not get header for previous block %d: %w", latest, err)
logging.Base().Warnf("Could not get header for previous block %d: %v", latest, err)
} else {
// we can't use PaysetGroups here since it's using a execpool like this go-routine and we don't want to deadlock.
@@ -286,7 +351,10 @@ func (handler *TxHandler) processDecoded(unverifiedTxGroup []transactions.Signed
tx := &txBacklogMsg{
unverifiedTxGroup: unverifiedTxGroup,
}
+ transactionGroupTxSyncHandled.Inc(nil)
+
if handler.checkAlreadyCommitted(tx) {
+ transactionGroupTxSyncAlreadyCommitted.Inc(nil)
return network.OutgoingMessage{}, true
}
@@ -317,6 +385,8 @@ func (handler *TxHandler) processDecoded(unverifiedTxGroup []transactions.Signed
return network.OutgoingMessage{}, true
}
+ transactionGroupTxSyncRemember.Inc(nil)
+
// if we remembered without any error ( i.e. txpool wasn't full ), then we should pin these transactions.
err = handler.ledger.VerifiedTransactionCache().Pin(verifiedTxGroup)
if err != nil {
diff --git a/data/txHandler_test.go b/data/txHandler_test.go
index 14a5495eb..46ea21562 100644
--- a/data/txHandler_test.go
+++ b/data/txHandler_test.go
@@ -17,9 +17,14 @@
package data
import (
+ "context"
+ "encoding/binary"
+ "errors"
"fmt"
"io"
"math/rand"
+ "strings"
+ "sync"
"testing"
"time"
@@ -38,6 +43,7 @@ import (
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-algorand/util/execpool"
+ "github.com/algorand/go-algorand/util/metrics"
)
func BenchmarkTxHandlerProcessing(b *testing.B) {
@@ -248,3 +254,406 @@ func BenchmarkTxHandlerDecoderMsgp(b *testing.B) {
require.Equal(b, benchTxnNum, idx)
}
}
+
+func TestIncomingTxHandle(t *testing.T) {
+ incomingTxHandlerProcessing(1, t)
+}
+
+func TestIncomingTxGroupHandle(t *testing.T) {
+ incomingTxHandlerProcessing(proto.MaxTxGroupSize, t)
+}
+
+// incomingTxHandlerProcessing is a comprehensive transaction handling test
+// It handles the signed transactions by passing them to the backlog for verification
+func incomingTxHandlerProcessing(maxGroupSize int, t *testing.T) {
+ const numUsers = 100
+ numberOfTransactionGroups := 1000
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Warn)
+ addresses := make([]basics.Address, numUsers)
+ secrets := make([]*crypto.SignatureSecrets, numUsers)
+
+ // prepare the accounts
+ genesis := make(map[basics.Address]basics.AccountData)
+ for i := 0; i < numUsers; i++ {
+ secret := keypair()
+ addr := basics.Address(secret.SignatureVerifier)
+ secrets[i] = secret
+ addresses[i] = addr
+ genesis[addr] = basics.AccountData{
+ Status: basics.Online,
+ MicroAlgos: basics.MicroAlgos{Raw: 10000000000000},
+ }
+ }
+ genesis[poolAddr] = basics.AccountData{
+ Status: basics.NotParticipating,
+ MicroAlgos: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinBalance},
+ }
+
+ require.Equal(t, len(genesis), numUsers+1)
+ genBal := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ ledgerName := fmt.Sprintf("%s-mem-%d", t.Name(), numberOfTransactionGroups)
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg)
+ require.NoError(t, err)
+
+ l := ledger
+ tp := pools.MakeTransactionPool(l.Ledger, cfg, logging.Base())
+ backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
+ handler := MakeTxHandler(tp, l, &mocks.MockNetwork{}, "", crypto.Digest{}, backlogPool)
+ // since Start is not called, set the context here
+ handler.ctx, handler.ctxCancel = context.WithCancel(context.Background())
+ defer handler.ctxCancel()
+
+ outChan := make(chan *txBacklogMsg, 10)
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ // Make a test backlog worker, which is similar to backlogWorker, but sends the results
+ // through the outChan instead of passing it to postProcessCheckedTxn
+ go func() {
+ defer wg.Done()
+ defer close(outChan)
+ for {
+ // prioritize the postVerificationQueue
+ select {
+ case wi, ok := <-handler.postVerificationQueue:
+ if !ok {
+ return
+ }
+ outChan <- wi
+ // restart the loop so that we could empty out the post verification queue.
+ continue
+ default:
+ }
+
+ // we have no more post verification items. wait for either backlog queue item or post verification item.
+ select {
+ case wi, ok := <-handler.backlogQueue:
+ if !ok {
+ // shut down to end the test
+ handler.txVerificationPool.Shutdown()
+ close(handler.postVerificationQueue)
+ // wait until all the pending responses are obtained.
+ // this is not in backlogWorker, maybe should be
+ for wi := range handler.postVerificationQueue {
+ outChan <- wi
+ }
+ return
+ }
+ if handler.checkAlreadyCommitted(wi) {
+ // this is not expected during the test
+ continue
+ }
+
+ // enqueue the task to the verification pool.
+ handler.txVerificationPool.EnqueueBacklog(handler.ctx, handler.asyncVerifySignature, wi, nil)
+
+ case wi, ok := <-handler.postVerificationQueue:
+ if !ok {
+ return
+ }
+ outChan <- wi
+
+ case <-handler.ctx.Done():
+ return
+ }
+ }
+ }()
+
+ // Prepare the transactions
+ signedTransactionGroups, badTxnGroups :=
+ makeSignedTxnGroups(numberOfTransactionGroups, numUsers, maxGroupSize, 0.5, addresses, secrets)
+ encodedSignedTransactionGroups := make([]network.IncomingMessage, 0, numberOfTransactionGroups)
+ for _, stxngrp := range signedTransactionGroups {
+ data := make([]byte, 0)
+ for _, stxn := range stxngrp {
+ data = append(data, protocol.Encode(&stxn)...)
+ }
+ encodedSignedTransactionGroups =
+ append(encodedSignedTransactionGroups, network.IncomingMessage{Data: data})
+ }
+
+ // Process the results and make sure they are correct
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ groupCounter := 0
+ txnCounter := 0
+ invalidCounter := 0
+ defer func() {
+ t.Logf("processed %d txn groups (%d txns)\n", groupCounter, txnCounter)
+ }()
+ for wi := range outChan {
+ txnCounter = txnCounter + len(wi.unverifiedTxGroup)
+ groupCounter++
+ u, _ := binary.Uvarint(wi.unverifiedTxGroup[0].Txn.Note)
+ _, inBad := badTxnGroups[u]
+ if wi.verificationErr == nil {
+ require.False(t, inBad, "No error for invalid signature")
+ } else {
+ invalidCounter++
+ require.True(t, inBad, "Error for good signature")
+ }
+ }
+ t.Logf("Txn groups with invalid sigs: %d\n", invalidCounter)
+ }()
+
+ // Send the transactions to the verifier
+ for _, tg := range encodedSignedTransactionGroups {
+ handler.processIncomingTxn(tg)
+ randduration := time.Duration(uint64(((1 + rand.Float32()) * 3)))
+ time.Sleep(randduration * time.Microsecond)
+ }
+ close(handler.backlogQueue)
+ wg.Wait()
+
+ // Report the number of transactions dropped because the backlog was busy
+ var buf strings.Builder
+ metrics.DefaultRegistry().WriteMetrics(&buf, "")
+ str := buf.String()
+ x := strings.Index(str, "\nalgod_transaction_messages_dropped_backlog")
+ str = str[x+44 : x+44+strings.Index(str[x+44:], "\n")]
+ str = strings.TrimSpace(strings.ReplaceAll(str, "}", " "))
+ t.Logf("dropped %s txn gropus\n", str)
+}
+
+// makeSignedTxnGroups prepares N transaction groups of random (maxGroupSize) sizes with random
+// invalid signatures of a given probability (invalidProb)
+func makeSignedTxnGroups(N, numUsers, maxGroupSize int, invalidProb float32, addresses []basics.Address,
+ secrets []*crypto.SignatureSecrets) (ret [][]transactions.SignedTxn,
+ badTxnGroups map[uint64]interface{}) {
+ badTxnGroups = make(map[uint64]interface{})
+
+ protoMaxGrpSize := proto.MaxTxGroupSize
+ ret = make([][]transactions.SignedTxn, 0, N)
+ for u := 0; u < N; u++ {
+ grpSize := rand.Intn(protoMaxGrpSize-1) + 1
+ if grpSize > maxGroupSize {
+ grpSize = maxGroupSize
+ }
+ var txGroup transactions.TxGroup
+ txns := make([]transactions.Transaction, 0, grpSize)
+ for g := 0; g < grpSize; g++ {
+ // generate transactions
+ noteField := make([]byte, binary.MaxVarintLen64)
+ binary.PutUvarint(noteField, uint64(u))
+ tx := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addresses[(u+g)%numUsers],
+ Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2},
+ FirstValid: 0,
+ LastValid: basics.Round(proto.MaxTxnLife),
+ GenesisHash: genesisHash,
+ Note: noteField,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addresses[(u+g+1)%numUsers],
+ Amount: basics.MicroAlgos{Raw: mockBalancesMinBalance + (rand.Uint64() % 10000)},
+ },
+ }
+ txGroup.TxGroupHashes = append(txGroup.TxGroupHashes, crypto.Digest(tx.ID()))
+ txns = append(txns, tx)
+ }
+ groupHash := crypto.HashObj(txGroup)
+ signedTxGroup := make([]transactions.SignedTxn, 0, grpSize)
+ for g, txn := range txns {
+ txn.Group = groupHash
+ signedTx := txn.Sign(secrets[(u+g)%numUsers])
+ signedTx.Txn = txn
+ signedTxGroup = append(signedTxGroup, signedTx)
+ }
+ // randomly make bad signatures
+ if rand.Float32() < invalidProb {
+ tinGrp := rand.Intn(grpSize)
+ signedTxGroup[tinGrp].Sig[0] = signedTxGroup[tinGrp].Sig[0] + 1
+ badTxnGroups[uint64(u)] = struct{}{}
+ }
+ ret = append(ret, signedTxGroup)
+ }
+ return
+}
+
+// BenchmarkHandleTxns sends signed transactions to the verifier
+func BenchmarkHandleTxns(b *testing.B) {
+ b.N = b.N * proto.MaxTxGroupSize / 2
+ runHandlerBenchmark(1, b)
+}
+
+// BenchmarkHandleTxnGroups sends signed transaction groups to the verifier
+func BenchmarkHandleTxnGroups(b *testing.B) {
+ runHandlerBenchmark(proto.MaxTxGroupSize, b)
+}
+
+// runHandlerBenchmark has a similar workflow to incomingTxHandlerProcessing,
+// but bypasses the backlog, and sends the transactions directly to the verifier
+func runHandlerBenchmark(maxGroupSize int, b *testing.B) {
+ const numUsers = 100
+ log := logging.TestingLog(b)
+ log.SetLevel(logging.Warn)
+ addresses := make([]basics.Address, numUsers)
+ secrets := make([]*crypto.SignatureSecrets, numUsers)
+
+ // prepare the accounts
+ genesis := make(map[basics.Address]basics.AccountData)
+ for i := 0; i < numUsers; i++ {
+ secret := keypair()
+ addr := basics.Address(secret.SignatureVerifier)
+ secrets[i] = secret
+ addresses[i] = addr
+ genesis[addr] = basics.AccountData{
+ Status: basics.Online,
+ MicroAlgos: basics.MicroAlgos{Raw: 10000000000000},
+ }
+ }
+ genesis[poolAddr] = basics.AccountData{
+ Status: basics.NotParticipating,
+ MicroAlgos: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinBalance},
+ }
+
+ require.Equal(b, len(genesis), numUsers+1)
+ genBal := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ ledgerName := fmt.Sprintf("%s-mem-%d", b.Name(), b.N)
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg)
+ require.NoError(b, err)
+
+ l := ledger
+ tp := pools.MakeTransactionPool(l.Ledger, cfg, logging.Base())
+ backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
+ handler := MakeTxHandler(tp, l, &mocks.MockNetwork{}, "", crypto.Digest{}, backlogPool)
+ // since Start is not called, set the context here
+ handler.ctx, handler.ctxCancel = context.WithCancel(context.Background())
+ defer handler.ctxCancel()
+
+ // Prepare the transactions
+ signedTransactionGroups, badTxnGroups := makeSignedTxnGroups(b.N, numUsers, maxGroupSize, 0.001, addresses, secrets)
+ outChan := handler.postVerificationQueue
+ wg := sync.WaitGroup{}
+
+ var tt time.Time
+ // Process the results and make sure they are correct
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ groupCounter := 0
+ var txnCounter uint64
+ invalidCounter := 0
+ for wi := range outChan {
+ txnCounter = txnCounter + uint64(len(wi.unverifiedTxGroup))
+ groupCounter++
+ u, _ := binary.Uvarint(wi.unverifiedTxGroup[0].Txn.Note)
+ _, inBad := badTxnGroups[u]
+ if wi.verificationErr == nil {
+ require.False(b, inBad, "No error for invalid signature")
+ } else {
+ invalidCounter++
+ require.True(b, inBad, "Error for good signature")
+ }
+ }
+ if txnCounter > 0 {
+ b.Logf("TPS: %d\n", uint64(txnCounter)*1000000000/uint64(time.Since(tt)))
+ b.Logf("Time/txn: %d(microsec)\n", uint64((time.Since(tt)/time.Microsecond))/txnCounter)
+ b.Logf("processed total: [%d groups (%d invalid)] [%d txns]\n", groupCounter, invalidCounter, txnCounter)
+ }
+ }()
+
+ b.ResetTimer()
+ tt = time.Now()
+ for _, stxngrp := range signedTransactionGroups {
+ blm := txBacklogMsg{rawmsg: nil, unverifiedTxGroup: stxngrp}
+ handler.txVerificationPool.EnqueueBacklog(handler.ctx, handler.asyncVerifySignature, &blm, nil)
+ }
+ // shut down to end the test
+ handler.txVerificationPool.Shutdown()
+ close(handler.postVerificationQueue)
+ close(handler.backlogQueue)
+ wg.Wait()
+}
+
+func TestTxHandlerPostProcessError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ collect := func() map[string]float64 {
+ // collect all specific error reason metrics except TxGroupErrorReasonNotWellFormed,
+ // it is tested in TestPostProcessErrorWithVerify
+ result := map[string]float64{}
+ transactionMessagesTxnSigVerificationFailed.AddMetric(result)
+ transactionMessagesAlreadyCommitted.AddMetric(result)
+ transactionMessagesTxGroupInvalidFee.AddMetric(result)
+ // transactionMessagesTxnNotWellFormed.AddMetric(result)
+ transactionMessagesTxnSigNotWellFormed.AddMetric(result)
+ transactionMessagesTxnMsigNotWellFormed.AddMetric(result)
+ transactionMessagesTxnLogicSig.AddMetric(result)
+ return result
+ }
+ var txh TxHandler
+
+ errSome := errors.New("some error")
+ txh.postProcessReportErrors(errSome)
+ result := collect()
+ require.Len(t, result, 0)
+ transactionMessagesBacklogErr.AddMetric(result)
+ require.Len(t, result, 1)
+
+ counter := 0
+ for i := verify.TxGroupErrorReasonGeneric; i <= verify.TxGroupErrorReasonLogicSigFailed; i++ {
+ if i == verify.TxGroupErrorReasonNotWellFormed {
+ // skip TxGroupErrorReasonNotWellFormed, tested in TestPostProcessErrorWithVerify.
+ // the test uses global metric counters, skipping makes the test deterministic
+ continue
+ }
+
+ errTxGroup := &verify.ErrTxGroupError{Reason: i}
+ txh.postProcessReportErrors(errTxGroup)
+ result = collect()
+ if i == verify.TxGroupErrorReasonSigNotWellFormed {
+ // TxGroupErrorReasonSigNotWellFormed and TxGroupErrorReasonHasNoSig increment the same metric
+ counter--
+ require.Equal(t, result[metrics.TransactionMessagesTxnSigNotWellFormed.Name], float64(2))
+ }
+ require.Len(t, result, counter)
+ counter++
+ }
+
+ // there is one less metric than the number of tracked values,
+ // plus one generic non-tracked value, plus skipped TxGroupErrorReasonNotWellFormed
+ const expected = int(verify.TxGroupErrorReasonNumValues) - 3
+ require.Len(t, result, expected)
+
+ errVerify := crypto.ErrBatchVerificationFailed
+ txh.postProcessReportErrors(errVerify)
+ result = collect()
+ require.Len(t, result, expected+1)
+}
+
+func TestTxHandlerPostProcessErrorWithVerify(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ txn := transactions.Transaction{}
+ stxn := transactions.SignedTxn{Txn: txn}
+
+ hdr := bookkeeping.BlockHeader{
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: protocol.ConsensusCurrentVersion,
+ },
+ }
+ _, err := verify.TxnGroup([]transactions.SignedTxn{stxn}, hdr, nil, nil)
+ var txGroupErr *verify.ErrTxGroupError
+ require.ErrorAs(t, err, &txGroupErr)
+
+ result := map[string]float64{}
+ transactionMessagesTxnNotWellFormed.AddMetric(result)
+ require.Len(t, result, 0)
+
+ var txh TxHandler
+ txh.postProcessReportErrors(err)
+ transactionMessagesTxnNotWellFormed.AddMetric(result)
+ require.Len(t, result, 1)
+}
diff --git a/data/txntest/txn.go b/data/txntest/txn.go
index 988753a0d..cab0ded5c 100644
--- a/data/txntest/txn.go
+++ b/data/txntest/txn.go
@@ -95,6 +95,7 @@ type Txn struct {
Accounts []basics.Address
ForeignApps []basics.AppIndex
ForeignAssets []basics.AssetIndex
+ Boxes []transactions.BoxRef
LocalStateSchema basics.StateSchema
GlobalStateSchema basics.StateSchema
ApprovalProgram interface{} // string, nil, or []bytes if already compiled
@@ -106,12 +107,50 @@ type Txn struct {
StateProofMsg stateproofmsg.Message
}
+// internalCopy "finishes" a shallow copy done by a simple Go assignment by
+// copying all of the slice fields
+func (tx *Txn) internalCopy() {
+ tx.Note = append([]byte(nil), tx.Note...)
+ if tx.ApplicationArgs != nil {
+ tx.ApplicationArgs = append([][]byte(nil), tx.ApplicationArgs...)
+ for i := range tx.ApplicationArgs {
+ tx.ApplicationArgs[i] = append([]byte(nil), tx.ApplicationArgs[i]...)
+ }
+ }
+ tx.Accounts = append([]basics.Address(nil), tx.Accounts...)
+ tx.ForeignApps = append([]basics.AppIndex(nil), tx.ForeignApps...)
+ tx.ForeignAssets = append([]basics.AssetIndex(nil), tx.ForeignAssets...)
+ tx.Boxes = append([]transactions.BoxRef(nil), tx.Boxes...)
+ for i := 0; i < len(tx.Boxes); i++ {
+ tx.Boxes[i].Name = append([]byte(nil), tx.Boxes[i].Name...)
+ }
+
+ // Programs may or may not actually be byte slices. The other
+ // possibilities don't require copies.
+ if program, ok := tx.ApprovalProgram.([]byte); ok {
+ tx.ApprovalProgram = append([]byte(nil), program...)
+ }
+ if program, ok := tx.ClearStateProgram.([]byte); ok {
+ tx.ClearStateProgram = append([]byte(nil), program...)
+ }
+}
+
// Noted returns a new Txn with the given note field.
-func (tx *Txn) Noted(note string) *Txn {
- copy := &Txn{}
- *copy = *tx
- copy.Note = []byte(note)
- return copy
+func (tx Txn) Noted(note string) *Txn {
+ tx.internalCopy()
+ tx.Note = []byte(note)
+ return &tx
+}
+
+// Args returns a new Txn with the given strings as app args
+func (tx Txn) Args(strings ...string) *Txn {
+ tx.internalCopy()
+ bytes := make([][]byte, len(strings))
+ for i, s := range strings {
+ bytes[i] = []byte(s)
+ }
+ tx.ApplicationArgs = bytes
+ return &tx
}
// FillDefaults populates some obvious defaults from config params,
@@ -235,6 +274,7 @@ func (tx Txn) Txn() transactions.Transaction {
Accounts: tx.Accounts,
ForeignApps: tx.ForeignApps,
ForeignAssets: tx.ForeignAssets,
+ Boxes: tx.Boxes,
LocalStateSchema: tx.LocalStateSchema,
GlobalStateSchema: tx.GlobalStateSchema,
ApprovalProgram: assemble(tx.ApprovalProgram),
diff --git a/docker/releases/build_releases.sh b/docker/releases/build_releases.sh
index 01d5f5372..1862c7ae9 100755
--- a/docker/releases/build_releases.sh
+++ b/docker/releases/build_releases.sh
@@ -78,7 +78,7 @@ esac
IFS='' read -r -d '' DOCKERFILE <<EOF
FROM ubuntu
-RUN apt-get update && apt-get install -y ca-certificates curl --no-install-recommends && \
+RUN apt-get update && apt-get -y dist-upgrade && apt-get install -y ca-certificates curl --no-install-recommends && \
curl --silent -L https://github.com/algorand/go-algorand-doc/blob/master/downloads/installers/linux_amd64/install_master_linux-amd64.tar.gz?raw=true | tar xzf - && \
./update.sh -c $CHANNEL -n -p ~/node -d ~/node/data -i -g $NETWORK
WORKDIR /root/node
diff --git a/gen/generate.go b/gen/generate.go
index 15eb09103..9274da420 100644
--- a/gen/generate.go
+++ b/gen/generate.go
@@ -149,14 +149,28 @@ func GenerateGenesisFiles(genesisData GenesisData, consensus config.ConsensusPro
return fmt.Errorf("couldn't make output directory '%s': %v", outDir, err.Error())
}
- return generateGenesisFiles(outDir, proto, consensusParams, genesisData.NetworkName, genesisData.VersionModifier, allocation, genesisData.FirstPartKeyRound, genesisData.LastPartKeyRound, genesisData.PartKeyDilution, genesisData.FeeSink, genesisData.RewardsPool, genesisData.Comment, genesisData.DevMode, verboseOut)
+ return generateGenesisFiles(
+ proto, consensusParams, allocation, genesisData, outDir, verboseOut,
+ )
}
-func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion, protoParams config.ConsensusParams, netName string, schemaVersionModifier string,
- allocation []genesisAllocation, firstWalletValid uint64, lastWalletValid uint64, partKeyDilution uint64, feeSink, rewardsPool basics.Address, comment string, devmode bool, verboseOut io.Writer) (err error) {
+func generateGenesisFiles(protoVersion protocol.ConsensusVersion, protoParams config.ConsensusParams, allocation []genesisAllocation, genData GenesisData, outDir string, verboseOut io.Writer) (err error) {
- genesisAddrs := make(map[string]basics.Address)
- records := make(map[string]basics.AccountData)
+ var (
+ netName = genData.NetworkName
+ schemaVersionModifier = genData.VersionModifier
+ firstWalletValid = genData.FirstPartKeyRound
+ lastWalletValid = genData.LastPartKeyRound
+ partKeyDilution = genData.PartKeyDilution
+ feeSink = genData.FeeSink
+ rewardsPool = genData.RewardsPool
+ devmode = genData.DevMode
+ rewardsBalance = genData.RewardsPoolBalance
+ comment = genData.Comment
+
+ genesisAddrs = make(map[string]basics.Address)
+ records = make(map[string]basics.AccountData)
+ )
if partKeyDilution == 0 {
partKeyDilution = protoParams.DefaultKeyDilution
@@ -326,24 +340,27 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion,
fmt.Fprintln(verboseOut, protoVersion, protoParams.MinBalance)
}
+ if rewardsBalance < protoParams.MinBalance {
+ // Needs to at least have min balance
+ rewardsBalance = protoParams.MinBalance
+ }
+
records["FeeSink"] = basics.AccountData{
Status: basics.NotParticipating,
MicroAlgos: basics.MicroAlgos{Raw: protoParams.MinBalance},
}
+
records["RewardsPool"] = basics.AccountData{
Status: basics.NotParticipating,
- MicroAlgos: basics.MicroAlgos{Raw: defaultIncentivePoolBalanceAtInception},
+ MicroAlgos: basics.MicroAlgos{Raw: rewardsBalance},
}
+ // Add FeeSink and RewardsPool to allocation slice to be handled with other allocations.
sinkAcct := genesisAllocation{
- Name: "FeeSink",
- Stake: protoParams.MinBalance,
- Online: basics.NotParticipating,
+ Name: "FeeSink",
}
poolAcct := genesisAllocation{
- Name: "RewardsPool",
- Stake: defaultIncentivePoolBalanceAtInception,
- Online: basics.NotParticipating,
+ Name: "RewardsPool",
}
alloc2 := make([]genesisAllocation, 0, len(allocation)+2)
diff --git a/gen/generate_test.go b/gen/generate_test.go
index fbffe9467..64c393b5b 100644
--- a/gen/generate_test.go
+++ b/gen/generate_test.go
@@ -17,19 +17,26 @@
package gen
import (
+ "encoding/json"
"fmt"
+ "github.com/algorand/go-algorand/data/basics"
+ "io"
+ "os"
"path/filepath"
"strings"
"sync"
"testing"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/util/db"
-
"github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/util/db"
)
func TestLoadMultiRootKeyConcurrent(t *testing.T) {
@@ -114,3 +121,156 @@ func TestGenesisRoundoff(t *testing.T) {
require.NoError(t, err)
require.True(t, strings.Contains(verbosity.String(), "roundoff"))
}
+
+// `TestGenesisJsonCreation` defends against regressions to `genesis.json` generation by comparing a known, valid `genesis.json` against a version generated during test invocation.
+//
+// * For each `testCase`, there is a corresponding `genesis.json` in `gen/resources` representing the known, valid output.
+// * When adding test cases, it's assumed folks peer review new artifacts in `gen/resources`.
+// * Since _some_ `genesis.json` values are non-deterministic, the test replaces these values with static values to facilitate equality checks.
+func TestGenesisJsonCreation(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ type testCase struct {
+ name string
+ gd GenesisData
+ expectedOverride string
+ }
+
+ defaultGenesisFromFile := func(filename string) GenesisData {
+ jsonBytes, err := os.ReadFile(filename)
+ require.NoError(t, err)
+
+ gd := DefaultGenesis
+ err = json.Unmarshal(jsonBytes, &gd)
+ require.NoError(t, err)
+
+ return gd
+ }
+
+ // `base` is a canonical test confirming `devnet.json` generates the intended `genesis.json`.
+ base := func() testCase {
+ return testCase{"base", defaultGenesisFromFile("devnet.json"), ""}
+ }
+
+ // `balance` extends `base` to confirm overriding the rewards pool balance works.
+ balance := func() testCase {
+ gd := base().gd
+ gd.RewardsPoolBalance = 0 // Expect generated balance == MinBalance
+ return testCase{"balance", gd, ""}
+ }
+
+ // `testnet` confirms the generated genesis.json conforms to a previously generated _installer_ artifact.
+ testnet := func() testCase {
+ return testCase{"testnet", defaultGenesisFromFile("testnet.json"), "../installer/genesis/testnet/genesis.json"}
+ }
+
+ // `blotOutRandomValues` replaces random values with static values to support equality checks.
+ blotOutRandomValues := func(as []bookkeeping.GenesisAllocation) {
+ deterministicAddresses := []string{"FeeSink", "RewardsPool"}
+
+ isNondeterministicAddress := func(name string) bool {
+ for _, address := range deterministicAddresses {
+ if name == address {
+ return false
+ }
+ }
+ return true
+ }
+
+ for i := range as {
+ require.Len(t, as[i].State.VoteID, 32)
+ as[i].State.VoteID = crypto.OneTimeSignatureVerifier{}
+ require.Len(t, as[i].State.VoteID, 32)
+ as[i].State.SelectionID = crypto.VRFVerifier{}
+
+ if isNondeterministicAddress(as[i].Comment) {
+ require.Len(t, as[i].Address, 58)
+ as[i].Address = ""
+ }
+ }
+ }
+
+ const quickLastPartKeyRound = basics.Round(10) // Ensure quick test execution by reducing rounds.
+
+ // `blotOutFixedValues` replaces values from actual genesis values in order to be compatible with artifacts generated by tests.
+ blotOutFixedValues := func(g *bookkeeping.Genesis) {
+ for i := range g.Allocation {
+ if g.Allocation[i].State.Status == basics.Online {
+ require.Greater(t, g.Allocation[i].State.VoteLastValid, quickLastPartKeyRound)
+ g.Allocation[i].State.VoteLastValid = quickLastPartKeyRound
+ }
+ }
+
+ require.NotZero(t, g.Timestamp)
+ g.Timestamp = 0
+
+ require.NotEmpty(t, g.Network)
+ g.Network = ""
+ }
+
+ saveGeneratedGenesisJSON := func(filename, artifactName string) {
+ src, err := os.Open(filename)
+ require.NoError(t, err)
+ defer src.Close()
+
+ dst, err := os.CreateTemp("", "*-"+artifactName)
+ require.NoError(t, err)
+ defer dst.Close()
+
+ _, err = io.Copy(dst, src)
+ require.NoError(t, err)
+
+ t.Log("generated genesis.json = " + dst.Name())
+ }
+
+ // Since `t.TempDir` deletes the generated dir, retain generated `genesis.json` on test failure.
+ saveOnFailure := func(result bool, generatedFilename, artifactName string) {
+ if !result {
+ saveGeneratedGenesisJSON(generatedFilename, artifactName)
+ t.FailNow()
+ }
+ }
+
+ for _, tc := range []testCase{
+ base(),
+ balance(),
+ testnet(),
+ } {
+ t.Run(fmt.Sprintf("name=%v", tc.name), func(t *testing.T) {
+ gd := tc.gd
+ gd.LastPartKeyRound = uint64(quickLastPartKeyRound)
+
+ outDir := t.TempDir()
+ err := GenerateGenesisFiles(gd, config.Consensus, outDir, nil)
+ require.NoError(t, err)
+
+ artifactName := fmt.Sprintf("genesis-%v.json", tc.name)
+ generatedFilename := fmt.Sprintf("%v/genesis.json", outDir)
+ saveOnFailure := func(result bool) {
+ saveOnFailure(result, generatedFilename, artifactName)
+ }
+
+ roundtrip, err := bookkeeping.LoadGenesisFromFile(generatedFilename)
+ require.NoError(t, err)
+
+ expectedFilepath := func() string {
+ if len(tc.expectedOverride) == 0 {
+ return "resources/" + artifactName
+ }
+ return tc.expectedOverride
+ }
+ expected, err := bookkeeping.LoadGenesisFromFile(expectedFilepath())
+ saveOnFailure(assert.NoError(t, err))
+
+ blotOutRandomValues(expected.Allocation)
+ blotOutRandomValues(roundtrip.Allocation)
+
+ if len(tc.expectedOverride) > 0 {
+ blotOutFixedValues(&expected)
+ }
+
+ saveOnFailure(assert.Equal(t, expected, roundtrip))
+ })
+ }
+}
diff --git a/gen/resources/genesis-balance.json b/gen/resources/genesis-balance.json
new file mode 100644
index 000000000..53fc497b7
--- /dev/null
+++ b/gen/resources/genesis-balance.json
@@ -0,0 +1,290 @@
+{
+ "alloc": [
+ {
+ "addr": "7777777777777777777777777777777777777777777777777774MSJUVU",
+ "comment": "RewardsPool",
+ "state": {
+ "algo": 100000,
+ "onl": 2
+ }
+ },
+ {
+ "addr": "A7NMWS3NT3IUDMLVO26ULGXGIIOUQ3ND2TXSER6EBGRZNOBOUIQXHIBGDE",
+ "comment": "FeeSink",
+ "state": {
+ "algo": 100000,
+ "onl": 2
+ }
+ },
+ {
+ "addr": "LL5I5MBVV6LU26ENXVZ733A3IKUUTYMG6ZRCJB7G4GZIPSVFPHCCK2YKME",
+ "comment": "Wallet1",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "Tk6dVeLp2jkpR4GqTkUqmg4b7wwkgYshXpQ6FvWkJbQ=",
+ "vote": "DMXQB3LRyCznSDwFY7QhG+v6vrhaRR5DmcVBkiojGAw=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "LMSUTQYZ6PVSFF2UC2Y5BQQ6BPZPEEP4TJDZZAFGWFD7NR6JUIWYOBB7FA",
+ "comment": "Wallet10",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "FGmOjAOiA5WS36RbQqgW0R8+/7hNhr4d01w57E2Rj98=",
+ "vote": "rwRNmKNCR21GR7fGx0JscZFxAqDDntddmGPBrcPM3uM=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "IRLTNUT6N4RJPWHKHTGEFCC7XZPYXWUEQ3KPQDIPDOM4QCHMERLDCKB5BA",
+ "comment": "Wallet11",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "o3HGYpY+Cgon0G8h+LXt+x51iblyDAjU+UVh3i83QE0=",
+ "vote": "OisiREqsWPCCp/DQAAv/zv3t1cZuk9/EpHFTge45n9g=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "BL6N224XD2AO3UFAL3FYT4XV6TT225THQN2WZEVK2SZ2EN65MOWKOUHPCE",
+ "comment": "Wallet12",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "9t88h4TvAKqMIlRoLW9lfEIUZHkkeTeZNkxhf6cwBk4=",
+ "vote": "M2t4hO2Oe2cM0luPVQQjKFJUdUAve//NJLu31+AjSf8=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "7QAXF4XTPDNNP7IQXY2GAPU54SWOAEYN3KFNYZNYIT2TGTIATQYVYX34EE",
+ "comment": "Wallet13",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "EDectK5ACzkRBdTaK2jTJ3p2LRWVdIr2yqhw7vgLBAQ=",
+ "vote": "bD/yb+z+7TenZqd0G950WdDibXWcnH+5tLAUx6UWG28=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "EMOKFY3CUL5N63QL3K35AED5V23GLHFOCK247DW352NJQ2L7RX67TXBKLI",
+ "comment": "Wallet14",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "gFAz62RY4LPBD2TYmkKkm3RM0sE7kDH7XsA2nTjEMxU=",
+ "vote": "ZgVdTpahN5PgVcTsmNoLW8clsoHne2nfXnx7+jHbAeI=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "VHPOCATOURTR5Q4CCCSHFMDED2DJNQOSHIK5ZYRTWWXQ2NQHIFVVKC7IGM",
+ "comment": "Wallet15",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "faGqWez33n/hffAoQb1Mhiveb4SraHFUaXeBSTbDvtY=",
+ "vote": "i77hs2kcwF02SElbT7Bz9Pf75IYgF5nhqxhd8nQZ1y8=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "ZRTQWKBBNEGRYFDGVLVSI3FJHKIBQ7K527IWKXH75VTFCXMZVZGD4XA75E",
+ "comment": "Wallet16",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "0lQOFHTrORZmsJaFi3VbWtQM3eqIQuYtSiWm4UFI0/I=",
+ "vote": "mNQRvebWl6fHaO2icPtI4jv62UuaZXRFlz6PUvSXLjI=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "MFS6B3P3R7FOKMAU4FLMMMOKQVWTZCFYKEPE6SW66A5OJDWFFRFPMSO6AU",
+ "comment": "Wallet17",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "XKWnu0OXX1IaFYpm+IB00UXN83ap2d3uJqoSygqQyZE=",
+ "vote": "owK+kZlEr0VuFJZJMslXwpWHbnOdvRxQWLB3N4jpRt0=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "OLUNBJRRR2N4RSLIYNKAQNJIW7FK7M23AOPMIHER6RECHMGUINAV2WAXYA",
+ "comment": "Wallet18",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "c0Fku3FF4VxiDPxdTJ30+aoR9UdPkQ/iDV0DiKLjCh0=",
+ "vote": "eNwVkHYORjGE1Qme1D9o842cgFAn4nidryN35CgK78s=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "7VQYT4PAQBABDR3QODW5MUB3RU6MQ5MIWVIUR2HN3CSPA5H64TVQY4NT64",
+ "comment": "Wallet19",
+ "state": {
+ "algo": 400000000000000,
+ "onl": 1,
+ "sel": "TlelrGjnKdubOuRRMo2Mum2uUAP6UkG2ANDD+BZKOTo=",
+ "vote": "LWlAr9lRXcolQ9h4fX2DTw3LU8/KI6Eix6tJ+o8fsho=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "BSJ5G7YEOVTEKKDJZZKULAJPBH26SPIV4RJLV5AUSYZ36ELRN3JHZTZX2E",
+ "comment": "Wallet2",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "D3aE+VywLz4HA8NPX7mEB3m33FOer4L2ZBRa0qT/1fI=",
+ "vote": "ITColZ7Roe/p+qQXX8yk6FKibhN6sShNWFdvgDk3kGY=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "ETMCXHLEI7KOTMPVIOZAAPMQGQTST3CPZP4YG25HBCFB6XY3IIGH3YYUSE",
+ "comment": "Wallet20",
+ "state": {
+ "algo": 400000000000000,
+ "onl": 1,
+ "sel": "4Hg5XgcvEQsPiI7eaNE3hZgKZIvBl93CCKZZ8GZIDps=",
+ "vote": "HOecVUnX8xe86+Rrt237Z+jXFILhBM2I7GvyxCWxpbY=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "TQYRDSG5BS5XR6USGROXP5DSUAHJHKUTE5GBLENHBGWKAK2YNLEANQ7ELM",
+ "comment": "Wallet3",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "hOfLlBV0eDcF1lV7cUxMfo7dKBmCBflrgNJ1NxDv1KQ=",
+ "vote": "AgpySx7yp2177QjUueEJ+HN1xpQTW8Uf6sTu6lCHq2w=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "BOKSOEFFJE4RUPFMZHPQRJBEORY3W5BJNR5YUEYGOWKN6IZP4XLMHPLBCQ",
+ "comment": "Wallet4",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "arO4hIghNSwWvmr+hNHItYFniImOiCjGo4IwPDEBENg=",
+ "vote": "QLeClCU8nw2hHPA9YnrhWSlZ0Nc5awyk5WI/RhipGrU=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "VIMND5G6N6BYICGLXPBBKJNOO36K22HHUDV4KCKOS2STO225AKKGWNLOOM",
+ "comment": "Wallet5",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "sTAHT53hHEAMC/NXj+N6sAXvBrJzIU3QeIiD8YdyHN8=",
+ "vote": "qXu3+DUAPs71xZtugIxYOOFh4pxQ1zOD/wDvACmNAOo=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "6GQNPBPISA6YUSLIHCKFGNARGA36PPFEI3G77MVFZRLBB5T7ENOFLUIOM4",
+ "comment": "Wallet6",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "DVqDBjuBNwqDgn/Srl0M0iAhY4F7OaYlPM6Mksqag7s=",
+ "vote": "wwy76l2W220L/T7NxYu0RwkCtgZopwAby7+Ufo3FroE=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "HLBHM4N2YBRERHGBRMSBJ2H374X3UGLPB3EFGQ42P5HJ3AGOZPBZMNLHWI",
+ "comment": "Wallet7",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "HLNbmSEJe1ToTIT6QJ7xymrJlC9DPX00Ebw3MMyKb9Q=",
+ "vote": "D4+2cKNIhuAv0Rum9x5Tw5RT1/SsmuPpuxB8eQ4OrS8=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "M5O226M5GLGRHVQ3CWFFZBFUB4GPLK7RPSCGU25VY2Z5STSQI6D7LLXCQE",
+ "comment": "Wallet8",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "HgmAAyQuxjdD5dYW6QORoKL756+8PLfFckbtEtd6Qms=",
+ "vote": "+2NoVf0UHCM6+xf2crgh03vXHW4/rvJbvV1kgU6eb8A=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "66UUTNSIJMQJRM7GPNBDBNWQSLPDRX5MFRZYZDDILCATHB6EWFZVJEGC5M",
+ "comment": "Wallet9",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "74oS0XpjjqpTB+6nKj0bMl4ZMHN5HLvpSCnczSfkX0Q=",
+ "vote": "BWTZ9sJJVzjo20iFWBeCTrc1nxETWHP1IBAzjR+TtC8=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "HHDJTAQXM35CDYBVD5YB3ZYXZ3SA2G7EPX377CLYWSA4OAQTQ3JLS7GJJQ",
+ "comment": "bank",
+ "state": {
+ "algo": 100000000000000,
+ "onl": 1,
+ "sel": "UMIH8ldIewQ9K0dKs1iWIwLBNsS2+9GEaZh7/wkvPmI=",
+ "vote": "MSgY48yyvE+XHdeJGj7Muh17rn+JqNt0NMtzOBCM4yU=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "IVRXBBY53IKQW4I5M7CYRCE6W37TYIB4IIR73KBDWTROMVAOGPT5K75E3Y",
+ "comment": "pp1",
+ "state": {
+ "algo": 50000000000000
+ }
+ },
+ {
+ "addr": "UDMWNUYTV4AU6Y76RCNHMTWVZ6QYACYFJP4PAZNBMRWRFZKRNE6V7SBG7Q",
+ "comment": "pp2",
+ "state": {
+ "algo": 50000000000000
+ }
+ }
+ ],
+ "fees": "A7NMWS3NT3IUDMLVO26ULGXGIIOUQ3ND2TXSER6EBGRZNOBOUIQXHIBGDE",
+ "id": "v1.0",
+ "proto": "https://github.com/algorand/spec/tree/a26ed78ed8f834e2b9ccb6eb7d3ee9f629a6e622",
+ "rwd": "7777777777777777777777777777777777777777777777777774MSJUVU"
+}
diff --git a/gen/resources/genesis-base.json b/gen/resources/genesis-base.json
new file mode 100644
index 000000000..16cef5b98
--- /dev/null
+++ b/gen/resources/genesis-base.json
@@ -0,0 +1,290 @@
+{
+ "alloc": [
+ {
+ "addr": "7777777777777777777777777777777777777777777777777774MSJUVU",
+ "comment": "RewardsPool",
+ "state": {
+ "algo": 125000000000000,
+ "onl": 2
+ }
+ },
+ {
+ "addr": "A7NMWS3NT3IUDMLVO26ULGXGIIOUQ3ND2TXSER6EBGRZNOBOUIQXHIBGDE",
+ "comment": "FeeSink",
+ "state": {
+ "algo": 100000,
+ "onl": 2
+ }
+ },
+ {
+ "addr": "5P7SSF6IRMBZKQBH4KJLZMKNK3VB5SHK3ZEUJAUD4NODLLXJLOCJPY6GWA",
+ "comment": "Wallet1",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "R3ZILpY0ZOgcRbb3spZyXBtwt4Q9oeP4JkGmLWvp1wM=",
+ "vote": "fjzsjb8BcM4KhDPQyf7qxisv8mWzb6t+nv9eZnPbq3w=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "23PVZMF53OVWQ5YRJDG75P774AAQDOWMGT2YRRO3ZVCAMV7P6WQDUHTHSY",
+ "comment": "Wallet10",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "M/NhvAm8FfFxgcEynZ2XyiedqA0ZXsYPpQAT9j8UStg=",
+ "vote": "3QrVWdEk/JU5+W3KsOqZQWwqBP+3syw3jWkra5UlRyY=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "5GWNQULH2QB7IAMX7XAIYU6VQT7S4JTZZE2MVCO7HNWGFHM6GTGUCFVEIY",
+ "comment": "Wallet11",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "lEHWhsPoBlboeK+m9ASMV+eHe6aMWFr2wDFtW7BhZps=",
+ "vote": "m9hkZTh3JZtipCgRMQlxvjLHEBlPWI5rzi0a62nYmn4=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "MXEYXXM74MXMOJ7VCDDHYUIDELZBLETSB6ZUCR2K4OLCFKYPVBTJ7Q3WXA",
+ "comment": "Wallet12",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "dRgwg+/Q0WhqJu4pzVmFN15czLEwpzGbFHnqsGktRjk=",
+ "vote": "idEmvipGvNuebDonCSLcFJhaxjpft/1MZqxGzM0MOnY=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "MDPD6UI4D7CRJO2QP3OA4GJAZXMGE5LY5XV2FQ36RJ5MKKRLNT3NPXFM4Y",
+ "comment": "Wallet13",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "teFS7YWTutxUvxSWXHzuS8wwkX5ueR9CEanNR0IhsHM=",
+ "vote": "FpRYYVxA2I72a0m3+bJ+4fzD+wcsgRbMXzPrX+ihZKo=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "NW3ARRUBZDSTXH5FXI54LZUATS5QHYZGQHONXJFZWUQCU6EYVM57KQCSGU",
+ "comment": "Wallet14",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "3vwiEtd2C0wm89P9vex9nud8nM4rdgCd8OAT7SUWfpA=",
+ "vote": "dXoHsQvvUY6HiRIR2JUaTFR21RROq+tzA3ApYL8lw7I=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "54Q7QENDL6GRX7XOBONV3AY2OHD62IGQPEFNUCKL4IH6XD2HGSJETIR22Y",
+ "comment": "Wallet15",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "b0hfKTC81csTjO/x21E6w4OpDlcVNCZBWky0gwpo2qo=",
+ "vote": "T9UBwWaOpUnq4azO1yyaH8rTHWXzfanIHib0RxI9N+c=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "DU4LJRJKBIIREB7CKKWCOUP6NHUM5HKKVYH6LU2ZE63D67PZRL7VSHNADI",
+ "comment": "Wallet16",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "JIdWGPjyKJZCfC5Y+IYxqC1oDEa2pXePQ3TSBQ3v+K0=",
+ "vote": "nYIVrFrNAf4lWvt1RiLR2Nv/EzaxH5us/7prSQqbFgM=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "Z745PXKLO7EJOBMUYKMZQXZJQD2NOI2ZC5CUUVZYQDL6S5FCMG2RTDR43U",
+ "comment": "Wallet17",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "29+iADCFdA+wltMu0ZgCPzAogbbp5kr5zRRJteB2mdE=",
+ "vote": "vzXs2Ebue388Ya3vVgEZ2JX6IJaZuIn2MIcl1N1T0lk=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "PA2FWPXRLKGKDRTP5JFYWWM3PBE77SINTRZ5BQCCKAK363TMMVFUJOM3OA",
+ "comment": "Wallet18",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "fKYC8BIRXI/4s0E52wDYd0jSiOgvStdA+8Vr7fTh0TA=",
+ "vote": "+jaGxdKU5PpxQxp7XRCYhc0Oss6josumZW7GNhl9Kkg=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "R24KUNIHRTSDYEWVJVJMHJ2IYE3NXBTSE3NQXOHPG5ULJJ4YEP6DXLWIQM",
+ "comment": "Wallet19",
+ "state": {
+ "algo": 400000000000000,
+ "onl": 1,
+ "sel": "J1yWsrjAlqi0/HXQ++yK03+iFuPPu9q0HZVWp2V5gjU=",
+ "vote": "db7v7DZeGSeI9t3eXXoO0DxMghAGmidVh3bGBOeqcC8=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "WLEX5HDAKMEA35NKY2QUXFXIFIKMOF6PHLMNDD25KLNNTPXRRO7TPAP6SQ",
+ "comment": "Wallet2",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "AYda5HHpJpFnpbQ/oeqvc9eSKPccHldGczvqwsJzWiE=",
+ "vote": "HjXz+GAo7yqeKRsOB+RQNr2V5vjwS/bMynMTr37T8D8=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "XSQO4QZQZLFSAM7GOGYIOFH3EKTYC3WVO3CND7V4QQ7KXOQBD2OYRCCARE",
+ "comment": "Wallet20",
+ "state": {
+ "algo": 400000000000000,
+ "onl": 1,
+ "sel": "joC7f9011hnDVa87WsaRSD/+Z8HyJ6tPT8NCQJJKNQ4=",
+ "vote": "BRVqGnSjJZnBM2MdHs3YkApVB9iR6nd6VVTx2kvPQjw=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "KE2HQAZHGJDKZUKRBDBQTBQOSVSCDROUC7UFJZ27KTNHBKON6GKFBDD5MY",
+ "comment": "Wallet3",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "lb/5w4Q/6KE3g2+KeRc4GrslWubT/2QkKQX9pJqBwWA=",
+ "vote": "gs5MFqUUn/IDBb0kb4VJiv6gIOUVIcSSmpdaLteAKmw=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "7SCSRGS3YLKORJLJI6AGJOBEQYOVEMCPF52F3TK6KZZRZVZQCKCFGOGWIY",
+ "comment": "Wallet4",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "ImC3tocbWRoTdd90owSbskgsvnNpyFW1c79CNJl2mjU=",
+ "vote": "xFrT7tp0Yth3j39XJJKtKxxMaXDiFRbXtzoOjQnavFY=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "2FCYNSOOK6HGVK4ZJZ3ROAV6S3Y6KYY5467BY7JQEVTT75RNSIM3D6WK5M",
+ "comment": "Wallet5",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "aEKSz4Hqi6JY82890qxqAUhkiQ2YvDYNrQBjq6UOgwU=",
+ "vote": "Gxyf2lPDSVhLrfUOtS6vS0fvrV9g22XHR/uUl7uzzsw=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "6SUHM22SBOZ4O4E7VQIB2FWHGCFLBCDOZAQ6AWXV3CTEDVB45UZGX2GZ2U",
+ "comment": "Wallet6",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "OXOkW9CbraU2UM1IjDkkpS3zGlBCjm0SHc3mFgZrUdE=",
+ "vote": "ssh7jRwaim+vTO4W9F2tw+j5BoFOqTTTy1DOZfqIXwY=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "H2A6ALQ6U7P2JSABWPRLG3IRRKH3AAUJFZXLI7YHCBZ7K6DOTHWBVGFIZM",
+ "comment": "Wallet7",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "B9rVwMkp9YAbdOz3vOK2K7UyDBsHBGeHUEcErioligA=",
+ "vote": "MyheCHyiz464tL3rdZkztIl/fx7ARYPvEv8xuFozCfM=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "5ZBBKATPM6HM56TOGQOSPFMIOVUZJJAZAAUATNPVCB6SKQYVIMRUOC454Q",
+ "comment": "Wallet8",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "g/995Lmd4y+IhbDCD6CXLyuIMs+GhFbXbmvVSCwG9RU=",
+ "vote": "SJ0Hj+Xau09OB97nfbktnTnA5K4MqRzQCwDR1BN4R+8=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "UPL2Q6OLQHLAZWXBEP7DTN3225ISOJ45DACXEDCNJ2UBCPR7EWUCPMW3KE",
+ "comment": "Wallet9",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "X8Df4E1SSkCEoWYfDOGMPTSoecc6FaHTzDRNYpOVL7I=",
+ "vote": "ZUkY2tLQ3XmcG6aVWjJLdJvWjWfoPI7EliAq/ZYaIUg=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "N6TENNVOWAILNAZIOT3RFX6C4DPC2HEYOQD5KBBGFDE77FUNZU7NMWBIKA",
+ "comment": "bank",
+ "state": {
+ "algo": 100000000000000,
+ "onl": 1,
+ "sel": "vBIkGS3JImg7DVRGr6L3ZLuzFe9EXSDedvZkHRgAGEQ=",
+ "vote": "NoVimVCKTiIntOa/q9zUcVEN5erCIdD0c5G2eIAzBL4=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "XHV5PYJARUBZA6EMG6ERYAL4ROXVTLVHMC2W52LDGVOVQNCQOAFNLTG32U",
+ "comment": "pp1",
+ "state": {
+ "algo": 50000000000000
+ }
+ },
+ {
+ "addr": "4EDQJQLPFX5CBWHJJ2GQXKMV6UQC2O553PGZVID6QVB7JZT3XBQ5MCSD2Q",
+ "comment": "pp2",
+ "state": {
+ "algo": 50000000000000
+ }
+ }
+ ],
+ "fees": "A7NMWS3NT3IUDMLVO26ULGXGIIOUQ3ND2TXSER6EBGRZNOBOUIQXHIBGDE",
+ "id": "v1.0",
+ "proto": "https://github.com/algorand/spec/tree/a26ed78ed8f834e2b9ccb6eb7d3ee9f629a6e622",
+ "rwd": "7777777777777777777777777777777777777777777777777774MSJUVU"
+}
diff --git a/gen/walletData.go b/gen/walletData.go
index 95c508c27..7790e9a6b 100644
--- a/gen/walletData.go
+++ b/gen/walletData.go
@@ -27,8 +27,9 @@ import (
// DefaultGenesis should be used as the default initial state for any GenesisData
// instance (because we have no ctors...)
var DefaultGenesis = GenesisData{
- FirstPartKeyRound: 0,
- LastPartKeyRound: 3000000,
+ FirstPartKeyRound: 0,
+ LastPartKeyRound: 3000000,
+ RewardsPoolBalance: defaultIncentivePoolBalanceAtInception,
}
// WalletData represents a wallet's name, percent stake, and initial online status for a genesis.json file
@@ -40,17 +41,18 @@ type WalletData struct {
// GenesisData represents the genesis data for creating a genesis.json and wallets
type GenesisData struct {
- NetworkName string
- VersionModifier string
- ConsensusProtocol protocol.ConsensusVersion
- FirstPartKeyRound uint64
- LastPartKeyRound uint64
- PartKeyDilution uint64
- Wallets []WalletData
- FeeSink basics.Address
- RewardsPool basics.Address
- DevMode bool
- Comment string
+ NetworkName string
+ VersionModifier string
+ ConsensusProtocol protocol.ConsensusVersion
+ FirstPartKeyRound uint64
+ LastPartKeyRound uint64
+ PartKeyDilution uint64
+ Wallets []WalletData
+ FeeSink basics.Address
+ RewardsPool basics.Address
+ RewardsPoolBalance uint64 // Values < `ConsensusParams.MinBalance` are adjusted to `ConsensusParams.MinBalance`
+ DevMode bool
+ Comment string
}
// LoadGenesisData loads a GenesisData structure from a json file
diff --git a/go.mod b/go.mod
index 96ddeb704..3ef58a880 100644
--- a/go.mod
+++ b/go.mod
@@ -3,63 +3,61 @@ module github.com/algorand/go-algorand
go 1.17
require (
+ github.com/DataDog/zstd v1.5.2
github.com/algorand/avm-abi v0.1.0
github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414
github.com/algorand/go-codec/codec v1.1.8
github.com/algorand/go-deadlock v0.2.2
github.com/algorand/go-sumhash v0.1.0
github.com/algorand/graphtrace v0.1.0
- github.com/algorand/msgp v1.1.52
- github.com/algorand/oapi-codegen v1.3.7
+ github.com/algorand/msgp v1.1.53
+ github.com/algorand/oapi-codegen v1.12.0-algorand.0
github.com/algorand/websocket v1.4.5
github.com/aws/aws-sdk-go v1.16.5
github.com/consensys/gnark-crypto v0.7.0
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018
github.com/dchest/siphash v1.2.1
github.com/fatih/color v1.7.0
- github.com/gen2brain/beeep v0.0.0-20180718162406-4e430518395f
- github.com/getkin/kin-openapi v0.22.0
+ github.com/getkin/kin-openapi v0.107.0
github.com/gofrs/flock v0.7.0
github.com/golang/snappy v0.0.4
github.com/google/go-querystring v1.0.0
- github.com/gorilla/mux v1.6.2
+ github.com/gorilla/mux v1.8.0
github.com/jmoiron/sqlx v1.2.0
github.com/karalabe/usb v0.0.2
- github.com/labstack/echo/v4 v4.1.17
+ github.com/labstack/echo/v4 v4.9.1
github.com/mattn/go-sqlite3 v1.10.0
github.com/miekg/dns v1.1.27
github.com/olivere/elastic v6.2.14+incompatible
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v0.0.3
- github.com/stretchr/testify v1.7.1
- golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064
- golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8
- golang.org/x/text v0.3.7
+ github.com/stretchr/testify v1.8.1
+ golang.org/x/crypto v0.1.0
+ golang.org/x/sys v0.1.0
+ golang.org/x/text v0.4.0
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009
)
require (
- github.com/DataDog/zstd v1.5.2 // indirect
- github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e // indirect
+ github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
github.com/cpuguy83/go-md2man v1.0.8 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
github.com/fortytw2/leaktest v1.3.0 // indirect
- github.com/ghodss/yaml v1.0.0 // indirect
- github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f // indirect
- github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect
- github.com/gopherjs/gopherwasm v1.0.1 // indirect
- github.com/gorilla/context v1.1.1 // indirect
+ github.com/go-openapi/jsonpointer v0.19.5 // indirect
+ github.com/go-openapi/swag v0.19.5 // indirect
+ github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
+ github.com/google/uuid v1.3.0 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
+ github.com/invopop/yaml v0.1.0 // indirect
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect
github.com/josharian/intern v1.0.0 // indirect
- github.com/labstack/gommon v0.3.0 // indirect
+ github.com/labstack/gommon v0.4.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
- github.com/mattn/go-colorable v0.1.7 // indirect
- github.com/mattn/go-isatty v0.0.12 // indirect
+ github.com/mattn/go-colorable v0.1.11 // indirect
+ github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
+ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
- github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
@@ -67,11 +65,11 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.2.1 // indirect
- golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect
- golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
+ golang.org/x/net v0.1.0 // indirect
+ golang.org/x/term v0.1.0 // indirect
+ golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
- gopkg.in/toast.v1 v1.0.0-20180812000517-0a84660828b2 // indirect
- gopkg.in/yaml.v2 v2.3.0 // indirect
- gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/go.sum b/go.sum
index 548f9d7a0..8f3ebcf26 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,6 @@
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
+github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
github.com/algorand/avm-abi v0.1.0 h1:znZFQXpSUVYz37vXbaH5OZG2VK4snTyXwnc/tV9CVr4=
github.com/algorand/avm-abi v0.1.0/go.mod h1:+CgwM46dithy850bpTeHh9MC99zpn2Snirb3QTl2O/g=
github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414 h1:nwYN+GQ7Z5OOfZwqBO1ma7DSlP7S1YrKWICOyjkwqrc=
@@ -13,21 +14,22 @@ github.com/algorand/go-sumhash v0.1.0 h1:b/QRhyLuF//vOcicBIxBXYW8bERNoeLxieht/dU
github.com/algorand/go-sumhash v0.1.0/go.mod h1:OOe7jdDWUhLkuP1XytkK5gnLu9entAviN5DfDZh6XAc=
github.com/algorand/graphtrace v0.1.0 h1:QemP1iT0W56SExD0NfiU6rsG34/v0Je6bg5UZnptEUM=
github.com/algorand/graphtrace v0.1.0/go.mod h1:HscLQrzBdH1BH+5oehs3ICd8SYcXvnSL9BjfTu8WHCc=
-github.com/algorand/msgp v1.1.52 h1:Tw2OCCikKy0jaTWEIHwIfvThYHlJf9moviyKw+7PVVM=
-github.com/algorand/msgp v1.1.52/go.mod h1:5K3d58/poT5fPmtiwuQft6GjgSrVEM46KoXdLrID8ZU=
-github.com/algorand/oapi-codegen v1.3.7 h1:TdXeGljgrnLXSCGPdeY6g6+i/G0Rr5CkjBgUJY6ht48=
-github.com/algorand/oapi-codegen v1.3.7/go.mod h1:UvOtAiP3hc0M2GUKBnZVTjLe3HKGDKh6y9rs3e3JyOg=
+github.com/algorand/msgp v1.1.53 h1:D6HKLyvLE6ltfsf8Apsrc+kqYb/CcOZEAfh1DpkPrNg=
+github.com/algorand/msgp v1.1.53/go.mod h1:5K3d58/poT5fPmtiwuQft6GjgSrVEM46KoXdLrID8ZU=
+github.com/algorand/oapi-codegen v1.12.0-algorand.0 h1:W9PvED+wAJc+9EeXPONnA+0zE9UhynEqoDs4OgAxKhk=
+github.com/algorand/oapi-codegen v1.12.0-algorand.0/go.mod h1:tIWJ9K/qrLDVDt5A1p82UmxZIEGxv2X+uoujdhEAL48=
github.com/algorand/websocket v1.4.5 h1:Cs6UTaCReAl02evYxmN8k57cNHmBILRcspfSxYg4AJE=
github.com/algorand/websocket v1.4.5/go.mod h1:79n6FSZY08yQagHzE/YWZqTPBYfY5wc3IS+UTZe1W5c=
+github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
+github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
github.com/aws/aws-sdk-go v1.16.5 h1:NVxzZXIuwX828VcJrpNxxWjur1tlOBISdMdDdHIKHcc=
github.com/aws/aws-sdk-go v1.16.5/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e h1:CHPYEbz71w8DqJ7DRIq+MXyCQsdibK08vdcQTY4ufas=
-github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e/go.mod h1:6Xhs0ZlsRjXLIiSMLKafbZxML/j30pg9Z1priLuha5s=
github.com/consensys/gnark-crypto v0.7.0 h1:rwdy8+ssmLYRqKp+ryRRgQJl/rCq2uv+n83cOydm5UE=
github.com/consensys/gnark-crypto v0.7.0/go.mod h1:KPSuJzyxkJA8xZ/+CV47tyqkr9MmpZA3PXivK4VPrVg=
github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M=
github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
-github.com/cyberdelia/templates v0.0.0-20191230040416-20a325f050d4/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -35,75 +37,64 @@ github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018 h1:6xT9KW8zLC
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
github.com/dchest/siphash v1.2.1 h1:4cLinnzVJDKxTCl9B01807Yiy+W7ZzVHj/KIroQRvT4=
github.com/dchest/siphash v1.2.1/go.mod h1:q+IRvb2gOSrUnYoPqHiyHXS0FOBBOdl6tONBlVnOnt4=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-github.com/gen2brain/beeep v0.0.0-20180718162406-4e430518395f h1:eyHMPp7tXlBMF8PZHdsL89G0ehuRNflu7zKUeoQjcJ0=
-github.com/gen2brain/beeep v0.0.0-20180718162406-4e430518395f/go.mod h1:GprdPCZglWh5OMcIDpeKBxuUJI+fEDOTVUfxZeda4zo=
-github.com/getkin/kin-openapi v0.3.1/go.mod h1:W8dhxZgpE84ciM+VIItFqkmZ4eHtuomrdIHtASQIqi0=
-github.com/getkin/kin-openapi v0.22.0 h1:J5IFyKd/5yuB6AZAgwK0CMBKnabWcmkowtsl6bRkz4s=
-github.com/getkin/kin-openapi v0.22.0/go.mod h1:WGRs2ZMM1Q8LR1QBEwUxC6RJEfaBcD0s+pcEVXFuAjw=
-github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-chi/chi v4.1.1+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
+github.com/getkin/kin-openapi v0.107.0 h1:bxhL6QArW7BXQj8NjXfIJQy680NsMKd25nwhvpCXchg=
+github.com/getkin/kin-openapi v0.107.0/go.mod h1:9Dhr+FasATJZjS4iOLvB0hkaxgYdulrNYm2e9epLWOo=
+github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f h1:zlOR3rOlPAVvtfuxGKoghCmop5B0TRyu/ZieziZuGiM=
-github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/gofrs/flock v0.7.0 h1:pGFUjl501gafK9HBt1VGL1KCOd/YhIooID+xgyJCf3g=
github.com/gofrs/flock v0.7.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
+github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
-github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
-github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gopherjs/gopherwasm v1.0.1 h1:Gmj9RMDjh+P9EFzzQltoCtjAxR5mUkaJqaNPfeaNe2I=
-github.com/gopherjs/gopherwasm v1.0.1/go.mod h1:SkZ8z7CWBz5VXbhJel8TxCmAcsQqzgWGR/8nMhyhZSI=
-github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/invopop/yaml v0.1.0 h1:YW3WGUoJEXYfzWBjn00zIlrw7brGVD0fUKRYDPAPhrc=
+github.com/invopop/yaml v0.1.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/labstack/echo/v4 v4.1.16/go.mod h1:awO+5TzAjvL8XpibdsfXxPgHr+orhtXZJZIQCVjogKI=
-github.com/labstack/echo/v4 v4.1.17 h1:PQIBaRplyRy3OjwILGkPg89JRtH2x5bssi59G2EL3fo=
-github.com/labstack/echo/v4 v4.1.17/go.mod h1:Tn2yRQL/UclUalpb5rPdXDevbkJ+lp/2svdyFBg6CHQ=
-github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=
-github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
+github.com/labstack/echo/v4 v4.9.1 h1:GliPYSpzGKlyOhqIbG8nmHBo3i1saKWFOgh41AN3b+Y=
+github.com/labstack/echo/v4 v4.9.1/go.mod h1:Pop5HLc+xoc4qhTZ1ip6C0RtP7Z+4VzRLWZZFKqbbjo=
+github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8=
+github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/matryer/moq v0.0.0-20200310130814-7721994d1b54/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
-github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw=
-github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-colorable v0.1.11 h1:nQ+aFkoE2TMGc0b68U2OKSexC+eq46+XwZzWXHRmPYs=
+github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
@@ -112,10 +103,10 @@ github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ=
-github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U=
github.com/olivere/elastic v6.2.14+incompatible h1:k+KadwNP/dkXE0/eu+T6otk1+5fe0tEpPyQJ4XVm5i8=
github.com/olivere/elastic v6.2.14+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
@@ -132,81 +123,72 @@ github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
-github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064 h1:S25/rfnfsMVgORT4/J61MJ7rdyseOZOyvLIrZEZ7s6s=
-golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 h1:OH54vjqzRWmbJ62fjuhxy7AxFFgoHN0/DPc/UrL8cAs=
-golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
+golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE=
+golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200423205358-59e73619c742/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
@@ -216,12 +198,11 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8X
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009 h1:q/fZgS8MMadqFFGa8WL4Oyz+TmjiZfi8UrzWhTl8d5w=
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009/go.mod h1:O0bY1e/dSoxMYZYTHP0SWKxG5EWLEvKR9/cOjWPPMKU=
-gopkg.in/toast.v1 v1.0.0-20180812000517-0a84660828b2 h1:MZF6J7CV6s/h0HBkfqebrYfKCVEo5iN+wzE4QhV3Evo=
-gopkg.in/toast.v1 v1.0.0-20180812000517-0a84660828b2/go.mod h1:s1Sn2yZos05Qfs7NKt867Xe18emOmtsO3eAKbDaon0o=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/installer/config.json.example b/installer/config.json.example
index 91b9413d9..84476092e 100644
--- a/installer/config.json.example
+++ b/installer/config.json.example
@@ -1,5 +1,5 @@
{
- "Version": 24,
+ "Version": 25,
"AccountUpdatesStatsInterval": 5000000000,
"AccountsRebuildSynchronousMode": 1,
"AgreementIncomingBundlesQueueLength": 7,
@@ -49,6 +49,7 @@
"EnableRequestLogger": false,
"EnableRuntimeMetrics": false,
"EnableTopAccountsReporting": false,
+ "EnableUsageLog": false,
"EnableVerbosedTransactionSyncLogging": false,
"EndpointAddress": "127.0.0.1:0",
"FallbackDNSResolverAddress": "",
@@ -63,6 +64,7 @@
"LogArchiveMaxAge": "",
"LogArchiveName": "node.archive.log",
"LogSizeLimit": 1073741824,
+ "MaxAPIBoxPerApplication": 100000,
"MaxAPIResourcesPerAccount": 100000,
"MaxAcctLookback": 4,
"MaxCatchpointDownloadDuration": 7200000000000,
diff --git a/installer/genesis/alphanet/genesis.json b/installer/genesis/alphanet/genesis.json
index b3944e7af..b53c0924d 100644
--- a/installer/genesis/alphanet/genesis.json
+++ b/installer/genesis/alphanet/genesis.json
@@ -17,289 +17,331 @@
}
},
{
- "addr": "NXD653KPZRLYFZKUWNYVZUDUBMB5NWGRZYSSMNOAR2GKNR4WE4D6JJ6SDQ",
+ "addr": "IFLBDQLECQE63E6WOFGD5RCN6VC5RZJNDTRVKPHIOT4TIKN3XGUBZKYUNI",
"comment": "Wallet1",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "3s5/mxcllqsE0KabL4rzHC9bxLp3BKHLHUfHrl2aYRI=",
- "stprf": "9yV+6Z2KoQuHJWhDqNZ/ULQtcatXQXVtk2Ei/nBB0aH+3p4NcMj8ONbJNi88sqrsCHR1wArBYnVtwSk+Qwq/6Q==",
- "vote": "F3ZUaQ+NHy0+Oi39s/ah4riH10kVh9wqdo2E8Vq1Q/s=",
+ "sel": "42hOf+fGu3hU9KPwto+1k4xLvgU1X05B/ewt6kTHb0I=",
+ "stprf": "ROW/oLVFugOErJYee2uA/qH1JtVu3BjDisDN4+5QL1P9yXcBoeUvOvoNWqLzvaqD10CSIGwQ90SefJUaWUz/iw==",
+ "vote": "6LHSl+gxCk5ijvgznsNTC3JdtMktRm5nYnItYF5ODLU=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "GMD5VUK6MOF5TNKJB7MGB5TRWZLFDG435LGZFCB4GK7TB75EDT3XEJKDSI",
+ "addr": "L2I4XRULUH4PFPDAE223TKORLME2PLDTDSR3EUYT4FUGHOQCARJHWSPHX4",
"comment": "Wallet10",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "aSAlZpmcmnp/ITcKu+tJqIppOaXy1SrvtzIbLW5ZVxQ=",
- "stprf": "D9rUr2pfk2rcE2h1BZgyHvzsiHC2Lco6fxTgulZd66A1t2+IY7TiyE+cW/yzyASrcFM1ku6HbDpM+dsn81BTFw==",
- "vote": "FlVJov8Pt7nkVuaV0g8MWW0KoWX6QnLilA35wrXWXn4=",
+ "sel": "inBJs/CHh/7pZ3DfBsD9PrHmBAzpqw7J9CvENv5q2Zg=",
+ "stprf": "zBN4OM9uUgC0hyMNMF47dHZ4d8a9j51eGdiZP5v22flnvxCrt2HatbsFhEtsa8OOppPaPfywr/vib4iel2VGtg==",
+ "vote": "GADC6zbLVfN8lPD/MgfLIWX3n1CNOv4h4JMZFD4lops=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "QB5WTO545MRUDHPP3H4EM2H7TJDWEB3CBDDZGAVIGIXR56S5EIZDPYLU44",
+ "addr": "QW3VLNFRMRIPZMTWYMZ75JKICVQHGSWCHK6FYJJ34WOUXF7DICFSQUUSNQ",
"comment": "Wallet11",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "hNlU23ZwJ0uIasSwuy7urtPVbHVmwUyUSBYumIqxuKY=",
- "stprf": "3eJyN0gNwYluwBPZQDjjPj+lsSsuyJlRMKm1yDeNfw/lMyNdkUJU59aJdkMuye3qd5Av6wjnxhGiCXq42WUo4Q==",
- "vote": "wmXN99MQgOuMgeShyJ4NcL6jKQbouKrdUBgYUhJLDfQ=",
+ "sel": "SqrNHOAocxphp6hHLwQ7xc6+w21wp5FhM7QuLGEvtgI=",
+ "stprf": "So85r1kLYQcvwI3EguYOHqnQmx/zAtscVoqH2xG2d0syxG+I6Jxs7H5TqG9U/cRtyUO/FHwdpS6KrVpi4Y5CYA==",
+ "vote": "+aGGk26km1lTSTqKu4D/qyPxLSHv9lyUyDkQAoVOuRY=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "FYT3QP5FN27QM53TZD77W4LL6SNS4ULEAIQF6AYPCFEJIUSGAYIXMWCEI4",
+ "addr": "7L7VXAAMTY2JXUHLYDI6J7DW2TZ6VFCS3EEPWELWBB7BBSSAIYMTKYNZZM",
"comment": "Wallet12",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "FVVkzU8fEt+LhT6Zk1ITNSIvEcY6uVXzKWQbU+WIp+k=",
- "stprf": "GZ903Jk56IN2uG+OjKl8jfRuuEKReyzuyU23MZxLvDjgrcHfIVhs/z3pgzOrrnajV4jX5PSasI5L6Vz8iuOLfQ==",
- "vote": "lVPjjTJMZpvLvrhNj8U4D6Emc2vii27ZaeClBk5emIM=",
+ "sel": "nq2sbH9ADGqbEHEzWcU/2R5VID9dTnRFsTZ763s059g=",
+ "stprf": "mdPttPihlIHh84AXHagYPVDueKhpwbf7GRVdJCjIf93qF5t3ub3mEKSYXLI9ic07LatdEAHs98vpD/dQw1LyIA==",
+ "vote": "kTOdWK9fv4MKDhui+Cy8vl7j3sbZZJzvo+4JzvO0tgM=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "UHJSEAVFDIOLLT6UYIFRLTEV7CTQPYJEQDAIHRVYAPPSX6UJIMVZR4P7AA",
+ "addr": "HW3CJSBQTAEZM3PMGACQFS4Y3ZY2EB2HJ6FGUIER5JLM7L2H2GVGELIIYQ",
"comment": "Wallet13",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "0P5nHFzLSU+PEcM5r58V71E6Mf4IR+iYZXHbldH60MU=",
- "stprf": "TEyFNqFjWUmxmjMTDatOP8Aua8P1m6qIRYqwDJsCUOAvUSsekq+2MxZ4MT+Pfjl69GH6SR9siLHvPd9vgL1mlQ==",
- "vote": "g8+LvQ+DgbIA1aDbUkkNfYKcwFWTO/UK6ljxeBYcp5M=",
+ "sel": "uDMnR6taOIsRI53BdNWR5KAtgwz1pkl4poTcirn4PXg=",
+ "stprf": "vTGV7wr88qg+69u6R2YFTbnLR/olPz3K00GMywFvBjUuIYXdCsc1c48HyxvQcC8XUg6RUq8FdYnm8qttmWVj/w==",
+ "vote": "LY0dGGSkRZnF+bwjC11I9gbY+S6s/Q9GPr4qY9jYSRs=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "6JLQSZQMVQUWPOYLEAOAOHDMC2PI3ZXPT5ADUE6YMBVL36EKQ2DHCP2WQI",
+ "addr": "EEF6LKCGGZPSURHR64WVSZ3QN5GGCCTZ2WYWBQTN4GYOVIUYFHWLD2QTWY",
"comment": "Wallet14",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "5xOCDWQnV1wQccNxeXOPsvsxFu2rEXslM/JWtPKxeA4=",
- "stprf": "zemmbbCDp5ekkJr6IrqrGewFrOHltjVBNBoCak4AmUOCqwtDdnCKbQq4NphemyEghfpfXKfYWhFQ44NYqCLTpQ==",
- "vote": "gjDiA9kbvjXUahdEOjygyw86x1irFZbeyHCw/qHExwg=",
+ "sel": "GjihSRI/PbNMG1CkDfozuIrJKE3YhZePL2cm5ZltMU4=",
+ "stprf": "8F2zyytQ8wBoPKgsM8QpX+EHTd0p3cHXiXR7S+36xDf41DGMQgxxdZIgiovvieL8HsTkqaHKqTIB/At2qhJT/g==",
+ "vote": "fmtJ2qRZzzSlVn7wYvZ06osUGI/mnJrUviwRlhxOA7s=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "HHOHUW5U2BLQEWUUQLPXZMVZPCJQUJFPZCWHCBYOVHQYON2FWAARD6OMDM",
+ "addr": "FAHBD3SIPFL3MUISPH5QZUVHAVSHEUKHCRGM4CISXEGMQGCK2HXZ52KOLY",
"comment": "Wallet15",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "h84u9vUCoCM85CLMfiCTPfNDO1bPqaafWlqC+PEmKQw=",
- "stprf": "2MgJ5pgaDhGuo00re4oOsz+TQxCavOglg1Zmrir3K0USmj6grt1NYiJv+51xvt4BIkKG/Pk2E/wUk38jQD3CKA==",
- "vote": "pbyNjS5u9THxb+y6lsH28T70EPGvBJ7Y89bzCdJAP7E=",
+ "sel": "/LgL29GFIwqlcGAM7PBwK4Gwdupr3IiZSDEXj8IAayI=",
+ "stprf": "w5cuOipJ7w/MIszCChPmxXW2wH1FKXguH1UTkXc14FQkQcPuYiX9ywiYr5fkBveyVfkq9veRZ/2DtCzoxf1Zog==",
+ "vote": "o10w9Oysx7FrqFS1cAgNZilEADvM1oECxMvXMfbzzFE=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "4U3ACMGUA7OBUE4BJR2U6CZQVJOBWQXEIKKG74CYOFHW7RYSLM6WUZWO2U",
+ "addr": "H57FFRCSUHLQ5DGCAJGWG3LU65ZPKFGGU4ROUGYZVY7C7AQSF7D2OGADBU",
"comment": "Wallet16",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "LHWqqjw5rp/ar0/EjU0luGknE8+TYASmg8orMF+IAMY=",
- "stprf": "k2kWdZ6Q3S3D/6mlI3QK7Z9kmCa4kbnJa985NLs5dYb5mkdsVueZVJ0kUnVdmsthCQVq5AYxeYEZiCCF4th2Ew==",
- "vote": "1koKJypVgKG+8nt4k4dKbHpR64+aBeDi6frjXaOioro=",
+ "sel": "zfFmphc8dVPBiQ3RuWkSSIUf91uf5uekUh0d1AaI4wI=",
+ "stprf": "YYKt+pocOetQBv1pjWUAG9x7l4bvegXhOg1ycJslJ5At7xammRLTj24VKdFXcS1g/3n5dUepuSRY8q3005VK2w==",
+ "vote": "Uh5V7H5lf7oStUJCYprK6YNuDJJuR01cwL3BNrMsqEw=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "W6MAVGKBYY3DFJRNOWOWKOYFWZJWYILPAFB4IENTOT63ZHRYXNTOUXSDCA",
+ "addr": "FE5NEIL76HWEZBA6PRES2HFUK6WHL2G3F6LNPIQFJHQIL6A3TYPBIUGK2Y",
"comment": "Wallet17",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "LRfmRYLM91aDXt2llKpnYAihw3iVgk8G3377N/wHhKk=",
- "stprf": "GZFyAPtAtAo+QE4nR7bZ87G3U6aZOTfFmyufc1kYOd1RP6bGMQUspQ8B8XKC3Y/9mhnyrVHNzhEqqYASqcyGmg==",
- "vote": "TSgNl6SMI8/0kfD/YMpnUEGNqka660SUa0Fhw+ESh4A=",
+ "sel": "BsUldykhgPcA4XPiA/dNjBcCC0t4NZSV6+12/iLN73c=",
+ "stprf": "g8IVrfsPfJF0R75+3VOf0H0cpABr0ZfoA1JDZoSRlovTlgWrfi/Vs1+0tMTIZX8hOlCBEZdRXAZJccqkuiyKxA==",
+ "vote": "DcNKOUIYJELV9C8KqNtxG357iA+RWfojxhWSOaHPPKo=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "SW6ZX2GHABLYFZ2GUFPNX65RAKJYSCSBPZMRDIE5ECKB5Q6NW34EMPWKYI",
+ "addr": "L7RZLSEMOI7ZNHQA2QWKU6EWZXGXFKKOK2H2NF2P6IV6WABL6CFCEWJQGE",
"comment": "Wallet18",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "LAkKAzZwUqWv5IiLGwILXiuEjo79YmfrBSraUGxBRvw=",
- "stprf": "xljekFxo4XYiwL+te0DMuXqQigvRVVqW8Y1MSrz+nqTlJ8v07hEkDu2ZPVAdeWb8cL5K4fkI/wYICFbqfzf8Xw==",
- "vote": "XJ8smxZOlGTzt922nabeReh/CfZ3nPs58/72gXXahq4=",
+ "sel": "1ymlzVLbg6rQVbI6fdLCIMXWtm9aSEsEJUXUMLKnwQA=",
+ "stprf": "5CLa7F4RIzo2vpLOEQOuvGhRCy4ciNgJzwXWvdOgPig9PxRhXxkwYiiTeJJpWHPeXfmC/tNDHLWSj6uqcH5GUg==",
+ "vote": "qq8GS3R0TizG5ZY4v0qJC0221C0bDJfdS8DzlFRutDw=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "EOGRZZV2MXCS5WG44XJ77S5YIX5JUWHJ6W6FDBOV5TSROTKMNJEGKNFR6U",
+ "addr": "KURVUEJC5PSUQ3LZQGDGW2GWOVXX5Q6TAQQODRHPQWV3LP37YJIILIM3XI",
"comment": "Wallet19",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "OBgdemFmMNVkkfNNnSgxxaH0VGbLCpAs23qk+/i5pDk=",
- "stprf": "VS3bpYvHZsV+i4E9Rck1ADqOFCe9mv+xEuB/4AbwL5reIs+XmgeSqeZYtjA44Yhu4nMMrsUq5KEfeJt+0zWw5Q==",
- "vote": "Rcwn7JRB5X45mXj80+ra2C/1DB9uUS1W/E29+CBI1w8=",
+ "sel": "f3U4nPco74hk8vbj0LhI7QOEy/oaamjw+ETLFCY/5+8=",
+ "stprf": "gzVVkrQHqZgvHLQlvgJmNE9bmulTDS9uzu4cslUQVwxeOQ4GDb5wLjpZYCY6VtyNT7vroVYHxnG3ICCIRIG/Ng==",
+ "vote": "VEbH/aNT84sCw/Nq+lT36We3/8cEg+3/VggJQi6n1JI=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "UQCHKHGD7D5N6R5Q6VPQU6XNOPSULM53DGP27ISJINZIH3XKW2LHBYAKXU",
+ "addr": "FXS2UMCQGQU25NDPROMNGE7B2LYKFSKR5AJOUBDZZXQCTCYASRTJBFLPNQ",
"comment": "Wallet2",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "loxF/eE88JkhmCvdMl+DmFsUvLeFuUaJNv1ecvnBDjU=",
- "stprf": "faSsGeoQ5SWNDgD01rg+zZ3PZWqTMBIl69zyKdP0N/wSYdmO2zApuXEXj8ZEzzV3sG0d7/vdnlf1hTR8awofug==",
- "vote": "FS57UeflDSSSrlRUlwabMBJRKHGef2Td3dSS148tc8w=",
+ "sel": "/mB2RscORv3ZQzrNdoOlqtt5T3Ed3jV4aXSIWj11TKY=",
+ "stprf": "e/1sji7pgUnH7wjxIGoBV19RhAdWNBg8O8dNjnhpxs5Zg8WhJ2q//XDx7GqaUsbMi27fJKweo7QBg45oqVHDcQ==",
+ "vote": "LcuC11YdI0gb0UAnKwWxopbSVwz24cf4CIoXfoqfx8M=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "GMXFVGAXXUEFXSREXM6JUQD4SVLXPSHG7YU5FDU4UEAY5WNANGAHJQOFGQ",
+ "addr": "NTRU2PSLQ6BZBJTCXBW2TO5Q2QZ44WLBFOFCEHQ2OVLT7XPPGJXDEADAZU",
"comment": "Wallet20",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "DKoC+wEPaybLRdj+MKm3Qehy0br4i/UOaP+usLztyik=",
- "stprf": "YqHEbJ6P1HhxLNai9oqBEWnthLcnBZjPuEoGxbM8QE/LWgAFWA8p8ZZ1UpW/l4dHZWq5BKYym3bZGpbtFMjuZg==",
- "vote": "XtL1RzpArMLvV5yKlbfhKMR0NhNphJMUgkINSQ1yTXY=",
+ "sel": "y4VpJDSAPW45VRD7BJCasK5Flk88LNGMlC8fg7hJQcw=",
+ "stprf": "NfcaxjmygrzS3oXKzk+80/jOQjFN9UCmio5Kg8EHyrBBAAePxFHR1ekuxMS/FhUlG5beHKuUl8UMcSf8F3UTgw==",
+ "vote": "9mqKbF6p+n6H7KRw3iDFGC0VLR4jgR8DEJuNKt/IPSU=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "FO2JFV5TECTX5IPM23NHVQ2G7KM2CB463RBZPWYAIYPG2CFX2NHDZKTDHA",
+ "addr": "GCNDS7IH5CFC5WR5XCABQVW3RP6XCZILHIAA22M2WSYXQTEXZKK5FN7P6I",
"comment": "Wallet21",
"state": {
- "algo": 1250000000000000
+ "algo": 500000000000000
}
},
{
- "addr": "M5G7TXRP7LQYML7RWTJWYKVC63PSNLPT2Q4OML4L4INNYIHCOFMROUALDM",
+ "addr": "25R65Q3I77DH6AD4CHWCRB2FFNHS7HWC457Q5UJ4Z5YUOHGN736X6EYOTI",
"comment": "Wallet22",
"state": {
- "algo": 1250000000000000
+ "algo": 500000000000000
}
},
{
- "addr": "VNIGFIMNLXWU3HU3PTTTFTNXYUV7BDIR3AEH7F7T736XKBSEBWRIVXXYKU",
+ "addr": "ER5XS6LLQJEQPZTFE4GKODWI4EYQIAU6FPKTYAUO2UXQK4DFZNA2XHK5VA",
"comment": "Wallet23",
"state": {
- "algo": 1250000000000000
+ "algo": 500000000000000
}
},
{
- "addr": "GKMGMA4PNAYFXY3ZGN3XYZGT4H6BVMN23AZNN6OW77QZOKTTJMYF54FWP4",
+ "addr": "OF6PFGGVXCRYT7QRV47UOZSU4MA3V5S5L6AOFNPTRHB37W5YJP2YLGQG6A",
"comment": "Wallet24",
"state": {
- "algo": 1250000000000000
+ "algo": 500000000000000
}
},
{
- "addr": "3SDISKXXLMPWTBSABV2NJQ5MLIVKHGDXOGHMSJY4ZQ3AEBYLESF5AJYMNI",
+ "addr": "2D2CLJH65QZBAEJTEEBQG4IHVE7YSDUYHTDORK7FQAAFJJOD2KZUO7XMTM",
+ "comment": "Wallet25",
+ "state": {
+ "algo": 500000000000000
+ }
+ },
+ {
+ "addr": "J4QUUDSS7VLIQ4A3QLI4ICO4OPVGKEEYINWS5T4KGVKTUH7TOCQKXEZGB4",
+ "comment": "Wallet26",
+ "state": {
+ "algo": 500000000000000
+ }
+ },
+ {
+ "addr": "V7E2MRRABU3LBVKSV73VF5VYHAJCPGXPEPH4FF27MCG36S4E7MLJDMY6JQ",
+ "comment": "Wallet27",
+ "state": {
+ "algo": 500000000000000
+ }
+ },
+ {
+ "addr": "BKPC3WT7RUYZF7OITC2NGHOXITL7JQOAURUECCCGASLNMMAYLAA4EDNFKE",
+ "comment": "Wallet28",
+ "state": {
+ "algo": 500000000000000
+ }
+ },
+ {
+ "addr": "HUDVJCIX6OGGTQUMV24CSVT6GLYFKIG6T3A66A23CPYU3ZEXM4AQ5YZFQA",
+ "comment": "Wallet29",
+ "state": {
+ "algo": 500000000000000
+ }
+ },
+ {
+ "addr": "S2M5ZXOOTGL3QNZXI5W3TCYDUQSOJPVJ6P3EKR7HR6A757LPOUMU34FXHM",
"comment": "Wallet3",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "vfxc0A3ROMRug/YbwOFxbJGk+6Skh7rzG5r8CzQL7us=",
- "stprf": "/bS8MveMPCzKd1Nwl4aFuAfGvwMwj/tLdbCVHBDV1mPUSgvqwgxTXWNZaRF3tX1ietC0DMxfJcb/51P9IblG8Q==",
- "vote": "XG9EJKvg6GPfhQK+4A9qCYjOtCJrJvL4tBSy9pG7YWs=",
+ "sel": "4j+EEfU/f2fj4Zm43pbc8ShgPqesfl43Mtr/AnRBbkM=",
+ "stprf": "Q0t5OlxrCeYa/jUI0mrN38Q7AZGNXDdNzohV8uuC1uazQZEGIcoSKcjd2eoyH8gJJDqJDHu8KwfCSMypc7ryIw==",
+ "vote": "8W8WOhfRRyTxYvG0WW4oVUfjF34cUwQPANJ97uTjSz4=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "P22FLWO27IIZNALADADVFXM5SI4Y7EQPDDB4WQDJS7OTMD5RGE6VFQCJAE",
+ "addr": "5OC4ODTA4GED25MPVQTNGX7MQ7OTLKQM37TNFZOHVV7ZVKWFOX6AZCGUD4",
+ "comment": "Wallet30",
+ "state": {
+ "algo": 500000000000000
+ }
+ },
+ {
+ "addr": "RVF72KTU5FZPEEDOBPCP32B4NWQP76AOKKICFS265QD7VT5YKQHDZ23E64",
"comment": "Wallet4",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "jCXkeH6uE6YC3XQJLC53Y1rg0hERpgt8Y/HePQwnSRA=",
- "stprf": "Pk2Nve+Ngpl8Y2ZoFE0yh1jMdKqKCX7QVcynsl1Kb4mo91S/xJ+HCEPV7KCOE4gapMhYEsfmtHxBwmArGcBkJA==",
- "vote": "5B9VGMBD/U/AojL6cDv86DuqhhGGGp1ZMxuqDVE8lZA=",
+ "sel": "edh53NLCG137TTRsS+2kclueYZeIc4PTzdidhuF6GaI=",
+ "stprf": "JeCYutspSNfLF5JTorMYJRZZWRfyhalnOtdnGUjwDitO3jMCUEkNSbxnOSiCvrzX0HW3u6fVvNdkeMb8PCkMVw==",
+ "vote": "Q1sYtPZLuNwwKlugWJ4mF93hQ0LXRW3SGMdasF1M6Ak=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "WVJHEL3VPUI5RR4IBOG3VI6OFEJPELDQ7L2RMSZIUFTC3X5G4H5FI2NOGI",
+ "addr": "N4RUXZRMKCYDCS5GJN5O2PHUGPXLZMJHPVLY2FZIOIIGA2AC7B527UVUOM",
"comment": "Wallet5",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "SRTJ0EUewq8ZQKMzygOm4b10HYchjg7xWTHGdKfZYig=",
- "stprf": "rsvC8L2Ko+4LStTjvVEq88RnKNp8iHff67r3F7YqKTnk4UiwgsDblvsdQoT6BfMJ7f4N8x1ORgw6trGf1VHNjw==",
- "vote": "lpXIAEYxDEB7hNdvdf+uCt7SktCFIOhejrmHL0mjg34=",
+ "sel": "VzxRlBfsfzHUu0yAx8Zb4opAQuqlZI2nUzJfjsIVNxQ=",
+ "stprf": "zOmx+bEu0a6oj9IM2zPYCQFRUh8UpRCjFVTpVgJnLx8RzCVIqSERXXBbMjjMtNXjx8v00HQHvpiL43BJdnMbSw==",
+ "vote": "gri5EKfbO1pfEV9U5igemysIm2Izt2VoVz+wpFJ/HkM=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "5TM5MD5FZBKNY3SKH7GYPIUAJ7PXIERXFLQ4O4FNL7OLUD6MHNJN4ZAB6Y",
+ "addr": "7RYQMCYEYTXKTTFYZJUEMLUBI6A62IZZCKG55FU2HDARLQP6YIXQFYAOK4",
"comment": "Wallet6",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "L9oncIYILrFOKQZTtYUT7VhVxs4oJSsalLQDEmKnfHw=",
- "stprf": "2nBNjnteR7en0E6/9oSy1YiEAS0QDgI0kE2nhJPBo1Ehw//xgcu9GvTaZZBuIMho+56Uol/qCr+HZuOp5Z5bPw==",
- "vote": "unBd7foxxmX6M3tFJiNX5nD9c30/MNvXkub7/0lUHAc=",
+ "sel": "WF9h2kZ8kpnGNowHvzI0DsDnxI7It5pKRSUtf/L0jLU=",
+ "stprf": "3Bfm2AUy1Imhz7Wel9iGurko0f9pXsBKt23IlNPz3OjJ1Fj6vnFThN+GkbCri+7fuH4LApQMpBAaDhrMwhawMQ==",
+ "vote": "fdqT1wFH/i3y8UnfMijMYNT+u1keS6XL49KNUEPME14=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "H3RSJSIQYCM5BIBDHHRSGAL2O2NVXV55F4HHTIDZKI2UZIEASLGH6B3STU",
+ "addr": "J32FISOXFYENMWGAF7MP7H2YJW7EJKW7P3V4QIURMQQCJKTQHNXG7GNHZI",
"comment": "Wallet7",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "E1pGwlUFBmi1WvSV3NO3UbkbwotKFZlG1bR+gGBmPpo=",
- "stprf": "sd0gkoXKFsqLqimLDP0DfWBOqc7h9gmHzZEB/o932f7X93KCZhvlDvgOSbgU03LjP8Bn+7H0CpuL/TXi7SoiCg==",
- "vote": "G4N2xDms0d+MIjaTVubbNYXiK4Ef7kbQAC1praFOHGA=",
+ "sel": "9efQXXxQV/A50mE0gNAc4HpyoYsV9dXRvuHOO3p8K3o=",
+ "stprf": "f/2U9UObeBbzccPXQ4Lt2u+kh/vAvGP5OS2qFBhN5tGuhPFzffUp4/NP9VDK3tSYk6KCxKCW1w2W0gF6lJYXRQ==",
+ "vote": "xEsok6UlkLROXbbugLAaU4bnrpuhY6KXoOOtn8gJWqk=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "NJFY7INZLZVXAHAIEIR2VYTDXMBLAYKHQO3APX4RRTFN2WRUW6JWJ6L6RA",
+ "addr": "USX63FETLP5RVMG7IXVQBH225AJEJRZOKI4T4TM3S6A67UFKO5XTXO3PFM",
"comment": "Wallet8",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "wGveX8ZZCA2YY4mvgGAkrGw4TuTki9aWf3bOAuLblhw=",
- "stprf": "llVw21zZ7lsxQ99EPz3FVMUIfy3vkBemly60BZv9HURxjMvqNHC5XIm1slN339IHC54t1WXt5YMlzqGxTNEBjw==",
- "vote": "mBNaxPrxkhReW3O+yYFdsBdhh642N2lbyQqAWFpkUsc=",
+ "sel": "dIOwdiHgEt9fHKIHd3OX0jsh5HciLdx99NlAzUEPzdM=",
+ "stprf": "xAkeR9Patvg+u+RnxEk+TzBP9r28Gp0nRZawUtNssm+ggUvRmDNJX5lpAIEPBIY7kBOP8l9pckvW3giFpB5dnA==",
+ "vote": "gfOV2FujKyH3ny1EgNqy232nZ/csR4dpqZBO1zu6c3s=",
"voteKD": 10000,
"voteLst": 3000000
}
},
{
- "addr": "AQ4PRMUOLU26M3XSGGT7UIYWICBV3DCB7FYKSKPAXYBSOFEDBHPFAEOPXE",
+ "addr": "JWDS5OT7OSJQCRHDYDL2ATXWDFY6MPVAWWR4KLMG5H36MCBAE4RVSTYT6I",
"comment": "Wallet9",
"state": {
"algo": 250000000000000,
"onl": 1,
- "sel": "BI3DwXKQrnqxulSpUVGGtz4TBKh0RC/qbpSqDdZXFhc=",
- "stprf": "0XeVxzG/voI2z0Imz79sN5CHI3U0P7ljlTpMLqODE5FaDCkB0s9vkACKiTQfNZCbfQl+20seL/7cyOOhF+OOVA==",
- "vote": "5zFeVWFVN2huVvdsmYt0vhlFuggwpfY8QFGLcWbkGDg=",
+ "sel": "5sUPjLTWyfymPQ1b/i8JN04hZb0K7aH8nq9s2eKQ4Wk=",
+ "stprf": "T9yUBs3Bux+gqE8Xs+nb4GxCWYTGVVr+kzuDyf9t32kfxToCRCmJMGUy38j5cnmLDqY8zykMsb31j1W6fa36Hg==",
+ "vote": "+q+pq8XSpqoVKD7xvvCIZ+NBei/+JUcF2rIG7iSasXc=",
"voteKD": 10000,
"voteLst": 3000000
}
@@ -308,6 +350,6 @@
"fees": "OOZZ32IHB6SS6ZTARKJ2PQP3QKE7R3IWQTOPXRGLTAGPVCDS3FHJOEOYVM",
"id": "v1",
"network": "alphanet",
- "proto": "alpha1",
+ "proto": "alpha4",
"rwd": "7777777777777777777777777777777777777777777777777774MSJUVU"
}
diff --git a/ledger/accountdb.go b/ledger/accountdb.go
index 264687b20..9b8d41828 100644
--- a/ledger/accountdb.go
+++ b/ledger/accountdb.go
@@ -23,6 +23,7 @@ import (
"encoding/hex"
"errors"
"fmt"
+ "math"
"strings"
"time"
@@ -50,6 +51,8 @@ type accountsDbQueries struct {
lookupStmt *sql.Stmt
lookupResourcesStmt *sql.Stmt
lookupAllResourcesStmt *sql.Stmt
+ lookupKvPairStmt *sql.Stmt
+ lookupKeysByRangeStmt *sql.Stmt
lookupCreatorStmt *sql.Stmt
}
@@ -129,6 +132,12 @@ var createResourcesTable = []string{
PRIMARY KEY (addrid, aidx) ) WITHOUT ROWID`,
}
+var createBoxTable = []string{
+ `CREATE TABLE IF NOT EXISTS kvstore (
+ key blob primary key,
+ value blob)`,
+}
+
var createOnlineAccountsTable = []string{
`CREATE TABLE IF NOT EXISTS onlineaccounts (
address BLOB NOT NULL,
@@ -168,6 +177,7 @@ var accountsResetExprs = []string{
`DROP TABLE IF EXISTS acctrounds`,
`DROP TABLE IF EXISTS accounttotals`,
`DROP TABLE IF EXISTS accountbase`,
+ `DROP TABLE IF EXISTS kvstore`,
`DROP TABLE IF EXISTS assetcreators`,
`DROP TABLE IF EXISTS storedcatchpoints`,
`DROP TABLE IF EXISTS catchpointstate`,
@@ -183,7 +193,7 @@ var accountsResetExprs = []string{
// accountDBVersion is the database version that this binary would know how to support and how to upgrade to.
// details about the content of each of the versions can be found in the upgrade functions upgradeDatabaseSchemaXXXX
// and their descriptions.
-var accountDBVersion = int32(7)
+var accountDBVersion = int32(9)
// persistedAccountData is used for representing a single account stored on the disk. In addition to the
// basics.AccountData, it also stores complete referencing information used to maintain the base accounts
@@ -262,6 +272,15 @@ func (prd *persistedResourcesData) AccountResource() ledgercore.AccountResource
return ret
}
+//msgp:ignore persistedKVData
+type persistedKVData struct {
+ // kv value
+ value []byte
+ // the round number that is associated with the kv value. This field is the corresponding one to the round field
+ // in persistedAccountData, and serves the same purpose.
+ round basics.Round
+}
+
// resourceDelta is used as part of the compactResourcesDeltas to describe a change to a single resource.
type resourceDelta struct {
oldResource persistedResourcesData
@@ -354,6 +373,14 @@ const (
catchpointStateCatchpointLookback = catchpointState("catchpointLookback")
)
+// MaxEncodedBaseAccountDataSize is a rough estimate for the worst-case scenario we're going to have of the base account data serialized.
+// this number is verified by the TestEncodedBaseAccountDataSize function.
+const MaxEncodedBaseAccountDataSize = 350
+
+// MaxEncodedBaseResourceDataSize is a rough estimate for the worst-case scenario we're going to have of the base resource data serialized.
+// this number is verified by the TestEncodedBaseResourceSize function.
+const MaxEncodedBaseResourceDataSize = 20000
+
// normalizedAccountBalance is a staging area for a catchpoint file account information before it's being added to the catchpoint staging tables.
type normalizedAccountBalance struct {
// The public key address to which the account belongs.
@@ -451,15 +478,10 @@ func prepareNormalizedBalancesV6(bals []encodedBalanceRecordV6, proto config.Con
if err != nil {
return nil, err
}
- var ctype basics.CreatableType
- if resData.IsAsset() {
- ctype = basics.AssetCreatable
- } else if resData.IsApp() {
- ctype = basics.AppCreatable
- } else {
- err = fmt.Errorf("unknown creatable for addr %s, aidx %d, data %v", balance.Address.String(), cidx, resData)
+ normalizedAccountBalances[i].accountHashes[curHashIdx], err = resourcesHashBuilderV6(&resData, balance.Address, basics.CreatableIndex(cidx), resData.UpdateRound, res)
+ if err != nil {
+ return nil, err
}
- normalizedAccountBalances[i].accountHashes[curHashIdx] = resourcesHashBuilderV6(balance.Address, basics.CreatableIndex(cidx), ctype, resData.UpdateRound, res)
normalizedAccountBalances[i].resources[basics.CreatableIndex(cidx)] = resData
normalizedAccountBalances[i].encodedResources[basics.CreatableIndex(cidx)] = res
curHashIdx++
@@ -1096,14 +1118,14 @@ func writeCatchpointStagingCreatable(ctx context.Context, tx *sql.Tx, bals []nor
if resData.IsOwning() {
// determine if it's an asset
if resData.IsAsset() {
- _, err := insertCreatorsStmt.ExecContext(ctx, basics.CreatableIndex(aidx), balance.address[:], basics.AssetCreatable)
+ _, err := insertCreatorsStmt.ExecContext(ctx, aidx, balance.address[:], basics.AssetCreatable)
if err != nil {
return err
}
}
// determine if it's an application
if resData.IsApp() {
- _, err := insertCreatorsStmt.ExecContext(ctx, basics.CreatableIndex(aidx), balance.address[:], basics.AppCreatable)
+ _, err := insertCreatorsStmt.ExecContext(ctx, aidx, balance.address[:], basics.AppCreatable)
if err != nil {
return err
}
@@ -1114,6 +1136,36 @@ func writeCatchpointStagingCreatable(ctx context.Context, tx *sql.Tx, bals []nor
return nil
}
+// writeCatchpointStagingKVs inserts all the KVs in the provided array into the
+// catchpoint kvstore staging table catchpointkvstore, and their hashes to the pending
+func writeCatchpointStagingKVs(ctx context.Context, tx *sql.Tx, kvrs []encodedKVRecordV6) error {
+ insertKV, err := tx.PrepareContext(ctx, "INSERT INTO catchpointkvstore(key, value) VALUES(?, ?)")
+ if err != nil {
+ return err
+ }
+ defer insertKV.Close()
+
+ insertHash, err := tx.PrepareContext(ctx, "INSERT INTO catchpointpendinghashes(data) VALUES(?)")
+ if err != nil {
+ return err
+ }
+ defer insertHash.Close()
+
+ for _, kvr := range kvrs {
+ _, err := insertKV.ExecContext(ctx, kvr.Key, kvr.Value)
+ if err != nil {
+ return err
+ }
+
+ hash := kvHashBuilderV6(string(kvr.Key), kvr.Value)
+ _, err = insertHash.ExecContext(ctx, hash)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func resetCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, newCatchup bool) (err error) {
s := []string{
"DROP TABLE IF EXISTS catchpointbalances",
@@ -1121,6 +1173,7 @@ func resetCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, newCatchup
"DROP TABLE IF EXISTS catchpointaccounthashes",
"DROP TABLE IF EXISTS catchpointpendinghashes",
"DROP TABLE IF EXISTS catchpointresources",
+ "DROP TABLE IF EXISTS catchpointkvstore",
"DELETE FROM accounttotals where id='catchpointStaging'",
}
@@ -1141,6 +1194,8 @@ func resetCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, newCatchup
"CREATE TABLE IF NOT EXISTS catchpointpendinghashes (data blob)",
"CREATE TABLE IF NOT EXISTS catchpointaccounthashes (id integer primary key, data blob)",
"CREATE TABLE IF NOT EXISTS catchpointresources (addrid INTEGER NOT NULL, aidx INTEGER NOT NULL, data BLOB NOT NULL, PRIMARY KEY (addrid, aidx) ) WITHOUT ROWID",
+ "CREATE TABLE IF NOT EXISTS catchpointkvstore (key blob primary key, value blob)",
+
createNormalizedOnlineBalanceIndex(idxnameBalances, "catchpointbalances"), // should this be removed ?
createUniqueAddressBalanceIndex(idxnameAddress, "catchpointbalances"),
)
@@ -1164,11 +1219,13 @@ func applyCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, balancesRou
"DROP TABLE IF EXISTS assetcreators",
"DROP TABLE IF EXISTS accounthashes",
"DROP TABLE IF EXISTS resources",
+ "DROP TABLE IF EXISTS kvstore",
"ALTER TABLE catchpointbalances RENAME TO accountbase",
"ALTER TABLE catchpointassetcreators RENAME TO assetcreators",
"ALTER TABLE catchpointaccounthashes RENAME TO accounthashes",
"ALTER TABLE catchpointresources RENAME TO resources",
+ "ALTER TABLE catchpointkvstore RENAME TO kvstore",
}
for _, stmt := range stmts {
@@ -1353,6 +1410,26 @@ func accountsCreateOnlineAccountsTable(ctx context.Context, tx *sql.Tx) error {
return nil
}
+// accountsCreateBoxTable creates the KVStore table for box-storage in the database.
+func accountsCreateBoxTable(ctx context.Context, tx *sql.Tx) error {
+ var exists bool
+ err := tx.QueryRow("SELECT 1 FROM pragma_table_info('kvstore') WHERE name='key'").Scan(&exists)
+ if err == nil {
+ // already exists
+ return nil
+ }
+ if err != sql.ErrNoRows {
+ return err
+ }
+ for _, stmt := range createBoxTable {
+ _, err = tx.ExecContext(ctx, stmt)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func accountsCreateTxTailTable(ctx context.Context, tx *sql.Tx) (err error) {
for _, stmt := range createTxTailTable {
_, err = tx.ExecContext(ctx, stmt)
@@ -1418,6 +1495,8 @@ type baseAccountData struct {
TotalAssets uint64 `codec:"j"`
TotalAppParams uint64 `codec:"k"`
TotalAppLocalStates uint64 `codec:"l"`
+ TotalBoxes uint64 `codec:"m"`
+ TotalBoxBytes uint64 `codec:"n"`
baseVotingData
@@ -1442,6 +1521,8 @@ func (ba *baseAccountData) IsEmpty() bool {
ba.TotalAssets == 0 &&
ba.TotalAppParams == 0 &&
ba.TotalAppLocalStates == 0 &&
+ ba.TotalBoxes == 0 &&
+ ba.TotalBoxBytes == 0 &&
ba.baseVotingData.IsEmpty()
}
@@ -1462,6 +1543,8 @@ func (ba *baseAccountData) SetCoreAccountData(ad *ledgercore.AccountData) {
ba.TotalAssets = ad.TotalAssets
ba.TotalAppParams = ad.TotalAppParams
ba.TotalAppLocalStates = ad.TotalAppLocalStates
+ ba.TotalBoxes = ad.TotalBoxes
+ ba.TotalBoxBytes = ad.TotalBoxBytes
ba.baseVotingData.SetCoreAccountData(ad)
}
@@ -1479,6 +1562,8 @@ func (ba *baseAccountData) SetAccountData(ad *basics.AccountData) {
ba.TotalAssets = uint64(len(ad.Assets))
ba.TotalAppParams = uint64(len(ad.AppParams))
ba.TotalAppLocalStates = uint64(len(ad.AppLocalStates))
+ ba.TotalBoxes = ad.TotalBoxes
+ ba.TotalBoxBytes = ad.TotalBoxBytes
ba.baseVotingData.VoteID = ad.VoteID
ba.baseVotingData.SelectionID = ad.SelectionID
@@ -1511,6 +1596,8 @@ func (ba *baseAccountData) GetLedgerCoreAccountBaseData() ledgercore.AccountBase
TotalAppLocalStates: ba.TotalAppLocalStates,
TotalAssetParams: ba.TotalAssetParams,
TotalAssets: ba.TotalAssets,
+ TotalBoxes: ba.TotalBoxes,
+ TotalBoxBytes: ba.TotalBoxBytes,
}
}
@@ -1537,6 +1624,8 @@ func (ba *baseAccountData) GetAccountData() basics.AccountData {
NumByteSlice: ba.TotalAppSchemaNumByteSlice,
},
TotalExtraAppPages: ba.TotalExtraAppPages,
+ TotalBoxes: ba.TotalBoxes,
+ TotalBoxBytes: ba.TotalBoxBytes,
VoteID: ba.VoteID,
SelectionID: ba.SelectionID,
@@ -2165,13 +2254,24 @@ func performTxTailTableMigration(ctx context.Context, tx *sql.Tx, blockDb db.Acc
return fmt.Errorf("latest block header %d cannot be retrieved : %w", dbRound, err)
}
- maxTxnLife := basics.Round(config.Consensus[latestHdr.CurrentProtocol].MaxTxnLife)
- deeperBlockHistory := basics.Round(config.Consensus[latestHdr.CurrentProtocol].DeeperBlockHeaderHistory)
- firstRound := (latestBlockRound + 1).SubSaturate(maxTxnLife + deeperBlockHistory)
+ proto := config.Consensus[latestHdr.CurrentProtocol]
+ maxTxnLife := basics.Round(proto.MaxTxnLife)
+ deeperBlockHistory := basics.Round(proto.DeeperBlockHeaderHistory)
+ // firstRound is either maxTxnLife + deeperBlockHistory back from the latest for regular init
+ // or maxTxnLife + deeperBlockHistory + CatchpointLookback back for catchpoint apply.
+ // Try to check the earliest available and start from there.
+ firstRound := (latestBlockRound + 1).SubSaturate(maxTxnLife + deeperBlockHistory + basics.Round(proto.CatchpointLookback))
// we don't need to have the txtail for round 0.
if firstRound == basics.Round(0) {
firstRound++
}
+ if _, err := blockGet(blockTx, firstRound); err != nil {
+ // looks like not catchpoint but a regular migration, start from maxTxnLife + deeperBlockHistory back
+ firstRound = (latestBlockRound + 1).SubSaturate(maxTxnLife + deeperBlockHistory)
+ if firstRound == basics.Round(0) {
+ firstRound++
+ }
+ }
tailRounds := make([][]byte, 0, maxTxnLife)
for rnd := firstRound; rnd <= dbRound; rnd++ {
blk, err := blockGet(blockTx, rnd)
@@ -2526,6 +2626,16 @@ func accountsInitDbQueries(q db.Queryable) (*accountsDbQueries, error) {
return nil, err
}
+ qs.lookupKvPairStmt, err = q.Prepare("SELECT acctrounds.rnd, kvstore.value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
+ if err != nil {
+ return nil, err
+ }
+
+ qs.lookupKeysByRangeStmt, err = q.Prepare("SELECT acctrounds.rnd, kvstore.key FROM acctrounds LEFT JOIN kvstore ON kvstore.key >= ? AND kvstore.key < ? WHERE id='acctbase'")
+ if err != nil {
+ return nil, err
+ }
+
qs.lookupCreatorStmt, err = q.Prepare("SELECT acctrounds.rnd, assetcreators.creator FROM acctrounds LEFT JOIN assetcreators ON asset = ? AND ctype = ? WHERE id='acctbase'")
if err != nil {
return nil, err
@@ -2588,6 +2698,108 @@ func (qs *accountsDbQueries) listCreatables(maxIdx basics.CreatableIndex, maxRes
return
}
+// sql.go has the following contradictory comments:
+
+// Reference types such as []byte are only valid until the next call to Scan
+// and should not be retained. Their underlying memory is owned by the driver.
+// If retention is necessary, copy their values before the next call to Scan.
+
+// If a dest argument has type *[]byte, Scan saves in that argument a
+// copy of the corresponding data. The copy is owned by the caller and
+// can be modified and held indefinitely. The copy can be avoided by
+// using an argument of type *RawBytes instead; see the documentation
+// for RawBytes for restrictions on its use.
+
+// After check source code, a []byte slice destination is definitely cloned.
+
+func (qs *accountsDbQueries) lookupKeyValue(key string) (pv persistedKVData, err error) {
+ err = db.Retry(func() error {
+ var val []byte
+ // Cast to []byte to avoid interpretation as character string, see note in upsertKvPair
+ err := qs.lookupKvPairStmt.QueryRow([]byte(key)).Scan(&pv.round, &val)
+ if err != nil {
+ // this should never happen; it indicates that we don't have a current round in the acctrounds table.
+ if err == sql.ErrNoRows {
+ // Return the zero value of data
+ err = fmt.Errorf("unable to query value for key %v : %w", key, err)
+ }
+ return err
+ }
+ if val != nil { // We got a non-null value, so it exists
+ pv.value = val
+ return nil
+ }
+ // we don't have that key, just return pv with the database round (pv.value==nil)
+ return nil
+ })
+ return
+}
+
+// keyPrefixIntervalPreprocessing is implemented to generate an interval for DB queries that look up keys by prefix.
+// Such DB query was designed this way, to trigger the binary search optimization in SQLITE3.
+// The DB comparison for blob typed primary key is lexicographic, i.e., byte by byte.
+// In this way, we can introduce an interval that a primary key should be >= some prefix, < some prefix increment.
+// A corner case to consider is that, the prefix has last byte 0xFF, or the prefix is full of 0xFF.
+// - The first case can be solved by carrying, e.g., prefix = 0x1EFF -> interval being >= 0x1EFF and < 0x1F
+// - The second case can be solved by disregarding the upper limit, i.e., prefix = 0xFFFF -> interval being >= 0xFFFF
+// Another corner case to consider is empty byte, []byte{} or nil.
+// - In both cases, the results are interval >= "", i.e., returns []byte{} for prefix, and nil for prefixIncr.
+func keyPrefixIntervalPreprocessing(prefix []byte) ([]byte, []byte) {
+ if prefix == nil {
+ prefix = []byte{}
+ }
+ prefixIncr := make([]byte, len(prefix))
+ copy(prefixIncr, prefix)
+ for i := len(prefix) - 1; i >= 0; i-- {
+ currentByteIncr := int(prefix[i]) + 1
+ if currentByteIncr > 0xFF {
+ prefixIncr = prefixIncr[:len(prefixIncr)-1]
+ continue
+ }
+ prefixIncr[i] = byte(currentByteIncr)
+ return prefix, prefixIncr
+ }
+ return prefix, nil
+}
+
+func (qs *accountsDbQueries) lookupKeysByPrefix(prefix string, maxKeyNum uint64, results map[string]bool, resultCount uint64) (round basics.Round, err error) {
+ start, end := keyPrefixIntervalPreprocessing([]byte(prefix))
+ if end == nil {
+ // Not an expected use case, it's asking for all keys, or all keys
+ // prefixed by some number of 0xFF bytes.
+ return 0, fmt.Errorf("Lookup by strange prefix %#v", prefix)
+ }
+ err = db.Retry(func() error {
+ var rows *sql.Rows
+ rows, err = qs.lookupKeysByRangeStmt.Query(start, end)
+ if err != nil {
+ return err
+ }
+ defer rows.Close()
+
+ var v sql.NullString
+
+ for rows.Next() {
+ if resultCount == maxKeyNum {
+ return nil
+ }
+ err = rows.Scan(&round, &v)
+ if err != nil {
+ return err
+ }
+ if v.Valid {
+ if _, ok := results[v.String]; ok {
+ continue
+ }
+ results[v.String] = true
+ resultCount++
+ }
+ }
+ return nil
+ })
+ return
+}
+
func (qs *accountsDbQueries) lookupCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (addr basics.Address, ok bool, dbRound basics.Round, err error) {
err = db.Retry(func() error {
var buf []byte
@@ -2919,6 +3131,8 @@ func (qs *accountsDbQueries) close() {
&qs.lookupStmt,
&qs.lookupResourcesStmt,
&qs.lookupAllResourcesStmt,
+ &qs.lookupKvPairStmt,
+ &qs.lookupKeysByRangeStmt,
&qs.lookupCreatorStmt,
}
for _, preparedQuery := range preparedQueries {
@@ -3131,6 +3345,9 @@ type accountsWriter interface {
deleteResource(addrid int64, aidx basics.CreatableIndex) (rowsAffected int64, err error)
updateResource(addrid int64, aidx basics.CreatableIndex, data resourcesData) (rowsAffected int64, err error)
+ upsertKvPair(key string, value []byte) error
+ deleteKvPair(key string) error
+
insertCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType, creator []byte) (rowid int64, err error)
deleteCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType) (rowsAffected int64, err error)
@@ -3147,6 +3364,7 @@ type accountsSQLWriter struct {
insertCreatableIdxStmt, deleteCreatableIdxStmt *sql.Stmt
deleteByRowIDStmt, insertStmt, updateStmt *sql.Stmt
deleteResourceStmt, insertResourceStmt, updateResourceStmt *sql.Stmt
+ deleteKvPairStmt, upsertKvPairStmt *sql.Stmt
}
type onlineAccountsSQLWriter struct {
@@ -3154,38 +3372,21 @@ type onlineAccountsSQLWriter struct {
}
 func (w *accountsSQLWriter) close() {
-	if w.deleteByRowIDStmt != nil {
-		w.deleteByRowIDStmt.Close()
-		w.deleteByRowIDStmt = nil
-	}
-	if w.insertStmt != nil {
-		w.insertStmt.Close()
-		w.insertStmt = nil
-	}
-	if w.updateStmt != nil {
-		w.updateStmt.Close()
-		w.updateStmt = nil
+	// Formatted to match the type definition above
+	preparedStmts := []**sql.Stmt{
+		&w.insertCreatableIdxStmt, &w.deleteCreatableIdxStmt,
+		&w.deleteByRowIDStmt, &w.insertStmt, &w.updateStmt,
+		&w.deleteResourceStmt, &w.insertResourceStmt, &w.updateResourceStmt,
+		&w.deleteKvPairStmt, &w.upsertKvPairStmt,
 	}
-	if w.deleteResourceStmt != nil {
-		w.deleteResourceStmt.Close()
-		w.deleteResourceStmt = nil
-	}
-	if w.insertResourceStmt != nil {
-		w.insertResourceStmt.Close()
-		w.insertResourceStmt = nil
-	}
-	if w.updateResourceStmt != nil {
-		w.updateResourceStmt.Close()
-		w.updateResourceStmt = nil
-	}
-	if w.insertCreatableIdxStmt != nil {
-		w.insertCreatableIdxStmt.Close()
-		w.insertCreatableIdxStmt = nil
-	}
-	if w.deleteCreatableIdxStmt != nil {
-		w.deleteCreatableIdxStmt.Close()
-		w.deleteCreatableIdxStmt = nil
+
+	// Close and nil out each prepared statement; the nil-check and reset make
+	// close() safe to call more than once.
+	for _, stmt := range preparedStmts {
+		if (*stmt) != nil {
+			(*stmt).Close()
+			*stmt = nil
+		}
 	}
+
 }
func (w *onlineAccountsSQLWriter) close() {
@@ -3195,7 +3396,7 @@ func (w *onlineAccountsSQLWriter) close() {
}
}
-func makeAccountsSQLWriter(tx *sql.Tx, hasAccounts bool, hasResources bool, hasCreatables bool) (w *accountsSQLWriter, err error) {
+func makeAccountsSQLWriter(tx *sql.Tx, hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (w *accountsSQLWriter, err error) {
w = new(accountsSQLWriter)
if hasAccounts {
@@ -3232,6 +3433,18 @@ func makeAccountsSQLWriter(tx *sql.Tx, hasAccounts bool, hasResources bool, hasC
}
}
+ if hasKvPairs {
+ w.upsertKvPairStmt, err = tx.Prepare("INSERT INTO kvstore (key, value) VALUES (?, ?) ON CONFLICT(key) DO UPDATE SET value=excluded.value")
+ if err != nil {
+ return
+ }
+
+ w.deleteKvPairStmt, err = tx.Prepare("DELETE FROM kvstore WHERE key=?")
+ if err != nil {
+ return
+ }
+ }
+
if hasCreatables {
w.insertCreatableIdxStmt, err = tx.Prepare("INSERT INTO assetcreators (asset, creator, ctype) VALUES (?, ?, ?)")
if err != nil {
@@ -3300,6 +3513,31 @@ func (w accountsSQLWriter) updateResource(addrid int64, aidx basics.CreatableInd
return
}
+// upsertKvPair inserts the given key/value pair into the kvstore table,
+// replacing the value if the key already exists.
+func (w accountsSQLWriter) upsertKvPair(key string, value []byte) error {
+	// NOTE! The key is cast to []byte before being bound. If a `string` were
+	// passed, then for the `BoxKey` case the key might contain a 0-byte
+	// (coming from the uint64 appID), and the DB key write could be cut off
+	// after such a 0-byte. Casting `string` to `[]byte` avoids that, and test:
+	// - `TestBoxNamesByAppIDs` in `acctupdates_test`
+	// relies on such modification.
+	result, err := w.upsertKvPairStmt.Exec([]byte(key), value)
+	if err != nil {
+		return err
+	}
+	// The insert id itself is unused; the call only surfaces driver errors.
+	_, err = result.LastInsertId()
+	return err
+}
+
+// deleteKvPair removes the kvstore row for the given key, if present.
+func (w accountsSQLWriter) deleteKvPair(key string) error {
+	// Cast to []byte to avoid interpretation as character string, see note in upsertKvPair
+	result, err := w.deleteKvPairStmt.Exec([]byte(key))
+	if err != nil {
+		return err
+	}
+	// The affected-row count itself is unused; the call only surfaces driver errors.
+	_, err = result.RowsAffected()
+	return err
+}
+
func (w accountsSQLWriter) insertCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType, creator []byte) (rowid int64, err error) {
result, err := w.insertCreatableIdxStmt.Exec(cidx, creator, ctype)
if err != nil {
@@ -3348,20 +3586,21 @@ func (w onlineAccountsSQLWriter) insertOnlineAccount(addr basics.Address, normBa
 // accountsNewRound is a convenience wrapper for accountsNewRoundImpl
 func accountsNewRound(
 	tx *sql.Tx,
-	updates compactAccountDeltas, resources compactResourcesDeltas, creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable,
+	updates compactAccountDeltas, resources compactResourcesDeltas, kvPairs map[string]modifiedKvValue, creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable,
 	proto config.ConsensusParams, lastUpdateRound basics.Round,
-) (updatedAccounts []persistedAccountData, updatedResources map[basics.Address][]persistedResourcesData, err error) {
+) (updatedAccounts []persistedAccountData, updatedResources map[basics.Address][]persistedResourcesData, updatedKVs map[string]persistedKVData, err error) {
+	// Only prepare SQL statements for the table groups that actually have work.
 	hasAccounts := updates.len() > 0
 	hasResources := resources.len() > 0
+	hasKvPairs := len(kvPairs) > 0
 	hasCreatables := len(creatables) > 0
-	writer, err := makeAccountsSQLWriter(tx, hasAccounts, hasResources, hasCreatables)
+	writer, err := makeAccountsSQLWriter(tx, hasAccounts, hasResources, hasKvPairs, hasCreatables)
 	if err != nil {
 		return
 	}
 	defer writer.close()
-	return accountsNewRoundImpl(writer, updates, resources, creatables, proto, lastUpdateRound)
+	return accountsNewRoundImpl(writer, updates, resources, kvPairs, creatables, proto, lastUpdateRound)
 }
func onlineAccountsNewRound(
@@ -3385,10 +3624,9 @@ func onlineAccountsNewRound(
// The function returns a persistedAccountData for the modified accounts which can be stored in the base cache.
func accountsNewRoundImpl(
writer accountsWriter,
- updates compactAccountDeltas, resources compactResourcesDeltas, creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable,
+ updates compactAccountDeltas, resources compactResourcesDeltas, kvPairs map[string]modifiedKvValue, creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable,
proto config.ConsensusParams, lastUpdateRound basics.Round,
-) (updatedAccounts []persistedAccountData, updatedResources map[basics.Address][]persistedResourcesData, err error) {
-
+) (updatedAccounts []persistedAccountData, updatedResources map[basics.Address][]persistedResourcesData, updatedKVs map[string]persistedKVData, err error) {
updatedAccounts = make([]persistedAccountData, updates.len())
updatedAccountIdx := 0
newAddressesRowIDs := make(map[basics.Address]int64)
@@ -3586,16 +3824,35 @@ func accountsNewRoundImpl(
}
}
- if len(creatables) > 0 {
- for cidx, cdelta := range creatables {
- if cdelta.Created {
- _, err = writer.insertCreatable(cidx, cdelta.Ctype, cdelta.Creator[:])
- } else {
- _, err = writer.deleteCreatable(cidx, cdelta.Ctype)
+ updatedKVs = make(map[string]persistedKVData, len(kvPairs))
+ for key, mv := range kvPairs {
+ if mv.data != nil {
+ // reminder: check oldData for nil here, b/c bytes.Equal conflates nil and "".
+ if mv.oldData != nil && bytes.Equal(mv.oldData, mv.data) {
+ continue // changed back within the delta span
}
- if err != nil {
- return
+ err = writer.upsertKvPair(key, mv.data)
+ updatedKVs[key] = persistedKVData{value: mv.data, round: lastUpdateRound}
+ } else {
+ if mv.oldData == nil { // Came and went within the delta span
+ continue
}
+ err = writer.deleteKvPair(key)
+ updatedKVs[key] = persistedKVData{value: nil, round: lastUpdateRound}
+ }
+ if err != nil {
+ return
+ }
+ }
+
+ for cidx, cdelta := range creatables {
+ if cdelta.Created {
+ _, err = writer.insertCreatable(cidx, cdelta.Ctype, cdelta.Creator[:])
+ } else {
+ _, err = writer.deleteCreatable(cidx, cdelta.Ctype)
+ }
+ if err != nil {
+ return
}
}
@@ -3864,6 +4121,16 @@ func totalAccounts(ctx context.Context, tx *sql.Tx) (total uint64, err error) {
return
}
+// totalKVs returns the number of rows in the kvstore table.
+func totalKVs(ctx context.Context, tx *sql.Tx) (total uint64, err error) {
+	err = tx.QueryRowContext(ctx, "SELECT count(1) FROM kvstore").Scan(&total)
+	if err == sql.ErrNoRows {
+		// Report an empty result as zero rather than an error.
+		total = 0
+		err = nil
+		return
+	}
+	return
+}
+
// reencodeAccounts reads all the accounts in the accountbase table, decode and reencode the account data.
// if the account data is found to have a different encoding, it would update the encoded account on disk.
// on return, it returns the number of modified accounts as well as an error ( if we had any )
@@ -3941,6 +4208,7 @@ func reencodeAccounts(ctx context.Context, tx *sql.Tx) (modifiedAccounts uint, e
}
// MerkleCommitter allows storing and loading merkletrie pages from a sqlite database.
+//
//msgp:ignore MerkleCommitter
type MerkleCommitter struct {
tx *sql.Tx
@@ -4111,8 +4379,9 @@ func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, tx *sql.Tx,
iterator.Close()
return
}
- // we just finished reading the table.
- iterator.Close()
+ // Do not Close() the iterator here. It is the caller's responsibility to
+ // do so, signalled by the return of an empty chunk. If we Close() here, the
+ // next call to Next() will start all over!
return
}
@@ -4129,6 +4398,7 @@ func (iterator *encodedAccountsBatchIter) Close() {
}
// orderedAccountsIterStep is used by orderedAccountsIter to define the current step
+//
//msgp:ignore orderedAccountsIterStep
type orderedAccountsIterStep int
@@ -4165,18 +4435,16 @@ type orderedAccountsIter struct {
pendingBaseRow pendingBaseRow
pendingResourceRow pendingResourceRow
accountCount int
- resourceCount int
insertStmt *sql.Stmt
}
// makeOrderedAccountsIter creates an ordered account iterator. Note that due to implementation reasons,
// only a single iterator can be active at a time.
-func makeOrderedAccountsIter(tx *sql.Tx, accountCount int, resourceCount int) *orderedAccountsIter {
+func makeOrderedAccountsIter(tx *sql.Tx, accountCount int) *orderedAccountsIter {
return &orderedAccountsIter{
- tx: tx,
- accountCount: accountCount,
- resourceCount: resourceCount,
- step: oaiStepStartup,
+ tx: tx,
+ accountCount: accountCount,
+ step: oaiStepStartup,
}
}
@@ -4553,28 +4821,22 @@ func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []accountAd
}
resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte, lastResource bool) error {
- var err error
if resData != nil {
- var ctype basics.CreatableType
- if resData.IsAsset() {
- ctype = basics.AssetCreatable
- } else if resData.IsApp() {
- ctype = basics.AppCreatable
- } else {
- err = fmt.Errorf("unknown creatable for addr %s, aidx %d, data %v", addr.String(), cidx, resData)
+ hash, err := resourcesHashBuilderV6(resData, addr, cidx, resData.UpdateRound, encodedResourceData)
+ if err != nil {
return err
}
- hash := resourcesHashBuilderV6(addr, cidx, ctype, resData.UpdateRound, encodedResourceData)
_, err = iterator.insertStmt.ExecContext(ctx, lastAddrID, hash)
+ return err
}
- return err
+ return nil
}
count := 0
count, iterator.pendingBaseRow, iterator.pendingResourceRow, err = processAllBaseAccountRecords(
iterator.accountBaseRows, iterator.resourcesRows,
baseCb, resCb,
- iterator.pendingBaseRow, iterator.pendingResourceRow, iterator.accountCount, iterator.resourceCount,
+ iterator.pendingBaseRow, iterator.pendingResourceRow, iterator.accountCount, math.MaxInt,
)
if err != nil {
iterator.Close(ctx)
@@ -4770,6 +5032,12 @@ func (prd *persistedResourcesData) before(other *persistedResourcesData) bool {
return prd.round < other.round
}
+// before compares the round numbers of two persistedKVData and determines if the current persistedKVData
+// happened before the other. It is used to decide which cached entry is stale.
+func (prd persistedKVData) before(other *persistedKVData) bool {
+	return prd.round < other.round
+}
+
// before compares the round numbers of two persistedAccountData and determines if the current persistedAccountData
// happened before the other.
func (pac *persistedOnlineAccountData) before(other *persistedOnlineAccountData) bool {
@@ -4896,6 +5164,11 @@ type catchpointFirstStageInfo struct {
// Total number of accounts in the catchpoint data file. Only set when catchpoint
// data files are generated.
TotalAccounts uint64 `codec:"accountsCount"`
+
+	// Total number of key-value pairs in the catchpoint data file. Only set when
+	// catchpoint data files are generated.
+ TotalKVs uint64 `codec:"kvsCount"`
+
// Total number of chunks in the catchpoint data file. Only set when catchpoint
// data files are generated.
TotalChunks uint64 `codec:"chunksCount"`
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index 5491cb21d..781c7300c 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -24,6 +24,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "math"
"math/rand"
"os"
"reflect"
@@ -33,6 +34,8 @@ import (
"testing"
"time"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
@@ -83,6 +86,9 @@ func accountsInitTest(tb testing.TB, tx *sql.Tx, initAccounts map[basics.Address
err = performOnlineRoundParamsTailMigration(context.Background(), tx, db.Accessor{}, true, proto)
require.NoError(tb, err)
+ err = accountsCreateBoxTable(context.Background(), tx)
+ require.NoError(tb, err)
+
return newDB
}
@@ -102,7 +108,7 @@ func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics.
pad, err := aq.lookup(addr)
require.NoError(t, err)
d := pad.accountData.GetLedgerCoreAccountData()
- require.Equal(t, d, expected)
+ require.Equal(t, expected, d)
switch d.Status {
case basics.Online:
@@ -309,7 +315,7 @@ func TestAccountDBRound(t *testing.T) {
require.NoError(t, err)
expectedOnlineRoundParams = append(expectedOnlineRoundParams, onlineRoundParams)
- updatedAccts, updatesResources, err := accountsNewRound(tx, updatesCnt, resourceUpdatesCnt, ctbsWithDeletes, proto, basics.Round(i))
+ updatedAccts, updatesResources, updatedKVs, err := accountsNewRound(tx, updatesCnt, resourceUpdatesCnt, nil, ctbsWithDeletes, proto, basics.Round(i))
require.NoError(t, err)
require.Equal(t, updatesCnt.len(), len(updatedAccts))
numResUpdates := 0
@@ -317,6 +323,7 @@ func TestAccountDBRound(t *testing.T) {
numResUpdates += len(rs)
}
require.Equal(t, resourceUpdatesCnt.len(), numResUpdates)
+ require.Empty(t, updatedKVs)
updatedOnlineAccts, err := onlineAccountsNewRound(tx, updatesOnlineCnt, proto, basics.Round(i))
require.NoError(t, err)
@@ -446,7 +453,7 @@ func TestAccountDBInMemoryAcct(t *testing.T) {
err = outResourcesDeltas.resourcesLoadOld(tx, knownAddresses)
require.NoError(t, err)
- updatedAccts, updatesResources, err := accountsNewRound(tx, outAccountDeltas, outResourcesDeltas, nil, proto, basics.Round(lastRound))
+ updatedAccts, updatesResources, updatedKVs, err := accountsNewRound(tx, outAccountDeltas, outResourcesDeltas, nil, nil, proto, basics.Round(lastRound))
require.NoError(t, err)
require.Equal(t, 1, len(updatedAccts)) // we store empty even for deleted accounts
require.Equal(t,
@@ -459,6 +466,8 @@ func TestAccountDBInMemoryAcct(t *testing.T) {
persistedResourcesData{addrid: 0, aidx: 100, data: makeResourcesData(0), round: basics.Round(lastRound)},
updatesResources[addr][0],
)
+
+ require.Empty(t, updatedKVs)
})
}
}
@@ -524,10 +533,12 @@ func checkCreatables(t *testing.T,
// randomCreatableSampling sets elements to delete from previous iteration
 // It considers 10 elements in an iteration.
// loop 0: returns the first 10 elements
-// loop 1: returns: * the second 10 elements
-// * random sample of elements from the first 10: created changed from true -> false
-// loop 2: returns: * the elements 20->30
-// * random sample of elements from 10->20: created changed from true -> false
+// loop 1: returns:
+// - the second 10 elements
+// - random sample of elements from the first 10: created changed from true -> false
+// loop 2: returns:
+// - the elements 20->30
+// - random sample of elements from 10->20: created changed from true -> false
func randomCreatableSampling(iteration int, crtbsList []basics.CreatableIndex,
creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable,
expectedDbImage map[basics.CreatableIndex]ledgercore.ModifiedCreatable,
@@ -717,6 +728,7 @@ func benchmarkReadingRandomBalances(b *testing.B, inMemory bool) {
qs, err := accountsInitDbQueries(dbs.Rdb.Handle)
require.NoError(b, err)
+ defer qs.close()
// read all the balances in the database, shuffled
addrs := make([]basics.Address, len(accounts))
@@ -1015,8 +1027,8 @@ func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder boo
last64KSize = chunkSize
last64KAccountCreationTime = time.Duration(0)
}
- var balances catchpointFileBalancesChunkV6
- balances.Balances = make([]encodedBalanceRecordV6, chunkSize)
+ var chunk catchpointFileChunkV6
+ chunk.Balances = make([]encodedBalanceRecordV6, chunkSize)
for i := uint64(0); i < chunkSize; i++ {
var randomAccount encodedBalanceRecordV6
accountData := baseAccountData{RewardsBase: accountsLoaded + i}
@@ -1026,13 +1038,13 @@ func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder boo
if ascendingOrder {
binary.LittleEndian.PutUint64(randomAccount.Address[:], accountsLoaded+i)
}
- balances.Balances[i] = randomAccount
+ chunk.Balances[i] = randomAccount
}
balanceLoopDuration := time.Since(balancesLoopStart)
last64KAccountCreationTime += balanceLoopDuration
accountsGenerationDuration += balanceLoopDuration
- normalizedAccountBalances, err := prepareNormalizedBalancesV6(balances.Balances, proto)
+ normalizedAccountBalances, err := prepareNormalizedBalancesV6(chunk.Balances, proto)
require.NoError(b, err)
b.StartTimer()
err = l.trackerDBs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
@@ -1069,6 +1081,285 @@ func BenchmarkWriteCatchpointStagingBalances(b *testing.B) {
}
}
+// TestKeyPrefixIntervalPreprocessing checks the prefix/prefix-increment interval
+// computation, including the 0xFF carry cases and the empty/nil-prefix cases.
+func TestKeyPrefixIntervalPreprocessing(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	testCases := []struct {
+		input            []byte
+		outputPrefix     []byte
+		outputPrefixIncr []byte
+	}{
+		{input: []byte{0xAB, 0xCD}, outputPrefix: []byte{0xAB, 0xCD}, outputPrefixIncr: []byte{0xAB, 0xCE}},
+		{input: []byte{0xFF}, outputPrefix: []byte{0xFF}, outputPrefixIncr: nil},
+		{input: []byte{0xFE, 0xFF}, outputPrefix: []byte{0xFE, 0xFF}, outputPrefixIncr: []byte{0xFF}},
+		{input: []byte{0xFF, 0xFF}, outputPrefix: []byte{0xFF, 0xFF}, outputPrefixIncr: nil},
+		{input: []byte{0x1E, 0xFF, 0xFF}, outputPrefix: []byte{0x1E, 0xFF, 0xFF}, outputPrefixIncr: []byte{0x1F}},
+		{input: []byte{0xFF, 0xFE, 0xFF, 0xFF}, outputPrefix: []byte{0xFF, 0xFE, 0xFF, 0xFF}, outputPrefixIncr: []byte{0xFF, 0xFF}},
+		{input: []byte{0x00, 0xFF}, outputPrefix: []byte{0x00, 0xFF}, outputPrefixIncr: []byte{0x01}},
+		{input: []byte("bx:123"), outputPrefix: []byte("bx:123"), outputPrefixIncr: []byte("bx:124")},
+		{input: []byte{}, outputPrefix: []byte{}, outputPrefixIncr: nil},
+		{input: nil, outputPrefix: []byte{}, outputPrefixIncr: nil},
+	}
+	for _, tc := range testCases {
+		actualOutputPrefix, actualOutputPrefixIncr := keyPrefixIntervalPreprocessing(tc.input)
+		require.Equal(t, tc.outputPrefix, actualOutputPrefix)
+		require.Equal(t, tc.outputPrefixIncr, actualOutputPrefixIncr)
+	}
+}
+
+// TestLookupKeysByPrefix populates the kvstore table with a set of byte-string
+// keys and checks that lookupKeysByPrefix returns exactly the keys matching
+// each prefix, and rejects prefixes that have no finite upper bound.
+func TestLookupKeysByPrefix(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	dbs, fn := dbOpenTest(t, false)
+	setDbLogging(t, dbs)
+	defer cleanupTestDb(dbs, fn, false)
+
+	// return account data, initialize DB tables from accountsInitTest
+	_ = benchmarkInitBalances(t, 1, dbs, protocol.ConsensusCurrentVersion)
+
+	qs, err := accountsInitDbQueries(dbs.Rdb.Handle)
+	require.NoError(t, err)
+	defer qs.close()
+
+	kvPairDBPrepareSet := []struct {
+		key   []byte
+		value []byte
+	}{
+		{key: []byte{0xFF, 0x12, 0x34, 0x56, 0x78}, value: []byte("val0")},
+		{key: []byte{0xFF, 0xFF, 0x34, 0x56, 0x78}, value: []byte("val1")},
+		{key: []byte{0xFF, 0xFF, 0xFF, 0x56, 0x78}, value: []byte("val2")},
+		{key: []byte{0xFF, 0xFF, 0xFF, 0xFF, 0x78}, value: []byte("val3")},
+		{key: []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, value: []byte("val4")},
+		{key: []byte{0xFF, 0xFE, 0xFF}, value: []byte("val5")},
+		{key: []byte{0xFF, 0xFF, 0x00, 0xFF, 0xFF}, value: []byte("val6")},
+		{key: []byte{0xFF, 0xFF}, value: []byte("should not confuse with 0xFF-0xFE")},
+		{key: []byte{0xBA, 0xDD, 0xAD, 0xFF, 0xFF}, value: []byte("baddadffff")},
+		{key: []byte{0xBA, 0xDD, 0xAE, 0x00}, value: []byte("baddae00")},
+		{key: []byte{0xBA, 0xDD, 0xAE}, value: []byte("baddae")},
+		{key: []byte("TACOCAT"), value: []byte("val6")},
+		{key: []byte("TACOBELL"), value: []byte("2bucks50cents?")},
+		{key: []byte("DingHo-SmallPack"), value: []byte("3bucks75cents")},
+		{key: []byte("DingHo-StandardPack"), value: []byte("5bucks25cents")},
+		{key: []byte("BostonKitchen-CheeseSlice"), value: []byte("3bucks50cents")},
+		{key: []byte(`™£´´∂ƒ∂ƒßƒ©∑®ƒß∂†¬∆`), value: []byte("random Bluh")},
+	}
+
+	tx, err := dbs.Wdb.Handle.Begin()
+	require.NoError(t, err)
+
+	// writer is only for kvstore
+	writer, err := makeAccountsSQLWriter(tx, true, true, true, true)
+	// a setup failure must fail the test, not silently skip it
+	require.NoError(t, err)
+
+	for i := 0; i < len(kvPairDBPrepareSet); i++ {
+		err := writer.upsertKvPair(string(kvPairDBPrepareSet[i].key), kvPairDBPrepareSet[i].value)
+		require.NoError(t, err)
+	}
+
+	err = tx.Commit()
+	require.NoError(t, err)
+	writer.close()
+
+	testCases := []struct {
+		prefix        []byte
+		expectedNames [][]byte
+		err           string
+	}{
+		{
+			prefix: []byte{0xFF},
+			err:    "strange prefix",
+		},
+		{
+			prefix: []byte{0xFF, 0xFE},
+			expectedNames: [][]byte{
+				{0xFF, 0xFE, 0xFF},
+			},
+		},
+		{
+			prefix: []byte{0xFF, 0xFE, 0xFF},
+			expectedNames: [][]byte{
+				{0xFF, 0xFE, 0xFF},
+			},
+		},
+		{
+			prefix: []byte{0xFF, 0xFF},
+			err:    "strange prefix",
+		},
+		{
+			prefix: []byte{0xFF, 0xFF, 0xFF},
+			err:    "strange prefix",
+		},
+		{
+			prefix: []byte{0xFF, 0xFF, 0xFF, 0xFF},
+			err:    "strange prefix",
+		},
+		{
+			prefix: []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
+			err:    "strange prefix",
+		},
+		{
+			prefix: []byte{0xBA, 0xDD, 0xAD, 0xFF},
+			expectedNames: [][]byte{
+				{0xBA, 0xDD, 0xAD, 0xFF, 0xFF},
+			},
+		},
+		{
+			prefix: []byte{0xBA, 0xDD, 0xAD, 0xFF, 0xFF},
+			expectedNames: [][]byte{
+				{0xBA, 0xDD, 0xAD, 0xFF, 0xFF},
+			},
+		},
+		{
+			prefix: []byte{0xBA, 0xDD},
+			expectedNames: [][]byte{
+				{0xBA, 0xDD, 0xAE},
+				{0xBA, 0xDD, 0xAE, 0x00},
+				{0xBA, 0xDD, 0xAD, 0xFF, 0xFF},
+			},
+		},
+		{
+			prefix: []byte{0xBA, 0xDD, 0xAE},
+			expectedNames: [][]byte{
+				{0xBA, 0xDD, 0xAE},
+				{0xBA, 0xDD, 0xAE, 0x00},
+			},
+		},
+		{
+			prefix: []byte("TACO"),
+			expectedNames: [][]byte{
+				[]byte("TACOCAT"),
+				[]byte("TACOBELL"),
+			},
+		},
+		{
+			prefix:        []byte("TACOC"),
+			expectedNames: [][]byte{[]byte("TACOCAT")},
+		},
+		{
+			prefix: []byte("DingHo"),
+			expectedNames: [][]byte{
+				[]byte("DingHo-SmallPack"),
+				[]byte("DingHo-StandardPack"),
+			},
+		},
+		{
+			prefix: []byte("DingHo-S"),
+			expectedNames: [][]byte{
+				[]byte("DingHo-SmallPack"),
+				[]byte("DingHo-StandardPack"),
+			},
+		},
+		{
+			prefix:        []byte("DingHo-Small"),
+			expectedNames: [][]byte{[]byte("DingHo-SmallPack")},
+		},
+		{
+			prefix:        []byte("BostonKitchen"),
+			expectedNames: [][]byte{[]byte("BostonKitchen-CheeseSlice")},
+		},
+		{
+			prefix:        []byte(`™£´´∂ƒ∂ƒßƒ©`),
+			expectedNames: [][]byte{[]byte(`™£´´∂ƒ∂ƒßƒ©∑®ƒß∂†¬∆`)},
+		},
+		{
+			prefix: []byte{},
+			err:    "strange prefix",
+		},
+	}
+
+	for index, testCase := range testCases {
+		t.Run("lookupKVByPrefix-testcase-"+strconv.Itoa(index), func(t *testing.T) {
+			actual := make(map[string]bool)
+			_, err := qs.lookupKeysByPrefix(string(testCase.prefix), uint64(len(kvPairDBPrepareSet)), actual, 0)
+			if err != nil {
+				require.NotEmpty(t, testCase.err, testCase.prefix)
+				require.Contains(t, err.Error(), testCase.err)
+			} else {
+				require.Empty(t, testCase.err)
+				expected := make(map[string]bool)
+				for _, name := range testCase.expectedNames {
+					expected[string(name)] = true
+				}
+				// require.Equal takes expected first, then actual
+				require.Equal(t, expected, actual)
+			}
+		})
+	}
+}
+
+// BenchmarkLookupKeyByPrefix grows the kvstore table by powers of two and
+// benchmarks prefix lookup at each size, to observe whether query time scales
+// sub-linearly (O(log N)) with the DB size.
+func BenchmarkLookupKeyByPrefix(b *testing.B) {
+	// learn something from BenchmarkWritingRandomBalancesDisk
+
+	dbs, fn := dbOpenTest(b, false)
+	setDbLogging(b, dbs)
+	defer cleanupTestDb(dbs, fn, false)
+
+	// return account data, initialize DB tables from accountsInitTest
+	_ = benchmarkInitBalances(b, 1, dbs, protocol.ConsensusCurrentVersion)
+
+	qs, err := accountsInitDbQueries(dbs.Rdb.Handle)
+	require.NoError(b, err)
+	defer qs.close()
+
+	currentDBSize := 0
+	nextDBSize := 2
+	increment := 2
+
+	nameBuffer := make([]byte, 5)
+	valueBuffer := make([]byte, 5)
+
+	// from 2^1 -> 2^2 -> ... -> 2^22 sized DB
+	for bIndex := 0; bIndex < 22; bIndex++ {
+		// make writer to DB
+		tx, err := dbs.Wdb.Handle.Begin()
+		require.NoError(b, err)
+
+		// writer is only for kvstore
+		writer, err := makeAccountsSQLWriter(tx, true, true, true, true)
+		// a setup failure must fail the benchmark, not silently skip it
+		require.NoError(b, err)
+
+		// insert the next batch of random box keys/values
+		var prefix string
+		for i := 0; i < nextDBSize-currentDBSize; i++ {
+			crypto.RandBytes(nameBuffer)
+			crypto.RandBytes(valueBuffer)
+			appID := basics.AppIndex(crypto.RandUint64())
+			boxKey := logic.MakeBoxKey(appID, string(nameBuffer))
+			err = writer.upsertKvPair(boxKey, valueBuffer)
+			require.NoError(b, err)
+
+			if i == 0 {
+				prefix = logic.MakeBoxKey(appID, "")
+			}
+		}
+		err = tx.Commit()
+		require.NoError(b, err)
+		writer.close()
+
+		// benchmark the query against large DB, see if we have O(log N) speed
+		currentDBSize = nextDBSize
+		nextDBSize *= increment
+
+		b.Run("lookupKVByPrefix-DBsize"+strconv.Itoa(currentDBSize), func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				results := make(map[string]bool)
+				_, err := qs.lookupKeysByPrefix(prefix, uint64(currentDBSize), results, 0)
+				require.NoError(b, err)
+				require.True(b, len(results) >= 1)
+			}
+		})
+	}
+}
+
// upsert updates existing or inserts a new entry
func (a *compactResourcesDeltas) upsert(delta resourceDelta) {
if idx, exist := a.cache[accountCreatable{address: delta.address, index: delta.oldResource.aidx}]; exist {
@@ -1282,6 +1573,7 @@ func TestCompactResourceDeltas(t *testing.T) {
func TestResourcesDataApp(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
a := require.New(t)
@@ -1326,161 +1618,176 @@ func TestResourcesDataApp(t *testing.T) {
a.Equal(appParamsEmpty, rd.GetAppParams())
a.Equal(appLocalEmpty, rd.GetAppLocalState())
- // check empty states + non-empty params
- appParams := ledgertesting.RandomAppParams()
- rd = resourcesData{}
- rd.SetAppLocalState(appLocalEmpty)
- rd.SetAppParams(appParams, true)
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.True(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
- a.Equal(appParams, rd.GetAppParams())
- a.Equal(appLocalEmpty, rd.GetAppLocalState())
-
- appState := ledgertesting.RandomAppLocalState()
- rd.SetAppLocalState(appState)
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.True(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
- a.Equal(appParams, rd.GetAppParams())
- a.Equal(appState, rd.GetAppLocalState())
-
- // check ClearAppLocalState
- rd.ClearAppLocalState()
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.False(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
- a.Equal(appParams, rd.GetAppParams())
- a.Equal(appLocalEmpty, rd.GetAppLocalState())
-
- // check ClearAppParams
- rd.SetAppLocalState(appState)
- rd.ClearAppParams()
- a.True(rd.IsApp())
- a.False(rd.IsOwning())
- a.True(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
- a.Equal(appParamsEmpty, rd.GetAppParams())
- a.Equal(appState, rd.GetAppLocalState())
-
- // check both clear
- rd.ClearAppLocalState()
- a.False(rd.IsApp())
- a.False(rd.IsOwning())
- a.False(rd.IsHolding())
- a.True(rd.IsEmptyAppFields())
- a.True(rd.IsEmpty())
- a.Equal(appParamsEmpty, rd.GetAppParams())
- a.Equal(appLocalEmpty, rd.GetAppLocalState())
-
- // check params clear when non-empty params and empty holding
- rd = resourcesData{}
- rd.SetAppLocalState(appLocalEmpty)
- rd.SetAppParams(appParams, true)
- rd.ClearAppParams()
- a.True(rd.IsApp())
- a.False(rd.IsOwning())
- a.True(rd.IsHolding())
- a.True(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
- a.Equal(appParamsEmpty, rd.GetAppParams())
- a.Equal(appLocalEmpty, rd.GetAppLocalState())
-
- rd = resourcesData{}
- rd.SetAppLocalState(appLocalEmpty)
- a.True(rd.IsEmptyAppFields())
- a.True(rd.IsApp())
- a.False(rd.IsEmpty())
- a.Equal(rd.ResourceFlags, resourceFlagsEmptyApp)
- rd.ClearAppLocalState()
- a.False(rd.IsApp())
- a.True(rd.IsEmptyAppFields())
- a.True(rd.IsEmpty())
- a.Equal(rd.ResourceFlags, resourceFlagsNotHolding)
-
- // check migration flow (accountDataResources)
- // 1. both exist and empty
- rd = makeResourcesData(0)
- rd.SetAppLocalState(appLocalEmpty)
- rd.SetAppParams(appParamsEmpty, true)
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.True(rd.IsHolding())
- a.True(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
-
- // 2. both exist and not empty
- rd = makeResourcesData(0)
- rd.SetAppLocalState(appState)
- rd.SetAppParams(appParams, true)
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.True(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
-
- // 3. both exist: holding not empty, param is empty
- rd = makeResourcesData(0)
- rd.SetAppLocalState(appState)
- rd.SetAppParams(appParamsEmpty, true)
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.True(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
-
- // 4. both exist: holding empty, param is not empty
- rd = makeResourcesData(0)
- rd.SetAppLocalState(appLocalEmpty)
- rd.SetAppParams(appParams, true)
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.True(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
-
- // 5. holding does not exist and params is empty
- rd = makeResourcesData(0)
- rd.SetAppParams(appParamsEmpty, false)
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.False(rd.IsHolding())
- a.True(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
-
- // 6. holding does not exist and params is not empty
- rd = makeResourcesData(0)
- rd.SetAppParams(appParams, false)
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.False(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
-
- // 7. holding exist and not empty and params does not exist
- rd = makeResourcesData(0)
- rd.SetAppLocalState(appState)
- a.True(rd.IsApp())
- a.False(rd.IsOwning())
- a.True(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
-
- // 8. both do not exist
- rd = makeResourcesData(0)
- a.False(rd.IsApp())
- a.False(rd.IsOwning())
- a.False(rd.IsHolding())
- a.True(rd.IsEmptyAppFields())
- a.True(rd.IsEmpty())
+ // Since some steps use randomly generated input, the test is run N times
+ // to cover a larger search space of inputs.
+ for i := 0; i < 1000; i++ {
+ // check empty states + non-empty params
+ appParams := ledgertesting.RandomAppParams()
+ rd = resourcesData{}
+ rd.SetAppLocalState(appLocalEmpty)
+ rd.SetAppParams(appParams, true)
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.True(rd.IsHolding())
+ a.False(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+ a.Equal(appParams, rd.GetAppParams())
+ a.Equal(appLocalEmpty, rd.GetAppLocalState())
+
+ appState := ledgertesting.RandomAppLocalState()
+ rd.SetAppLocalState(appState)
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.True(rd.IsHolding())
+ a.False(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+ a.Equal(appParams, rd.GetAppParams())
+ a.Equal(appState, rd.GetAppLocalState())
+
+ // check ClearAppLocalState
+ rd.ClearAppLocalState()
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.False(rd.IsHolding())
+ a.False(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+ a.Equal(appParams, rd.GetAppParams())
+ a.Equal(appLocalEmpty, rd.GetAppLocalState())
+
+ // check ClearAppParams
+ rd.SetAppLocalState(appState)
+ rd.ClearAppParams()
+ a.True(rd.IsApp())
+ a.False(rd.IsOwning())
+ a.True(rd.IsHolding())
+ if appState.Schema.NumEntries() == 0 {
+ a.True(rd.IsEmptyAppFields())
+ } else {
+ a.False(rd.IsEmptyAppFields())
+ }
+ a.False(rd.IsEmpty())
+ a.Equal(appParamsEmpty, rd.GetAppParams())
+ a.Equal(appState, rd.GetAppLocalState())
+
+ // check both clear
+ rd.ClearAppLocalState()
+ a.False(rd.IsApp())
+ a.False(rd.IsOwning())
+ a.False(rd.IsHolding())
+ a.True(rd.IsEmptyAppFields())
+ a.True(rd.IsEmpty())
+ a.Equal(appParamsEmpty, rd.GetAppParams())
+ a.Equal(appLocalEmpty, rd.GetAppLocalState())
+
+ // check params clear when non-empty params and empty holding
+ rd = resourcesData{}
+ rd.SetAppLocalState(appLocalEmpty)
+ rd.SetAppParams(appParams, true)
+ rd.ClearAppParams()
+ a.True(rd.IsApp())
+ a.False(rd.IsOwning())
+ a.True(rd.IsHolding())
+ a.True(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+ a.Equal(appParamsEmpty, rd.GetAppParams())
+ a.Equal(appLocalEmpty, rd.GetAppLocalState())
+
+ rd = resourcesData{}
+ rd.SetAppLocalState(appLocalEmpty)
+ a.True(rd.IsEmptyAppFields())
+ a.True(rd.IsApp())
+ a.False(rd.IsEmpty())
+ a.Equal(rd.ResourceFlags, resourceFlagsEmptyApp)
+ rd.ClearAppLocalState()
+ a.False(rd.IsApp())
+ a.True(rd.IsEmptyAppFields())
+ a.True(rd.IsEmpty())
+ a.Equal(rd.ResourceFlags, resourceFlagsNotHolding)
+
+ // check migration flow (accountDataResources)
+ // 1. both exist and empty
+ rd = makeResourcesData(0)
+ rd.SetAppLocalState(appLocalEmpty)
+ rd.SetAppParams(appParamsEmpty, true)
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.True(rd.IsHolding())
+ a.True(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+
+ // 2. both exist and not empty
+ rd = makeResourcesData(0)
+ rd.SetAppLocalState(appState)
+ rd.SetAppParams(appParams, true)
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.True(rd.IsHolding())
+ a.False(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+
+ // 3. both exist: holding not empty, param is empty
+ rd = makeResourcesData(0)
+ rd.SetAppLocalState(appState)
+ rd.SetAppParams(appParamsEmpty, true)
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.True(rd.IsHolding())
+ if appState.Schema.NumEntries() == 0 {
+ a.True(rd.IsEmptyAppFields())
+ } else {
+ a.False(rd.IsEmptyAppFields())
+ }
+ a.False(rd.IsEmpty())
+
+ // 4. both exist: holding empty, param is not empty
+ rd = makeResourcesData(0)
+ rd.SetAppLocalState(appLocalEmpty)
+ rd.SetAppParams(appParams, true)
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.True(rd.IsHolding())
+ a.False(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+
+ // 5. holding does not exist and params is empty
+ rd = makeResourcesData(0)
+ rd.SetAppParams(appParamsEmpty, false)
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.False(rd.IsHolding())
+ a.True(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+
+ // 6. holding does not exist and params is not empty
+ rd = makeResourcesData(0)
+ rd.SetAppParams(appParams, false)
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.False(rd.IsHolding())
+ a.False(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+
+ // 7. holding exist and not empty and params does not exist
+ rd = makeResourcesData(0)
+ rd.SetAppLocalState(appState)
+ a.True(rd.IsApp())
+ a.False(rd.IsOwning())
+ a.True(rd.IsHolding())
+ if appState.Schema.NumEntries() == 0 {
+ a.True(rd.IsEmptyAppFields())
+ } else {
+ a.False(rd.IsEmptyAppFields())
+ }
+ a.False(rd.IsEmpty())
+ // 8. both do not exist
+ rd = makeResourcesData(0)
+ a.False(rd.IsApp())
+ a.False(rd.IsOwning())
+ a.False(rd.IsHolding())
+ a.True(rd.IsEmptyAppFields())
+ a.True(rd.IsEmpty())
+ }
}
func TestResourcesDataAsset(t *testing.T) {
@@ -2334,7 +2641,7 @@ func TestBaseAccountDataIsEmpty(t *testing.T) {
structureTesting := func(t *testing.T) {
encoding, err := json.Marshal(&empty)
zeros32 := "0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0"
- expectedEncoding := `{"Status":0,"MicroAlgos":{"Raw":0},"RewardsBase":0,"RewardedMicroAlgos":{"Raw":0},"AuthAddr":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ","TotalAppSchemaNumUint":0,"TotalAppSchemaNumByteSlice":0,"TotalExtraAppPages":0,"TotalAssetParams":0,"TotalAssets":0,"TotalAppParams":0,"TotalAppLocalStates":0,"VoteID":[` + zeros32 + `],"SelectionID":[` + zeros32 + `],"VoteFirstValid":0,"VoteLastValid":0,"VoteKeyDilution":0,"StateProofID":[` + zeros32 + `,` + zeros32 + `],"UpdateRound":0}`
+ expectedEncoding := `{"Status":0,"MicroAlgos":{"Raw":0},"RewardsBase":0,"RewardedMicroAlgos":{"Raw":0},"AuthAddr":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ","TotalAppSchemaNumUint":0,"TotalAppSchemaNumByteSlice":0,"TotalExtraAppPages":0,"TotalAssetParams":0,"TotalAssets":0,"TotalAppParams":0,"TotalAppLocalStates":0,"TotalBoxes":0,"TotalBoxBytes":0,"VoteID":[` + zeros32 + `],"SelectionID":[` + zeros32 + `],"VoteFirstValid":0,"VoteLastValid":0,"VoteKeyDilution":0,"StateProofID":[` + zeros32 + `,` + zeros32 + `],"UpdateRound":0}`
require.NoError(t, err)
require.Equal(t, expectedEncoding, string(encoding))
}
@@ -2551,6 +2858,8 @@ type mockAccountWriter struct {
rowids map[int64]basics.Address
resources map[mockResourcesKey]ledgercore.AccountResource
+ kvStore map[string][]byte
+
lastRowid int64
availRowIds []int64
}
@@ -2741,6 +3050,16 @@ func (m *mockAccountWriter) updateResource(addrid int64, aidx basics.CreatableIn
return 1, nil
}
+func (m *mockAccountWriter) upsertKvPair(key string, value []byte) error {
+ m.kvStore[key] = value
+ return nil
+}
+
+func (m *mockAccountWriter) deleteKvPair(key string) error {
+ delete(m.kvStore, key)
+ return nil
+}
+
func (m *mockAccountWriter) insertCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType, creator []byte) (rowid int64, err error) {
return 0, fmt.Errorf("insertCreatable: not implemented")
}
@@ -2984,12 +3303,13 @@ func TestAccountUnorderedUpdates(t *testing.T) {
t.Run(fmt.Sprintf("acct-perm-%d|res-perm-%d", i, j), func(t *testing.T) {
a := require.New(t)
mock2 := mock.clone()
- updatedAccounts, updatedResources, err := accountsNewRoundImpl(
- &mock2, acctVariant, resVariant, nil, config.ConsensusParams{}, latestRound,
+ updatedAccounts, updatedResources, updatedKVs, err := accountsNewRoundImpl(
+ &mock2, acctVariant, resVariant, nil, nil, config.ConsensusParams{}, latestRound,
)
a.NoError(err)
- a.Equal(3, len(updatedAccounts))
- a.Equal(3, len(updatedResources))
+ a.Len(updatedAccounts, 3)
+ a.Len(updatedResources, 3)
+ a.Empty(updatedKVs)
})
}
}
@@ -3066,12 +3386,13 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) {
a.Equal(1, len(resDeltas.misses)) // (addr2, aidx) does not exist
a.Equal(2, resDeltas.len()) // (addr1, aidx) found
- updatedAccounts, updatedResources, err := accountsNewRoundImpl(
- &mock, acctDeltas, resDeltas, nil, config.ConsensusParams{}, latestRound,
+ updatedAccounts, updatedResources, updatedKVs, err := accountsNewRoundImpl(
+ &mock, acctDeltas, resDeltas, nil, nil, config.ConsensusParams{}, latestRound,
)
a.NoError(err)
a.Equal(3, len(updatedAccounts))
a.Equal(2, len(updatedResources))
+ a.Equal(0, len(updatedKVs))
// one deletion entry for pre-existing account addr1, and one entry for in-memory account addr2
// in base accounts updates and in resources updates
@@ -3095,6 +3416,155 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) {
}
}
+func BenchmarkLRUResources(b *testing.B) {
+ var baseResources lruResources
+ baseResources.init(nil, 1000, 850)
+
+ var data persistedResourcesData
+ var has bool
+ addrs := make([]basics.Address, 850)
+ for i := 0; i < 850; i++ {
+ data.data.ApprovalProgram = make([]byte, 8096*4)
+ data.aidx = basics.CreatableIndex(1)
+ addrBytes := ([]byte(fmt.Sprintf("%d", i)))[:32]
+ var addr basics.Address
+ for i, b := range addrBytes {
+ addr[i] = b
+ }
+ addrs[i] = addr
+ baseResources.write(data, addr)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ pos := i % 850
+ data, has = baseResources.read(addrs[pos], basics.CreatableIndex(1))
+ require.True(b, has)
+ }
+}
+
+func initBoxDatabase(b *testing.B, totalBoxes, boxSize int) (db.Pair, func(), error) {
+ batchCount := 100
+ if batchCount > totalBoxes {
+ batchCount = 1
+ }
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ dbs, fn := dbOpenTest(b, false)
+ setDbLogging(b, dbs)
+ cleanup := func() {
+ cleanupTestDb(dbs, fn, false)
+ }
+
+ tx, err := dbs.Wdb.Handle.Begin()
+ require.NoError(b, err)
+ _, err = accountsInit(tx, make(map[basics.Address]basics.AccountData), proto)
+ require.NoError(b, err)
+ err = tx.Commit()
+ require.NoError(b, err)
+ err = dbs.Wdb.SetSynchronousMode(context.Background(), db.SynchronousModeOff, false)
+ require.NoError(b, err)
+
+ cnt := 0
+ for batch := 0; batch <= batchCount; batch++ {
+ tx, err = dbs.Wdb.Handle.Begin()
+ require.NoError(b, err)
+ writer, err := makeAccountsSQLWriter(tx, false, false, true, false)
+ require.NoError(b, err)
+ for boxIdx := 0; boxIdx < totalBoxes/batchCount; boxIdx++ {
+ err = writer.upsertKvPair(fmt.Sprintf("%d", cnt), make([]byte, boxSize))
+ require.NoError(b, err)
+ cnt++
+ }
+
+ err = tx.Commit()
+ require.NoError(b, err)
+ writer.close()
+ }
+ err = dbs.Wdb.SetSynchronousMode(context.Background(), db.SynchronousModeFull, true)
+ return dbs, cleanup, err
+}
+
+func BenchmarkBoxDatabaseRead(b *testing.B) {
+ getBoxNamePermutation := func(totalBoxes int) []int {
+ rand.Seed(time.Now().UnixNano())
+ boxNames := make([]int, totalBoxes)
+ for i := 0; i < totalBoxes; i++ {
+ boxNames[i] = i
+ }
+ rand.Shuffle(len(boxNames), func(x, y int) { boxNames[x], boxNames[y] = boxNames[y], boxNames[x] })
+ return boxNames
+ }
+
+ boxCnt := []int{10, 1000, 100000}
+ boxSizes := []int{2, 2048, 4 * 8096}
+ for _, totalBoxes := range boxCnt {
+ for _, boxSize := range boxSizes {
+ b.Run(fmt.Sprintf("totalBoxes=%d/boxSize=%d", totalBoxes, boxSize), func(b *testing.B) {
+ b.StopTimer()
+
+ dbs, cleanup, err := initBoxDatabase(b, totalBoxes, boxSize)
+ require.NoError(b, err)
+
+ boxNames := getBoxNamePermutation(totalBoxes)
+ lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
+ require.NoError(b, err)
+ var v sql.NullString
+ for i := 0; i < b.N; i++ {
+ var pv persistedKVData
+ boxName := boxNames[i%totalBoxes]
+ b.StartTimer()
+ err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", boxName))).Scan(&pv.round, &v)
+ b.StopTimer()
+ require.NoError(b, err)
+ require.True(b, v.Valid)
+ }
+
+ cleanup()
+ })
+ }
+ }
+
+ // test caching performance
+ lookbacks := []int{1, 32, 256, 2048}
+ for _, lookback := range lookbacks {
+ for _, boxSize := range boxSizes {
+ totalBoxes := 100000
+
+ b.Run(fmt.Sprintf("lookback=%d/boxSize=%d", lookback, boxSize), func(b *testing.B) {
+ b.StopTimer()
+
+ dbs, cleanup, err := initBoxDatabase(b, totalBoxes, boxSize)
+ require.NoError(b, err)
+
+ boxNames := getBoxNamePermutation(totalBoxes)
+ lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
+ require.NoError(b, err)
+ var v sql.NullString
+ for i := 0; i < b.N+lookback; i++ {
+ var pv persistedKVData
+ boxName := boxNames[i%totalBoxes]
+ err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", boxName))).Scan(&pv.round, &v)
+ require.NoError(b, err)
+ require.True(b, v.Valid)
+
+ // benchmark reading the potentially cached value that was read lookback boxes ago
+ if i >= lookback {
+ boxName = boxNames[(i-lookback)%totalBoxes]
+ b.StartTimer()
+ err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", boxName))).Scan(&pv.round, &v)
+ b.StopTimer()
+ require.NoError(b, err)
+ require.True(b, v.Valid)
+ }
+ }
+
+ cleanup()
+ })
+ }
+ }
+}
+
// TestAccountTopOnline ensures accountsOnlineTop return a right subset of accounts
// from the history table.
// Start with two online accounts A, B at round 1
@@ -3103,11 +3573,11 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) {
//
// addr | rnd | status
// -----|-----|--------
-// A | 1 | 1
-// B | 1 | 1
-// A | 2 | 0
-// B | 3 | 0
-// C | 3 | 1
+// A | 1 | 1
+// B | 1 | 1
+// A | 2 | 0
+// B | 3 | 0
+// C | 3 | 1
//
// Ensure
// - for round 1 A and B returned
@@ -3215,7 +3685,7 @@ func TestAccountOnlineQueries(t *testing.T) {
err = accountsPutTotals(tx, totals, false)
require.NoError(t, err)
- updatedAccts, _, err := accountsNewRound(tx, updatesCnt, compactResourcesDeltas{}, map[basics.CreatableIndex]ledgercore.ModifiedCreatable{}, proto, rnd)
+ updatedAccts, _, _, err := accountsNewRound(tx, updatesCnt, compactResourcesDeltas{}, nil, nil, proto, rnd)
require.NoError(t, err)
require.Equal(t, updatesCnt.len(), len(updatedAccts))
@@ -4120,3 +4590,150 @@ func TestRemoveOfflineStateProofID(t *testing.T) {
}
}
}
+
+func randomBaseAccountData() baseAccountData {
+ vd := baseVotingData{
+ VoteFirstValid: basics.Round(crypto.RandUint64()),
+ VoteLastValid: basics.Round(crypto.RandUint64()),
+ VoteKeyDilution: crypto.RandUint64(),
+ }
+ crypto.RandBytes(vd.VoteID[:])
+ crypto.RandBytes(vd.StateProofID[:])
+ crypto.RandBytes(vd.SelectionID[:])
+
+ baseAD := baseAccountData{
+ Status: basics.Online,
+ MicroAlgos: basics.MicroAlgos{Raw: crypto.RandUint64()},
+ RewardsBase: crypto.RandUint64(),
+ RewardedMicroAlgos: basics.MicroAlgos{Raw: crypto.RandUint64()},
+ AuthAddr: ledgertesting.RandomAddress(),
+ TotalAppSchemaNumUint: crypto.RandUint64(),
+ TotalAppSchemaNumByteSlice: crypto.RandUint64(),
+ TotalExtraAppPages: uint32(crypto.RandUint63() % uint64(math.MaxUint32)),
+ TotalAssetParams: crypto.RandUint64(),
+ TotalAssets: crypto.RandUint64(),
+ TotalAppParams: crypto.RandUint64(),
+ TotalAppLocalStates: crypto.RandUint64(),
+ baseVotingData: vd,
+ UpdateRound: crypto.RandUint64(),
+ }
+
+ return baseAD
+}
+
+func TestEncodedBaseAccountDataSize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ baseAD := randomBaseAccountData()
+ encoded := baseAD.MarshalMsg(nil)
+ require.GreaterOrEqual(t, MaxEncodedBaseAccountDataSize, len(encoded))
+}
+
+func makeString(len int) string {
+ s := ""
+ for i := 0; i < len; i++ {
+ s += string(byte(i))
+ }
+ return s
+}
+
+func randomAssetResourceData() resourcesData {
+ currentConsensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ // resourcesData is suiteable for keeping asset params, holding, app params, app local state
+ // but only asset + holding or app + local state can appear there
+ rdAsset := resourcesData{
+ Total: crypto.RandUint64(),
+ Decimals: uint32(crypto.RandUint63() % uint64(math.MaxUint32)),
+ DefaultFrozen: true,
+ // MetadataHash
+ UnitName: makeString(currentConsensusParams.MaxAssetUnitNameBytes),
+ AssetName: makeString(currentConsensusParams.MaxAssetNameBytes),
+ URL: makeString(currentConsensusParams.MaxAssetURLBytes),
+ Manager: ledgertesting.RandomAddress(),
+ Reserve: ledgertesting.RandomAddress(),
+ Freeze: ledgertesting.RandomAddress(),
+ Clawback: ledgertesting.RandomAddress(),
+
+ Amount: crypto.RandUint64(),
+ Frozen: true,
+ }
+ crypto.RandBytes(rdAsset.MetadataHash[:])
+
+ return rdAsset
+}
+
+func randomAppResourceData() resourcesData {
+ currentConsensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ rdApp := resourcesData{
+
+ SchemaNumUint: crypto.RandUint64(),
+ SchemaNumByteSlice: crypto.RandUint64(),
+ // KeyValue
+
+ // ApprovalProgram
+ // ClearStateProgram
+ // GlobalState
+ LocalStateSchemaNumUint: crypto.RandUint64(),
+ LocalStateSchemaNumByteSlice: crypto.RandUint64(),
+ GlobalStateSchemaNumUint: crypto.RandUint64(),
+ GlobalStateSchemaNumByteSlice: crypto.RandUint64(),
+ ExtraProgramPages: uint32(crypto.RandUint63() % uint64(math.MaxUint32)),
+
+ ResourceFlags: 255,
+ UpdateRound: crypto.RandUint64(),
+ }
+
+ // MaxAvailableAppProgramLen is conbined size of approval and clear state since it is bound by proto.MaxAppTotalProgramLen
+ rdApp.ApprovalProgram = make([]byte, config.MaxAvailableAppProgramLen/2)
+ crypto.RandBytes(rdApp.ApprovalProgram)
+ rdApp.ClearStateProgram = make([]byte, config.MaxAvailableAppProgramLen/2)
+ crypto.RandBytes(rdApp.ClearStateProgram)
+
+ maxGlobalState := make(basics.TealKeyValue, currentConsensusParams.MaxGlobalSchemaEntries)
+ for globalKey := uint64(0); globalKey < currentConsensusParams.MaxGlobalSchemaEntries; globalKey++ {
+ prefix := fmt.Sprintf("%d|", globalKey)
+ padding := makeString(currentConsensusParams.MaxAppKeyLen - len(prefix))
+ maxKey := prefix + padding
+ maxValue := basics.TealValue{
+ Type: basics.TealBytesType,
+ Bytes: makeString(currentConsensusParams.MaxAppSumKeyValueLens - len(maxKey)),
+ }
+ maxGlobalState[maxKey] = maxValue
+ }
+
+ maxLocalState := make(basics.TealKeyValue, currentConsensusParams.MaxLocalSchemaEntries)
+ for localKey := uint64(0); localKey < currentConsensusParams.MaxLocalSchemaEntries; localKey++ {
+ prefix := fmt.Sprintf("%d|", localKey)
+ padding := makeString(currentConsensusParams.MaxAppKeyLen - len(prefix))
+ maxKey := prefix + padding
+ maxValue := basics.TealValue{
+ Type: basics.TealBytesType,
+ Bytes: makeString(currentConsensusParams.MaxAppSumKeyValueLens - len(maxKey)),
+ }
+ maxLocalState[maxKey] = maxValue
+ }
+
+ rdApp.GlobalState = maxGlobalState
+ rdApp.KeyValue = maxLocalState
+
+ return rdApp
+}
+
+func TestEncodedBaseResourceSize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // resourcesData is suiteable for keeping asset params, holding, app params, app local state
+ // but only asset + holding or app + local state can appear there
+ rdAsset := randomAssetResourceData()
+ rdApp := randomAppResourceData()
+
+ encodedAsset := rdAsset.MarshalMsg(nil)
+ encodedApp := rdApp.MarshalMsg(nil)
+
+ require.Less(t, len(encodedAsset), len(encodedApp))
+ require.GreaterOrEqual(t, MaxEncodedBaseResourceDataSize, len(encodedApp))
+}
diff --git a/ledger/acctonline.go b/ledger/acctonline.go
index 03af7908d..e9a20046f 100644
--- a/ledger/acctonline.go
+++ b/ledger/acctonline.go
@@ -523,7 +523,7 @@ func (ao *onlineAccounts) onlineTotalsEx(rnd basics.Round) (basics.MicroAlgos, e
var roundOffsetError *RoundOffsetError
if !errors.As(err, &roundOffsetError) {
- ao.log.Errorf("onlineTotalsImpl error: %w", err)
+ ao.log.Errorf("onlineTotalsImpl error: %v", err)
}
totalsOnline, err = ao.accountsq.lookupOnlineTotalsHistory(rnd)
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index db2a99126..836a7c670 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -23,6 +23,7 @@ import (
"fmt"
"io"
"sort"
+ "strings"
"sync"
"sync/atomic"
"time"
@@ -60,12 +61,21 @@ const baseAccountsPendingAccountsWarnThreshold = 85000
// baseResourcesPendingAccountsBufferSize defines the size of the base resources pending accounts buffer size.
// At the beginning of a new round, the entries from this buffer are being flushed into the base resources map.
-const baseResourcesPendingAccountsBufferSize = 100000
+const baseResourcesPendingAccountsBufferSize = 10000
// baseResourcesPendingAccountsWarnThreshold defines the threshold at which the lruResources would generate a warning
// after we've surpassed a given pending account resources size. The warning is being generated when the pending accounts data
// is being flushed into the main base resources cache.
-const baseResourcesPendingAccountsWarnThreshold = 85000
+const baseResourcesPendingAccountsWarnThreshold = 8500
+
+// baseKVPendingBufferSize defines the size of the base KVs pending buffer size.
+// At the beginning of a new round, the entries from this buffer are being flushed into the base KVs map.
+const baseKVPendingBufferSize = 5000
+
+// baseKVPendingWarnThreshold defines the threshold at which the lruKV would generate a warning
+// after we've surpassed a given pending kv size. The warning is being generated when the pending kv data
+// is being flushed into the main base kv cache.
+const baseKVPendingWarnThreshold = 4250
// initializeCachesReadaheadBlocksStream defines how many block we're going to attempt to queue for the
// initializeCaches method before it can process and store the account changes to disk.
@@ -124,6 +134,23 @@ type modifiedResource struct {
ndeltas int
}
+// A modifiedKvValue represents a kv store change since the persistent state
+// stored in the DB (i.e., in the range of rounds covered by the accountUpdates
+// tracker).
+type modifiedKvValue struct {
+ // data stores the most recent value (nil == deleted)
+ data []byte
+
+ // oldData stores the previous vlaue (nil == didn't exist)
+ oldData []byte
+
+ // ndelta keeps track of how many times the key for this value appears in
+ // accountUpdates.deltas. This is used to evict modifiedValue entries when
+ // all changes to a key have been reflected in the kv table, and no
+ // outstanding modifications remain.
+ ndeltas int
+}
+
type accountUpdates struct {
// Connection to the database.
dbs db.Pair
@@ -146,6 +173,13 @@ type accountUpdates struct {
// address&resource that appears in deltas.
resources resourcesUpdates
+ // kvDeltas stores kvPair updates for every round after dbRound.
+ kvDeltas []map[string]ledgercore.KvValueDelta
+
+ // kvStore has the most recent kv pairs for every write/del that appears in
+ // deltas.
+ kvStore map[string]modifiedKvValue
+
// creatableDeltas stores creatable updates for every round after dbRound.
creatableDeltas []map[basics.CreatableIndex]ledgercore.ModifiedCreatable
@@ -184,6 +218,9 @@ type accountUpdates struct {
// baseResources stores the most recently used resources, at exactly dbRound
baseResources lruResources
+ // baseKVs stores the most recently used KV, at exactly dbRound
+ baseKVs lruKV
+
// logAccountUpdatesMetrics is a flag for enable/disable metrics logging
logAccountUpdatesMetrics bool
@@ -288,12 +325,264 @@ func (au *accountUpdates) close() {
}
au.baseAccounts.prune(0)
au.baseResources.prune(0)
+ au.baseKVs.prune(0)
+}
+
+// flushCaches flushes any pending data in caches so that it is fully available during future lookups.
+func (au *accountUpdates) flushCaches() {
+ au.accountsMu.Lock()
+
+ au.baseAccounts.flushPendingWrites()
+ au.baseResources.flushPendingWrites()
+ au.baseKVs.flushPendingWrites()
+
+ au.accountsMu.Unlock()
}
func (au *accountUpdates) LookupResource(rnd basics.Round, addr basics.Address, aidx basics.CreatableIndex, ctype basics.CreatableType) (ledgercore.AccountResource, basics.Round, error) {
return au.lookupResource(rnd, addr, aidx, ctype, true /* take lock */)
}
+func (au *accountUpdates) LookupKv(rnd basics.Round, key string) ([]byte, error) {
+ return au.lookupKv(rnd, key, true /* take lock */)
+}
+
+func (au *accountUpdates) lookupKv(rnd basics.Round, key string, synchronized bool) ([]byte, error) {
+ needUnlock := false
+ if synchronized {
+ au.accountsMu.RLock()
+ needUnlock = true
+ }
+ defer func() {
+ if needUnlock {
+ au.accountsMu.RUnlock()
+ }
+ }()
+
+ // TODO: This loop and round handling is copied from other routines like
+ // lookupResource. I believe that it is overly cautious, as it always reruns
+ // the lookup if the DB round does not match the expected round. However, as
+ // long as the db round has not advanced too far (greater than `rnd`), I
+ // believe it would be valid to use. In the interest of minimizing changes,
+ // I'm not doing that now.
+
+ for {
+ currentDbRound := au.cachedDBRound
+ currentDeltaLen := len(au.deltas)
+ offset, err := au.roundOffset(rnd)
+ if err != nil {
+ return nil, err
+ }
+
+ // check if we have this key in `kvStore`, as that means the change we
+ // care about is in kvDeltas (and maybe just kvStore itself)
+ mval, indeltas := au.kvStore[key]
+ if indeltas {
+ // Check if this is the most recent round, in which case, we can
+ // use a cache of the most recent kvStore state
+ if offset == uint64(len(au.kvDeltas)) {
+ return mval.data, nil
+ }
+
+ // the key is in the deltas, but we don't know if it appears in the
+ // delta range of [0..offset-1], so we'll need to check. Walk deltas
+ // backwards so later updates take priority.
+ for offset > 0 {
+ offset--
+ mval, ok := au.kvDeltas[offset][key]
+ if ok {
+ return mval.Data, nil
+ }
+ }
+ } else {
+ // we know that the key is not in kvDeltas - so there is no point in scanning it.
+ // we've going to fall back to search in the database, but before doing so, we should
+ // update the rnd so that it would point to the end of the known delta range.
+ // ( that would give us the best validity range )
+ rnd = currentDbRound + basics.Round(currentDeltaLen)
+ }
+
+ // check the baseKV cache
+ if pbd, has := au.baseKVs.read(key); has {
+ // we don't technically need this, since it's already in the baseKV, however, writing this over
+ // would ensure that we promote this field.
+ au.baseKVs.writePending(pbd, key)
+ return pbd.value, nil
+ }
+
+ if synchronized {
+ au.accountsMu.RUnlock()
+ needUnlock = false
+ }
+
+ // No updates of this account in kvDeltas; use on-disk DB. The check in
+ // roundOffset() made sure the round is exactly the one present in the
+ // on-disk DB.
+
+ persistedData, err := au.accountsq.lookupKeyValue(key)
+ if err != nil {
+ return nil, err
+ }
+
+ if persistedData.round == currentDbRound {
+ // if we read actual data return it. This includes deleted values
+ // where persistedData.value == nil to avoid unnecessary db lookups
+ // for deleted KVs.
+ au.baseKVs.writePending(persistedData, key)
+ return persistedData.value, nil
+ }
+
+ // The db round is unexpected...
+ if synchronized {
+ if persistedData.round < currentDbRound {
+ // Somehow the db is LOWER than it should be.
+ au.log.Errorf("accountUpdates.lookupKvPair: database round %d is behind in-memory round %d", persistedData.round, currentDbRound)
+ return nil, &StaleDatabaseRoundError{databaseRound: persistedData.round, memoryRound: currentDbRound}
+ }
+ // The db is higher, so a write must have happened. Try again.
+ au.accountsMu.RLock()
+ needUnlock = true
+ // WHY BOTH - seems the goal is just to wait until the au is aware of progress. au.cachedDBRound should be enough?
+ for currentDbRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
+ au.accountsReadCond.Wait()
+ }
+ } else {
+ // in non-sync mode, we don't wait since we already assume that we're synchronized.
+ au.log.Errorf("accountUpdates.lookupKvPair: database round %d mismatching in-memory round %d", persistedData.round, currentDbRound)
+ return nil, &MismatchingDatabaseRoundError{databaseRound: persistedData.round, memoryRound: currentDbRound}
+ }
+
+ }
+}
+
+func (au *accountUpdates) LookupKeysByPrefix(round basics.Round, keyPrefix string, maxKeyNum uint64) ([]string, error) {
+ return au.lookupKeysByPrefix(round, keyPrefix, maxKeyNum, true /* take lock */)
+}
+
+func (au *accountUpdates) lookupKeysByPrefix(round basics.Round, keyPrefix string, maxKeyNum uint64, synchronized bool) (resultKeys []string, err error) {
+ var results map[string]bool
+ // keep track of the number of result key with value
+ var resultCount uint64
+
+ needUnlock := false
+ if synchronized {
+ au.accountsMu.RLock()
+ needUnlock = true
+ }
+ defer func() {
+ if needUnlock {
+ au.accountsMu.RUnlock()
+ }
+ // preparation of result happens in deferring function
+ // prepare result only when err != nil
+ if err == nil {
+ resultKeys = make([]string, 0, resultCount)
+ for resKey, present := range results {
+ if present {
+ resultKeys = append(resultKeys, resKey)
+ }
+ }
+ }
+ }()
+
+ // TODO: This loop and round handling is copied from other routines like
+ // lookupResource. I believe that it is overly cautious, as it always reruns
+ // the lookup if the DB round does not match the expected round. However, as
+ // long as the db round has not advanced too far (greater than `rnd`), I
+ // believe it would be valid to use. In the interest of minimizing changes,
+ // I'm not doing that now.
+
+ for {
+ currentDBRound := au.cachedDBRound
+ currentDeltaLen := len(au.deltas)
+ offset, rndErr := au.roundOffset(round)
+ if rndErr != nil {
+ return nil, rndErr
+ }
+
+ // reset `results` to be empty each iteration
+ // if db round does not match the round number returned from DB query, start over again
+ // NOTE: `results` is maintained as we walk backwards from the latest round, to DB
+ // IT IS NOT SIMPLY A SET STORING KEY NAMES!
+ // - if the boolean for the key is true: we consider the key is still valid in later round
+ // - otherwise, we consider that the key is deleted in later round, and we will not return it as part of result
+ // Thus: `resultCount` keeps track of how many VALID keys in the `results`
+ // DO NOT TRY `len(results)` TO SEE NUMBER OF VALID KEYS!
+ results = map[string]bool{}
+ resultCount = 0
+
+ for offset > 0 {
+ offset--
+ for keyInRound, mv := range au.kvDeltas[offset] {
+ if !strings.HasPrefix(keyInRound, keyPrefix) {
+ continue
+ }
+ // whether it is set or deleted in later round, if such modification exists in later round
+ // we just ignore the earlier insert
+ if _, ok := results[keyInRound]; ok {
+ continue
+ }
+ if mv.Data == nil {
+ results[keyInRound] = false
+ } else {
+ // set such key to be valid with value
+ results[keyInRound] = true
+ resultCount++
+ // check if the size of `results` reaches `maxKeyNum`
+ // if so just return the list of keys
+ if resultCount == maxKeyNum {
+ return
+ }
+ }
+ }
+ }
+
+ round = currentDBRound + basics.Round(currentDeltaLen)
+
+ // after this line, we should dig into DB I guess
+ // OTHER LOOKUPS USE "base" caches here.
+ if synchronized {
+ au.accountsMu.RUnlock()
+ needUnlock = false
+ }
+
+ // NOTE: the kv cache isn't used here because the data structure doesn't support range
+ // queries. It may be preferable to increase the SQLite cache size if these reads become
+ // too slow.
+
+ // Finishing searching updates of this account in kvDeltas, keep going: use on-disk DB
+ // to find the rest matching keys in DB.
+ dbRound, dbErr := au.accountsq.lookupKeysByPrefix(keyPrefix, maxKeyNum, results, resultCount)
+ if dbErr != nil {
+ return nil, dbErr
+ }
+ if dbRound == currentDBRound {
+ return
+ }
+
+ // The DB round is unexpected... '_>'?
+ if synchronized {
+ if dbRound < currentDBRound {
+ // does not make sense if DB round is earlier than it should be
+ au.log.Errorf("accountUpdates.lookupKvPair: database round %d is behind in-memory round %d", dbRound, currentDBRound)
+ err = &StaleDatabaseRoundError{databaseRound: dbRound, memoryRound: currentDBRound}
+ return
+ }
+ // The DB round is higher than expected, so a write-into-DB must have happened. Start over again.
+ au.accountsMu.RLock()
+ needUnlock = true
+ // WHY BOTH - seems the goal is just to wait until the au is aware of progress. au.cachedDBRound should be enough?
+ for currentDBRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
+ au.accountsReadCond.Wait()
+ }
+ } else {
+ au.log.Errorf("accountUpdates.lookupKvPair: database round %d mismatching in-memory round %d", dbRound, currentDBRound)
+ err = &MismatchingDatabaseRoundError{databaseRound: dbRound, memoryRound: currentDBRound}
+ return
+ }
+ }
+}
+
// LookupWithoutRewards returns the account data for a given address at a given round.
func (au *accountUpdates) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (data ledgercore.AccountData, validThrough basics.Round, err error) {
data, validThrough, _, _, err = au.lookupWithoutRewards(rnd, addr, true /* take lock*/)
@@ -538,6 +827,8 @@ type accountUpdatesLedgerEvaluator struct {
prevHeader bookkeeping.BlockHeader
}
+// FlushCaches is a no-op for accountUpdatesLedgerEvaluator.
+func (aul *accountUpdatesLedgerEvaluator) FlushCaches() {}
+
// GenesisHash returns the genesis hash
func (aul *accountUpdatesLedgerEvaluator) GenesisHash() crypto.Digest {
return aul.au.ledger.GenesisHash()
@@ -583,7 +874,7 @@ func (aul *accountUpdatesLedgerEvaluator) CheckDup(config.ConsensusParams, basic
return fmt.Errorf("accountUpdatesLedgerEvaluator: tried to check for dup during accountUpdates initialization ")
}
-// lookupWithoutRewards returns the account balance for a given address at a given round, without the reward
+// LookupWithoutRewards returns the account balance for a given address at a given round, without the reward
func (aul *accountUpdatesLedgerEvaluator) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, error) {
data, validThrough, _, _, err := aul.au.lookupWithoutRewards(rnd, addr, false /*don't sync*/)
if err != nil {
@@ -603,6 +894,10 @@ func (aul *accountUpdatesLedgerEvaluator) LookupAsset(rnd basics.Round, addr bas
return ledgercore.AssetResource{AssetParams: r.AssetParams, AssetHolding: r.AssetHolding}, err
}
+// LookupKv returns the value of the given kv key at the given round, delegating
+// to the accountUpdates tracker without synchronizing, like the other aul lookups.
+func (aul *accountUpdatesLedgerEvaluator) LookupKv(rnd basics.Round, key string) ([]byte, error) {
+ return aul.au.lookupKv(rnd, key, false /* don't sync */)
+}
+
// GetCreatorForRound returns the asset/app creator for a given asset/app index at a given round
func (aul *accountUpdatesLedgerEvaluator) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) {
return aul.au.getCreatorForRound(rnd, cidx, ctype, false /* don't sync */)
@@ -665,14 +960,17 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou
au.versions = []protocol.ConsensusVersion{hdr.CurrentProtocol}
au.deltas = nil
+ au.kvDeltas = nil
au.creatableDeltas = nil
au.accounts = make(map[basics.Address]modifiedAccount)
- au.resources = resourcesUpdates(make(map[accountCreatable]modifiedResource))
+ au.resources = make(resourcesUpdates)
+ au.kvStore = make(map[string]modifiedKvValue)
au.creatables = make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable)
au.deltasAccum = []int{0}
au.baseAccounts.init(au.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold)
au.baseResources.init(au.log, baseResourcesPendingAccountsBufferSize, baseResourcesPendingAccountsWarnThreshold)
+ au.baseKVs.init(au.log, baseKVPendingBufferSize, baseKVPendingWarnThreshold)
return
}
@@ -692,10 +990,12 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
au.deltas = append(au.deltas, delta.Accts)
au.versions = append(au.versions, blk.CurrentProtocol)
au.creatableDeltas = append(au.creatableDeltas, delta.Creatables)
+ au.kvDeltas = append(au.kvDeltas, delta.KvMods)
au.deltasAccum = append(au.deltasAccum, delta.Accts.Len()+au.deltasAccum[len(au.deltasAccum)-1])
au.baseAccounts.flushPendingWrites()
au.baseResources.flushPendingWrites()
+ au.baseKVs.flushPendingWrites()
for i := 0; i < delta.Accts.Len(); i++ {
addr, data := delta.Accts.GetByIdx(i)
@@ -727,6 +1027,14 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
au.resources.set(key, mres)
}
+ for k, v := range delta.KvMods {
+ mvalue := au.kvStore[k]
+ mvalue.ndeltas++
+ mvalue.data = v.Data
+ // leave mvalue.oldData alone
+ au.kvStore[k] = mvalue
+ }
+
for cidx, cdelta := range delta.Creatables {
mcreat := au.creatables[cidx]
mcreat.Creator = cdelta.Creator
@@ -743,6 +1051,8 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
au.baseAccounts.prune(newBaseAccountSize)
newBaseResourcesSize := (len(au.resources) + 1) + baseResourcesPendingAccountsBufferSize
au.baseResources.prune(newBaseResourcesSize)
+ newBaseKVSize := (len(au.kvStore) + 1) + baseKVPendingBufferSize
+ au.baseKVs.prune(newBaseKVSize)
}
// lookupLatest returns the account data for a given address for the latest round.
@@ -1003,9 +1313,8 @@ func (au *accountUpdates) lookupResource(rnd basics.Round, addr basics.Address,
return macct.resource, rnd, nil
}
// the account appears in the deltas, but we don't know if it appears in the
- // delta range of [0..offset], so we'll need to check :
- // Traverse the deltas backwards to ensure that later updates take
- // priority if present.
+ // delta range of [0..offset-1], so we'll need to check. Walk deltas
+ // backwards to ensure that later updates take priority if present.
for offset > 0 {
offset--
r, ok := au.deltas[offset].GetResource(addr, aidx, ctype)
@@ -1031,6 +1340,12 @@ func (au *accountUpdates) lookupResource(rnd basics.Round, addr basics.Address,
return macct.AccountResource(), rnd, nil
}
+ // check baseResources again to see if the resource is known to not exist
+ if au.baseResources.readNotFound(addr, aidx) {
+ // the resource does not exist
+ return ledgercore.AccountResource{}, rnd, nil
+ }
+
if synchronized {
au.accountsMu.RUnlock()
needUnlock = false
@@ -1050,6 +1365,7 @@ func (au *accountUpdates) lookupResource(rnd basics.Round, addr basics.Address,
au.baseResources.writePending(persistedData, addr)
return persistedData.AccountResource(), rnd, nil
}
+ au.baseResources.writeNotFoundPending(addr, aidx)
// otherwise return empty
return ledgercore.AccountResource{}, rnd, nil
}
@@ -1105,9 +1421,8 @@ func (au *accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add
return macct.data, rnd, rewardsVersion, rewardsLevel, nil
}
// the account appears in the deltas, but we don't know if it appears in the
- // delta range of [0..offset], so we'll need to check :
- // Traverse the deltas backwards to ensure that later updates take
- // priority if present.
+ // delta range of [0..offset-1], so we'll need to check. Walk deltas
+ // backwards to ensure that later updates take priority if present.
for offset > 0 {
offset--
d, ok := au.deltas[offset].GetData(addr)
@@ -1133,6 +1448,12 @@ func (au *accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add
return macct.accountData.GetLedgerCoreAccountData(), rnd, rewardsVersion, rewardsLevel, nil
}
+ // check baseAccounts again to see if the account is known to not exist
+ if au.baseAccounts.readNotFound(addr) {
+ // the account does not exist
+ return ledgercore.AccountData{}, rnd, rewardsVersion, rewardsLevel, nil
+ }
+
if synchronized {
au.accountsMu.RUnlock()
needUnlock = false
@@ -1152,6 +1473,7 @@ func (au *accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add
au.baseAccounts.writePending(persistedData)
return persistedData.accountData.GetLedgerCoreAccountData(), rnd, rewardsVersion, rewardsLevel, nil
}
+ au.baseAccounts.writeNotFoundPending(addr)
// otherwise return empty
return ledgercore.AccountData{}, rnd, rewardsVersion, rewardsLevel, nil
}
@@ -1309,6 +1631,7 @@ func (au *accountUpdates) prepareCommit(dcc *deferredCommitContext) error {
// being updated multiple times. When that happen, we can safely omit the intermediate updates.
dcc.compactAccountDeltas = makeCompactAccountDeltas(au.deltas[:offset], dcc.oldBase, setUpdateRound, au.baseAccounts)
dcc.compactResourcesDeltas = makeCompactResourceDeltas(au.deltas[:offset], dcc.oldBase, setUpdateRound, au.baseAccounts, au.baseResources)
+ dcc.compactKvDeltas = compactKvDeltas(au.kvDeltas[:offset])
dcc.compactCreatableDeltas = compactCreatableDeltas(au.creatableDeltas[:offset])
au.accountsMu.RUnlock()
@@ -1322,8 +1645,8 @@ func (au *accountUpdates) prepareCommit(dcc *deferredCommitContext) error {
return nil
}
-// commitRound closure is called within the same transaction for all trackers
-// it receives current offset and dbRound
+// commitRound is called within the same transaction for all trackers it
+// receives current offset and dbRound
func (au *accountUpdates) commitRound(ctx context.Context, tx *sql.Tx, dcc *deferredCommitContext) (err error) {
offset := dcc.offset
dbRound := dcc.oldBase
@@ -1375,7 +1698,7 @@ func (au *accountUpdates) commitRound(ctx context.Context, tx *sql.Tx, dcc *defe
// the updates of the actual account data is done last since the accountsNewRound would modify the compactDeltas old values
// so that we can update the base account back.
- dcc.updatedPersistedAccounts, dcc.updatedPersistedResources, err = accountsNewRound(tx, dcc.compactAccountDeltas, dcc.compactResourcesDeltas, dcc.compactCreatableDeltas, dcc.genesisProto, dbRound+basics.Round(offset))
+ dcc.updatedPersistedAccounts, dcc.updatedPersistedResources, dcc.updatedPersistedKVs, err = accountsNewRound(tx, dcc.compactAccountDeltas, dcc.compactResourcesDeltas, dcc.compactKvDeltas, dcc.compactCreatableDeltas, dcc.genesisProto, dbRound+basics.Round(offset))
if err != nil {
return err
}
@@ -1453,6 +1776,26 @@ func (au *accountUpdates) postCommit(ctx context.Context, dcc *deferredCommitCon
}
}
+ for key, out := range dcc.compactKvDeltas {
+ cnt := out.ndeltas
+ mval, ok := au.kvStore[key]
+ if !ok {
+ au.log.Panicf("inconsistency: flushed %d changes to key %s, but not in au.kvStore", cnt, key)
+ }
+ if cnt > mval.ndeltas {
+ au.log.Panicf("inconsistency: flushed %d changes to key %s, but au.kvStore had %d", cnt, key, mval.ndeltas)
+ } else if cnt == mval.ndeltas {
+ delete(au.kvStore, key)
+ } else {
+ mval.ndeltas -= cnt
+ au.kvStore[key] = mval
+ }
+ }
+
+ for key, persistedKV := range dcc.updatedPersistedKVs {
+ au.baseKVs.write(persistedKV, key)
+ }
+
for cidx, modCrt := range dcc.compactCreatableDeltas {
cnt := modCrt.Ndeltas
mcreat, ok := au.creatables[cidx]
@@ -1487,6 +1830,7 @@ func (au *accountUpdates) postCommit(ctx context.Context, dcc *deferredCommitCon
au.deltasAccum = au.deltasAccum[offset:]
au.versions = au.versions[offset:]
au.roundTotals = au.roundTotals[offset:]
+ au.kvDeltas = au.kvDeltas[offset:]
au.creatableDeltas = au.creatableDeltas[offset:]
au.cachedDBRound = newBase
@@ -1518,6 +1862,31 @@ func (au *accountUpdates) postCommit(ctx context.Context, dcc *deferredCommitCon
func (au *accountUpdates) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
}
+// compactKvDeltas takes an array of kv deltas (one array entry per round), and
+// compacts the array into a single map that contains all the
+// changes. Intermediate changes are eliminated. It counts the number of
+// changes per key by specifying it in the ndeltas field of the
+// modifiedKvValue. The modifiedKvValues in the returned map have the earliest
+// mv.oldData, and the newest mv.data.
+func compactKvDeltas(kvDeltas []map[string]ledgercore.KvValueDelta) map[string]modifiedKvValue {
+ if len(kvDeltas) == 0 {
+ return nil
+ }
+ outKvDeltas := make(map[string]modifiedKvValue)
+ for _, roundKv := range kvDeltas {
+ for key, current := range roundKv {
+ prev, ok := outKvDeltas[key]
+ if !ok { // Record only the first OldData
+ prev.oldData = current.OldData
+ }
+ prev.data = current.Data // Replace with newest Data
+ prev.ndeltas++
+ outKvDeltas[key] = prev
+ }
+ }
+ return outKvDeltas
+}
+
// compactCreatableDeltas takes an array of creatables map deltas ( one array entry per round ), and compact the array into a single
// map that contains all the deltas changes. While doing that, the function eliminate any intermediate changes.
// It counts the number of changes per round by specifying it in the ndeltas field of the modifiedCreatable.
@@ -1560,6 +1929,7 @@ func (au *accountUpdates) vacuumDatabase(ctx context.Context) (err error) {
// rowid are flushed.
au.baseAccounts.prune(0)
au.baseResources.prune(0)
+ au.baseKVs.prune(0)
startTime := time.Now()
vacuumExitCh := make(chan struct{}, 1)
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index edd44549e..a19217fed 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -35,6 +35,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger/internal"
"github.com/algorand/go-algorand/ledger/ledgercore"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
@@ -1216,7 +1217,7 @@ func TestListCreatables(t *testing.T) {
// sync with the database
var updates compactAccountDeltas
var resUpdates compactResourcesDeltas
- _, _, err = accountsNewRound(tx, updates, resUpdates, ctbsWithDeletes, proto, basics.Round(1))
+ _, _, _, err = accountsNewRound(tx, updates, resUpdates, nil, ctbsWithDeletes, proto, basics.Round(1))
require.NoError(t, err)
// nothing left in cache
au.creatables = make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable)
@@ -1232,14 +1233,315 @@ func TestListCreatables(t *testing.T) {
// ******* Results are obtained from the database and from the cache *******
// ******* Deletes are in the database and in the cache *******
// sync with the database. This has deletes synced to the database.
- _, _, err = accountsNewRound(tx, updates, resUpdates, au.creatables, proto, basics.Round(1))
+ _, _, _, err = accountsNewRound(tx, updates, resUpdates, nil, au.creatables, proto, basics.Round(1))
require.NoError(t, err)
- // get new creatables in the cache. There will be deletes in the cache from the previous batch.
+ // get new creatables in the cache. There will be deletes in the cache from the previous batch.
au.creatables = randomCreatableSampling(3, ctbsList, randomCtbs,
expectedDbImage, numElementsPerSegement)
listAndCompareComb(t, au, expectedDbImage)
}
+func TestBoxNamesByAppIDs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ initialBlocksCount := 1
+ accts := make(map[basics.Address]basics.AccountData)
+
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ ml := makeMockLedgerForTracker(t, true, initialBlocksCount, protocol.ConsensusCurrentVersion,
+ []map[basics.Address]basics.AccountData{accts},
+ )
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ au, _ := newAcctUpdates(t, ml, conf)
+ defer au.close()
+
+ knownCreatables := make(map[basics.CreatableIndex]bool)
+ opts := auNewBlockOpts{ledgercore.AccountDeltas{}, protocol.ConsensusCurrentVersion, protoParams, knownCreatables}
+
+ testingBoxNames := []string{
+ ` `,
+ ` `,
+ ` % `,
+ ` ? = % ;`,
+ `; DROP *;`,
+ `OR 1 = 1;`,
+ `" ; SELECT * FROM kvstore; DROP acctrounds; `,
+ `; SELECT key from kvstore WHERE key LIKE %;`,
+ `?&%!=`,
+ "SELECT * FROM kvstore " + string([]byte{0, 0}) + " WHERE key LIKE %; ",
+ `b64:APj/AA==`,
+ `str:123.3/aa\\0`,
+ string([]byte{0, 255, 254, 254}),
+ string([]byte{0, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF}),
+ string([]byte{'%', 'a', 'b', 'c', 0, 0, '%', 'a', '!'}),
+ `
+`,
+ `™£´´∂ƒ∂ƒßƒ©∑®ƒß∂†¬∆`,
+ `∑´´˙©˚¬∆ßåƒ√¬`,
+ `背负青天而莫之夭阏者,而后乃今将图南。`,
+ `於浩歌狂熱之際中寒﹔於天上看見深淵。`,
+ `於一切眼中看見無所有﹔於無所希望中得救。`,
+ `有一遊魂,化為長蛇,口有毒牙。`,
+ `不以嚙人,自嚙其身,終以殞顛。`,
+ `那些智力超常的人啊`,
+ `认为已经,熟悉了云和闪电的脾气`,
+ `就不再迷惑,就不必了解自己,世界和他人`,
+ `每天只管,被微风吹拂,与猛虎谈情`,
+ `他们从来,不需要楼梯,只有窗口`,
+ `把一切交付于梦境,和优美的浪潮`,
+ `在这颗行星所有的酒馆,青春自由似乎理所应得`,
+ `面向涣散的未来,只唱情歌,看不到坦克`,
+ `在科学和啤酒都不能安抚的夜晚`,
+ `他们丢失了四季,惶惑之行开始`,
+ `这颗行星所有的酒馆,无法听到远方的呼喊`,
+ `野心勃勃的灯火,瞬间吞没黑暗的脸庞`,
+ }
+
+ appIDset := make(map[basics.AppIndex]struct{}, len(testingBoxNames))
+ boxNameToAppID := make(map[string]basics.AppIndex, len(testingBoxNames))
+ var currentRound basics.Round
+
+ // keep adding one box key and one random appID (non-duplicated)
+ for i, boxName := range testingBoxNames {
+ currentRound = basics.Round(i + 1)
+
+ var appID basics.AppIndex
+ for {
+ appID = basics.AppIndex(crypto.RandUint64())
+ _, preExisting := appIDset[appID]
+ if !preExisting {
+ break
+ }
+ }
+
+ appIDset[appID] = struct{}{}
+ boxNameToAppID[boxName] = appID
+
+ boxChange := ledgercore.KvValueDelta{Data: []byte(boxName)}
+ auNewBlock(t, currentRound, au, accts, opts, map[string]ledgercore.KvValueDelta{
+ logic.MakeBoxKey(appID, boxName): boxChange,
+ })
+ auCommitSync(t, currentRound, au, ml)
+
+ // ensure rounds
+ rnd := au.latest()
+ require.Equal(t, currentRound, rnd)
+ if uint64(currentRound) > conf.MaxAcctLookback {
+ require.Equal(t, basics.Round(uint64(currentRound)-conf.MaxAcctLookback), au.cachedDBRound)
+ } else {
+ require.Equal(t, basics.Round(0), au.cachedDBRound)
+ }
+
+ // check input, see all present keys are all still there
+ for _, storedBoxName := range testingBoxNames[:i+1] {
+ res, err := au.LookupKeysByPrefix(currentRound, logic.MakeBoxKey(boxNameToAppID[storedBoxName], ""), 10000)
+ require.NoError(t, err)
+ require.Len(t, res, 1)
+ require.Equal(t, logic.MakeBoxKey(boxNameToAppID[storedBoxName], storedBoxName), res[0])
+ }
+ }
+
+ // removing inserted boxes
+ for _, boxName := range testingBoxNames {
+ currentRound++
+
+ // remove inserted box
+ appID := boxNameToAppID[boxName]
+ auNewBlock(t, currentRound, au, accts, opts, map[string]ledgercore.KvValueDelta{
+ logic.MakeBoxKey(appID, boxName): {},
+ })
+ auCommitSync(t, currentRound, au, ml)
+
+ // ensure recently removed key is not present, and it is not part of the result
+ res, err := au.LookupKeysByPrefix(currentRound, logic.MakeBoxKey(boxNameToAppID[boxName], ""), 10000)
+ require.NoError(t, err)
+ require.Len(t, res, 0)
+ }
+}
+
+func TestKVCache(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ initialBlocksCount := 1
+ accts := make(map[basics.Address]basics.AccountData)
+
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ ml := makeMockLedgerForTracker(t, true, initialBlocksCount, protocol.ConsensusCurrentVersion,
+ []map[basics.Address]basics.AccountData{accts},
+ )
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ au, _ := newAcctUpdates(t, ml, conf)
+ defer au.close()
+
+ knownCreatables := make(map[basics.CreatableIndex]bool)
+ opts := auNewBlockOpts{ledgercore.AccountDeltas{}, protocol.ConsensusCurrentVersion, protoParams, knownCreatables}
+
+ kvCnt := 1000
+ kvsPerBlock := 100
+ curKV := 0
+ var currentRound basics.Round
+ currentDBRound := basics.Round(1)
+
+ kvMap := make(map[string][]byte)
+ for i := 0; i < kvCnt; i++ {
+ kvMap[fmt.Sprintf("%d", i)] = []byte(fmt.Sprintf("value%d", i))
+ }
+
+ // add kvsPerBlock KVs on each iteration. The first kvCnt/kvsPerBlock
+ // iterations produce a block with kvCnt kv manipulations. The last
+ // conf.MaxAcctLookback iterations are meant to verify the contents of the cache
+ // are correct after every kv containing block has been committed.
+ for i := 0; i < kvCnt/kvsPerBlock+int(conf.MaxAcctLookback); i++ {
+ currentRound = currentRound + 1
+ kvMods := make(map[string]ledgercore.KvValueDelta)
+ if i < kvCnt/kvsPerBlock {
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", curKV)
+ curKV++
+ val := kvMap[name]
+ kvMods[name] = ledgercore.KvValueDelta{Data: val, OldData: nil}
+ }
+ }
+
+ auNewBlock(t, currentRound, au, accts, opts, kvMods)
+ auCommitSync(t, currentRound, au, ml)
+
+ // ensure rounds
+ rnd := au.latest()
+ require.Equal(t, currentRound, rnd)
+ if uint64(currentRound) > conf.MaxAcctLookback {
+ require.Equal(t, basics.Round(uint64(currentRound)-conf.MaxAcctLookback), au.cachedDBRound)
+ } else {
+ require.Equal(t, basics.Round(0), au.cachedDBRound)
+ }
+
+ // verify cache doesn't contain the new kvs until committed to DB.
+ for name := range kvMods {
+ _, has := au.baseKVs.read(name)
+ require.False(t, has)
+ }
+
+ // verify committed kvs appear in the kv cache
+ for ; currentDBRound <= au.cachedDBRound; currentDBRound++ {
+ startKV := (currentDBRound - 1) * basics.Round(kvsPerBlock)
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", uint64(startKV)+uint64(j))
+ persistedValue, has := au.baseKVs.read(name)
+ require.True(t, has)
+ require.Equal(t, kvMap[name], persistedValue.value)
+ }
+ }
+ }
+
+ // updating inserted KVs
+ curKV = 0
+ for i := 0; i < kvCnt/kvsPerBlock+int(conf.MaxAcctLookback); i++ {
+ currentRound = currentRound + 1
+
+ kvMods := make(map[string]ledgercore.KvValueDelta)
+ if i < kvCnt/kvsPerBlock {
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", curKV)
+ val := fmt.Sprintf("modified value%d", curKV)
+ kvMods[name] = ledgercore.KvValueDelta{Data: []byte(val)}
+ curKV++
+ }
+ }
+
+ auNewBlock(t, currentRound, au, accts, opts, kvMods)
+ auCommitSync(t, currentRound, au, ml)
+
+ // ensure rounds
+ rnd := au.latest()
+ require.Equal(t, currentRound, rnd)
+ require.Equal(t, basics.Round(uint64(currentRound)-conf.MaxAcctLookback), au.cachedDBRound)
+
+ // verify cache doesn't contain updated kv values that haven't been committed to db
+ if i < kvCnt/kvsPerBlock {
+ for name := range kvMods {
+ persistedValue, has := au.baseKVs.read(name)
+ require.True(t, has)
+ require.Equal(t, kvMap[name], persistedValue.value)
+ }
+ }
+
+ // verify committed updated kv values appear in the kv cache
+ for ; currentDBRound <= au.cachedDBRound; currentDBRound++ {
+ lookback := basics.Round(kvCnt/kvsPerBlock + int(conf.MaxAcctLookback) + 1)
+ if currentDBRound < lookback {
+ continue
+ }
+
+ startKV := (currentDBRound - lookback) * basics.Round(kvsPerBlock)
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", uint64(startKV)+uint64(j))
+ persistedValue, has := au.baseKVs.read(name)
+ require.True(t, has)
+ expectedValue := fmt.Sprintf("modified value%s", name)
+ require.Equal(t, expectedValue, string(persistedValue.value))
+ }
+ }
+ }
+
+ // deleting KVs
+ curKV = 0
+ for i := 0; i < kvCnt/kvsPerBlock+int(conf.MaxAcctLookback); i++ {
+ currentRound = currentRound + 1
+
+ kvMods := make(map[string]ledgercore.KvValueDelta)
+ if i < kvCnt/kvsPerBlock {
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", curKV)
+ // needs an old data, else optimized away.
+ // if oldData = "" there is the best chance of a bug, so we use that
+ kvMods[name] = ledgercore.KvValueDelta{Data: nil, OldData: []byte("")}
+ curKV++
+ }
+ }
+
+ auNewBlock(t, currentRound, au, accts, opts, kvMods)
+ auCommitSync(t, currentRound, au, ml)
+
+ // ensure rounds
+ rnd := au.latest()
+ require.Equal(t, currentRound, rnd)
+ require.Equal(t, basics.Round(uint64(currentRound)-conf.MaxAcctLookback), au.cachedDBRound)
+
+ // verify cache doesn't contain updated kv values that haven't been committed to db
+ if i < kvCnt/kvsPerBlock {
+ for name := range kvMods {
+ persistedValue, has := au.baseKVs.read(name)
+ require.True(t, has)
+ value := fmt.Sprintf("modified value%s", name)
+ require.Equal(t, value, string(persistedValue.value))
+ }
+ }
+
+ // verify committed updated kv values appear in the kv cache
+ for ; currentDBRound <= au.cachedDBRound; currentDBRound++ {
+ lookback := basics.Round(2*(kvCnt/kvsPerBlock+int(conf.MaxAcctLookback)) + 1)
+ if currentDBRound < lookback {
+ continue
+ }
+
+ startKV := (currentDBRound - lookback) * basics.Round(kvsPerBlock)
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", uint64(startKV)+uint64(j))
+ persistedValue, has := au.baseKVs.read(name)
+ require.True(t, has)
+ require.True(t, persistedValue.value == nil)
+ }
+ }
+ }
+}
+
func accountsAll(tx *sql.Tx) (bals map[basics.Address]basics.AccountData, err error) {
rows, err := tx.Query("SELECT rowid, address, data FROM accountbase")
if err != nil {
@@ -1319,7 +1621,7 @@ func BenchmarkLargeMerkleTrieRebuild(b *testing.B) {
}
err := ml.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- _, _, err = accountsNewRound(tx, updates, compactResourcesDeltas{}, nil, proto, basics.Round(1))
+ _, _, _, err = accountsNewRound(tx, updates, compactResourcesDeltas{}, nil, nil, proto, basics.Round(1))
return
})
require.NoError(b, err)
@@ -1671,9 +1973,7 @@ func TestAcctUpdatesCachesInitialization(t *testing.T) {
newAccts := applyPartialDeltas(accts[i-1], updates)
blk := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- Round: basics.Round(i),
- },
+ BlockHeader: bookkeeping.BlockHeader{Round: i},
}
blk.RewardsLevel = rewardLevel
blk.CurrentProtocol = protocolVersion
@@ -2453,7 +2753,7 @@ type auNewBlockOpts struct {
knownCreatables map[basics.CreatableIndex]bool
}
-func auNewBlock(t *testing.T, rnd basics.Round, au *accountUpdates, base map[basics.Address]basics.AccountData, data auNewBlockOpts) {
+func auNewBlock(t *testing.T, rnd basics.Round, au *accountUpdates, base map[basics.Address]basics.AccountData, data auNewBlockOpts, kvMods map[string]ledgercore.KvValueDelta) {
rewardLevel := uint64(0)
prevRound, prevTotals, err := au.LatestTotals()
require.Equal(t, rnd-1, prevRound)
@@ -2462,9 +2762,7 @@ func auNewBlock(t *testing.T, rnd basics.Round, au *accountUpdates, base map[bas
newTotals := ledgertesting.CalculateNewRoundAccountTotals(t, data.updates, rewardLevel, data.protoParams, base, prevTotals)
blk := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- Round: basics.Round(rnd),
- },
+ BlockHeader: bookkeeping.BlockHeader{Round: rnd},
}
blk.RewardsLevel = rewardLevel
blk.CurrentProtocol = data.version
@@ -2472,6 +2770,7 @@ func auNewBlock(t *testing.T, rnd basics.Round, au *accountUpdates, base map[bas
delta.Accts.MergeAccounts(data.updates)
delta.Creatables = creatablesFromUpdates(base, data.updates, data.knownCreatables)
delta.Totals = newTotals
+ delta.KvMods = kvMods
au.newBlock(blk, delta)
}
@@ -2537,7 +2836,7 @@ func TestAcctUpdatesLookupLatestCacheRetry(t *testing.T) {
// prepare block
opts := auNewBlockOpts{updates, testProtocolVersion, protoParams, knownCreatables}
- auNewBlock(t, i, au, base, opts)
+ auNewBlock(t, i, au, base, opts, nil)
// commit changes synchroniously
auCommitSync(t, i, au, ml)
@@ -2601,7 +2900,7 @@ func TestAcctUpdatesLookupLatestCacheRetry(t *testing.T) {
au.cachedDBRound = oldCachedDBRound
au.accountsMu.Unlock()
opts := auNewBlockOpts{ledgercore.AccountDeltas{}, testProtocolVersion, protoParams, knownCreatables}
- auNewBlock(t, rnd+1, au, accts[rnd], opts)
+ auNewBlock(t, rnd+1, au, accts[rnd], opts, nil)
auCommitSync(t, rnd+1, au, ml)
wg.Wait()
@@ -2685,7 +2984,7 @@ func TestAcctUpdatesLookupResources(t *testing.T) {
// prepare block
opts := auNewBlockOpts{updates, testProtocolVersion, protoParams, knownCreatables}
- auNewBlock(t, i, au, base, opts)
+ auNewBlock(t, i, au, base, opts, nil)
if i <= basics.Round(protoParams.MaxBalLookback+1) {
auCommitSync(t, i, au, ml)
diff --git a/ledger/apply/application_test.go b/ledger/apply/application_test.go
index dd6213318..e9e9f699c 100644
--- a/ledger/apply/application_test.go
+++ b/ledger/apply/application_test.go
@@ -746,7 +746,7 @@ func TestAppCallOptIn(t *testing.T) {
prevMaxAppsOptedIn := config.Consensus[protocol.ConsensusV24].MaxAppsOptedIn
for _, testProtoVer := range optInCountTest {
cparams, ok := config.Consensus[testProtoVer]
- a.True(ok)
+ a.True(ok, testProtoVer)
if cparams.MaxAppsOptedIn > 0 {
a.LessOrEqual(prevMaxAppsOptedIn, cparams.MaxAppsOptedIn)
}
diff --git a/ledger/apply/payment.go b/ledger/apply/payment.go
index 9c1f0f94e..c86f791e5 100644
--- a/ledger/apply/payment.go
+++ b/ledger/apply/payment.go
@@ -99,6 +99,15 @@ func Payment(payment transactions.PaymentTxnFields, header transactions.Header,
return fmt.Errorf("cannot close: %d outstanding applications opted in. Please opt out or clear them", totalAppLocalStates)
}
+ // Confirm that there is no box-related state in the account
+ if rec.TotalBoxes > 0 {
+ return fmt.Errorf("cannot close: %d outstanding boxes", rec.TotalBoxes)
+ }
+ if rec.TotalBoxBytes > 0 {
+ // This should be impossible because every box byte comes from the existence of a box.
+ return fmt.Errorf("cannot close: %d outstanding box bytes", rec.TotalBoxBytes)
+ }
+
// Can't have created apps remaining either
totalAppParams := rec.TotalAppParams
if totalAppParams > 0 {
diff --git a/ledger/internal/apptxn_test.go b/ledger/apptxn_test.go
index 86a4a7fea..194c6e84a 100644
--- a/ledger/internal/apptxn_test.go
+++ b/ledger/apptxn_test.go
@@ -14,12 +14,12 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal_test
+package ledger
import (
"encoding/hex"
"fmt"
- "strings"
+ "strconv"
"testing"
"github.com/stretchr/testify/require"
@@ -27,43 +27,27 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/txntest"
- "github.com/algorand/go-algorand/ledger"
- "github.com/algorand/go-algorand/ledger/ledgercore"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
- "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
-// main wraps up some TEAL source in a header and footer so that it is
-// an app that does nothing at create time, but otherwise runs source,
-// then approves, if the source avoids panicing and leaves the stack
-// empty.
-func main(source string) string {
- return strings.Replace(fmt.Sprintf(`txn ApplicationID
- bz end
- %s
- end: int 1`, source), ";", "\n", -1)
-}
-
// TestPayAction ensures a pay in teal affects balances
func TestPayAction(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
- genesisInitState, addrs, _ := ledgertesting.Genesis(10)
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // Inner txns start in v30
+ ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
- create := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ ai := dl.fundedApp(addrs[0], 200000, // account min balance, plus fees
+ main(`
itxn_begin
int pay
itxn_field TypeEnum
@@ -72,130 +56,110 @@ func TestPayAction(t *testing.T) {
txn Accounts 1
itxn_field Receiver
itxn_submit
-`),
- }
+ `))
- ai := basics.AppIndex(1)
- fund := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: ai.Address(),
- Amount: 200000, // account min balance, plus fees
- }
+ require.Equal(t, ai, basics.AppIndex(1))
- payout1 := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: ai,
- Accounts: []basics.Address{addrs[1]}, // pay self
- }
-
- eval := nextBlock(t, l)
- txns(t, l, eval, &create, &fund, &payout1)
- vb := endBlock(t, l, eval)
+ payout1 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: ai,
+ Accounts: []basics.Address{addrs[1]}, // pay self
+ }
- // AD contains expected appIndex
- require.Equal(t, ai, vb.Block().Payset[0].ApplyData.ApplicationID)
+ dl.fullBlock(&payout1)
- ad0 := micros(t, l, addrs[0])
- ad1 := micros(t, l, addrs[1])
- app := micros(t, l, ai.Address())
+ ad0 := micros(dl.t, dl.generator, addrs[0])
+ ad1 := micros(dl.t, dl.generator, addrs[1])
+ app := micros(dl.t, dl.generator, ai.Address())
- genAccounts := genesisInitState.Accounts
- // create(1000) and fund(1000 + 200000)
- require.Equal(t, uint64(202000), genAccounts[addrs[0]].MicroAlgos.Raw-ad0)
- // paid 5000, but 1000 fee
- require.Equal(t, uint64(4000), ad1-genAccounts[addrs[1]].MicroAlgos.Raw)
- // app still has 194000 (paid out 5000, and paid fee to do it)
- require.Equal(t, uint64(194000), app)
+ genAccounts := genBalances.Balances
+ // create(1000) and fund(1000 + 200000)
+ require.Equal(t, uint64(202000), genAccounts[addrs[0]].MicroAlgos.Raw-ad0)
+ // paid 5000, but 1000 fee
+ require.Equal(t, uint64(4000), ad1-genAccounts[addrs[1]].MicroAlgos.Raw)
+ // app still has 194000 (paid out 5000, and paid fee to do it)
+ require.Equal(t, uint64(194000), app)
- // Build up Residue in RewardsState so it's ready to pay
- for i := 1; i < 10; i++ {
- eval = nextBlock(t, l)
- endBlock(t, l, eval)
- }
-
- eval = nextBlock(t, l)
- payout2 := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: ai,
- Accounts: []basics.Address{addrs[2]}, // pay other
- }
- txn(t, l, eval, &payout2)
- // confirm that modifiedAccounts can see account in inner txn
- vb = endBlock(t, l, eval)
+ // Build up Residue in RewardsState so it's ready to pay
+ for i := 1; i < 10; i++ {
+ dl.fullBlock()
+ }
- deltas := vb.Delta()
- require.Contains(t, deltas.Accts.ModifiedAccounts(), addrs[2])
-
- payInBlock := vb.Block().Payset[0]
- rewards := payInBlock.ApplyData.SenderRewards.Raw
- require.Greater(t, rewards, uint64(2000)) // some biggish number
- inners := payInBlock.ApplyData.EvalDelta.InnerTxns
- require.Len(t, inners, 1)
-
- // addr[2] is going to get the same rewards as addr[1], who
- // originally sent the top-level txn. Both had their algo balance
- // touched and has very nearly the same balance.
- require.Equal(t, rewards, inners[0].ReceiverRewards.Raw)
- // app gets none, because it has less than 1A
- require.Equal(t, uint64(0), inners[0].SenderRewards.Raw)
-
- ad1 = micros(t, l, addrs[1])
- ad2 := micros(t, l, addrs[2])
- app = micros(t, l, ai.Address())
-
- // paid 5000, in first payout (only), but paid 1000 fee in each payout txn
- require.Equal(t, rewards+3000, ad1-genAccounts[addrs[1]].MicroAlgos.Raw)
- // app still has 188000 (paid out 10000, and paid 2k fees to do it)
- // no rewards because owns less than an algo
- require.Equal(t, uint64(200000)-10000-2000, app)
-
- // paid 5000 by payout2, never paid any fees, got same rewards
- require.Equal(t, rewards+uint64(5000), ad2-genAccounts[addrs[2]].MicroAlgos.Raw)
-
- // Now fund the app account much more, so we can confirm it gets rewards.
- tenkalgos := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: ai.Address(),
- Amount: 10 * 1000 * 1000000, // account min balance, plus fees
- }
- eval = nextBlock(t, l)
- txn(t, l, eval, &tenkalgos)
- endBlock(t, l, eval)
- beforepay := micros(t, l, ai.Address())
+ payout2 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: ai,
+ Accounts: []basics.Address{addrs[2]}, // pay other
+ }
+ vb := dl.fullBlock(&payout2)
+ // confirm that modifiedAccounts can see account in inner txn
+
+ deltas := vb.Delta()
+ require.Contains(t, deltas.Accts.ModifiedAccounts(), addrs[2])
+
+ payInBlock := vb.Block().Payset[0]
+ rewards := payInBlock.ApplyData.SenderRewards.Raw
+ require.Greater(t, rewards, uint64(2000)) // some biggish number
+ inners := payInBlock.ApplyData.EvalDelta.InnerTxns
+ require.Len(t, inners, 1)
+
+ // addr[2] is going to get the same rewards as addr[1], who
+ // originally sent the top-level txn. Both had their algo balance
+ // touched and has very nearly the same balance.
+ require.Equal(t, rewards, inners[0].ReceiverRewards.Raw)
+ // app gets none, because it has less than 1A
+ require.Equal(t, uint64(0), inners[0].SenderRewards.Raw)
+
+ ad1 = micros(dl.t, dl.validator, addrs[1])
+ ad2 := micros(dl.t, dl.validator, addrs[2])
+ app = micros(dl.t, dl.validator, ai.Address())
+
+ // paid 5000, in first payout (only), but paid 1000 fee in each payout txn
+ require.Equal(t, rewards+3000, ad1-genAccounts[addrs[1]].MicroAlgos.Raw)
+ // app still has 188000 (paid out 10000, and paid 2k fees to do it)
+ // no rewards because owns less than an algo
+ require.Equal(t, uint64(200000)-10000-2000, app)
+
+ // paid 5000 by payout2, never paid any fees, got same rewards
+ require.Equal(t, rewards+uint64(5000), ad2-genAccounts[addrs[2]].MicroAlgos.Raw)
+
+ // Now fund the app account much more, so we can confirm it gets rewards.
+ tenkalgos := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: ai.Address(),
+ Amount: 10 * 1000 * 1000000, // account min balance, plus fees
+ }
+ dl.fullBlock(&tenkalgos)
+ beforepay := micros(dl.t, dl.validator, ai.Address())
- // Build up Residue in RewardsState so it's ready to pay again
- for i := 1; i < 10; i++ {
- eval = nextBlock(t, l)
- endBlock(t, l, eval)
- }
- eval = nextBlock(t, l)
- txn(t, l, eval, payout2.Noted("2"))
- vb = endBlock(t, l, eval)
+ // Build up Residue in RewardsState so it's ready to pay again
+ for i := 1; i < 10; i++ {
+ dl.fullBlock()
+ }
+ vb = dl.fullBlock(payout2.Noted("2"))
- afterpay := micros(t, l, ai.Address())
+ afterpay := micros(dl.t, dl.validator, ai.Address())
- payInBlock = vb.Block().Payset[0]
- inners = payInBlock.ApplyData.EvalDelta.InnerTxns
- require.Len(t, inners, 1)
+ payInBlock = vb.Block().Payset[0]
+ inners = payInBlock.ApplyData.EvalDelta.InnerTxns
+ require.Len(t, inners, 1)
- appreward := inners[0].SenderRewards.Raw
- require.Greater(t, appreward, uint64(1000))
+ appreward := inners[0].SenderRewards.Raw
+ require.Greater(t, appreward, uint64(1000))
- require.Equal(t, beforepay+appreward-5000-1000, afterpay)
+ require.Equal(t, beforepay+appreward-5000-1000, afterpay)
+ })
}
// TestAxferAction ensures axfers in teal have the intended effects
func TestAxferAction(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
- genesisInitState, addrs, _ := ledgertesting.Genesis(10)
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ l := newSimpleLedgerWithConsensusVersion(t, genBalances, protocol.ConsensusFuture)
defer l.Close()
asa := txntest.Txn{
@@ -383,36 +347,10 @@ submit: itxn_submit
require.Equal(t, uint64(20000), back-left)
}
-func newTestLedger(t testing.TB, balances bookkeeping.GenesisBalances) *ledger.Ledger {
- return newTestLedgerWithConsensusVersion(t, balances, protocol.ConsensusFuture)
-}
-
-func newTestLedgerWithConsensusVersion(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion) *ledger.Ledger {
- var genHash crypto.Digest
- crypto.RandBytes(genHash[:])
- return newTestLedgerFull(t, balances, cv, genHash)
-}
-
-func newTestLedgerFull(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion, genHash crypto.Digest) *ledger.Ledger {
- genBlock, err := bookkeeping.MakeGenesisBlock(cv, balances, "test", genHash)
- require.NoError(t, err)
- require.False(t, genBlock.FeeSink.IsZero())
- require.False(t, genBlock.RewardsPool.IsZero())
- dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- l, err := ledger.OpenLedger(logging.Base(), dbName, true, ledgercore.InitState{
- Block: genBlock,
- Accounts: balances.Balances,
- GenesisHash: genHash,
- }, cfg)
- require.NoError(t, err)
- return l
-}
-
// TestClawbackAction ensures an app address can act as clawback address.
func TestClawbackAction(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -498,6 +436,7 @@ func TestClawbackAction(t *testing.T) {
// TestRekeyAction ensures an app can transact for a rekeyed account
func TestRekeyAction(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -602,6 +541,7 @@ skipclose:
// properly removes the app as an authorizer for the account
func TestRekeyActionCloseAccount(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -677,6 +617,7 @@ func TestRekeyActionCloseAccount(t *testing.T) {
// TestDuplicatePayAction shows two pays with same parameters can be done as inner tarnsactions
func TestDuplicatePayAction(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -752,6 +693,7 @@ func TestDuplicatePayAction(t *testing.T) {
// TestInnerTxCount ensures that inner transactions increment the TxnCounter
func TestInnerTxnCount(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -800,6 +742,7 @@ func TestInnerTxnCount(t *testing.T) {
// TestAcfgAction ensures assets can be created and configured in teal
func TestAcfgAction(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -976,6 +919,7 @@ submit: itxn_submit
// we can know, so it helps exercise txncounter changes.
func TestAsaDuringInit(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -1029,6 +973,7 @@ func TestAsaDuringInit(t *testing.T) {
func TestRekey(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -1080,6 +1025,7 @@ func TestRekey(t *testing.T) {
func TestNote(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -1128,6 +1074,7 @@ func TestNote(t *testing.T) {
func TestKeyreg(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -1228,6 +1175,7 @@ nonpart:
func TestInnerAppCall(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -1295,6 +1243,7 @@ func TestInnerAppCall(t *testing.T) {
// the changes expected when invoked.
func TestInnerAppManipulate(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -1405,8 +1354,8 @@ func TestCreateAndUse(t *testing.T) {
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
// At 30 the asset reference is illegal, then from v31 it works.
- testConsensusRange(t, 30, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
createapp := txntest.Txn{
@@ -1475,8 +1424,8 @@ func TestGtxnEffects(t *testing.T) {
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
// At 30 `gtxn CreatedAssetId is illegal, then from v31 it works.
- testConsensusRange(t, 30, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
createapp := txntest.Txn{
@@ -1537,8 +1486,8 @@ func TestBasicReentry(t *testing.T) {
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
app0 := txntest.Txn{
@@ -1731,8 +1680,8 @@ func TestMaxInnerTxForSingleAppCall(t *testing.T) {
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
// v31 = inner appl
- testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
program := `
@@ -1891,8 +1840,8 @@ func TestInnerAppVersionCalling(t *testing.T) {
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
// 31 allowed inner appls. v34 lowered proto.MinInnerApplVersion
- testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
three, err := logic.AssembleStringWithVersion("int 1", 3)
@@ -2085,8 +2034,8 @@ func TestAppDowngrade(t *testing.T) {
// Confirm that in old protocol version, downgrade is legal
// Start at 28 because we want to v4 app to downgrade to v3
- testConsensusRange(t, 28, 30, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 28, 30, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
create := txntest.Txn{
@@ -2116,8 +2065,8 @@ func TestAppDowngrade(t *testing.T) {
dl.fullBlock(&update)
})
- testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
create := txntest.Txn{
@@ -2361,7 +2310,7 @@ func executeMegaContract(b *testing.B) {
var cv protocol.ConsensusVersion = "temp test"
config.Consensus[cv] = vTest
- l := newTestLedgerWithConsensusVersion(b, genBalances, cv)
+ l := newSimpleLedgerWithConsensusVersion(b, genBalances, cv)
defer l.Close()
defer delete(config.Consensus, cv)
@@ -2458,6 +2407,7 @@ func BenchmarkMaximumCallStackDepth(b *testing.B) {
// TestInnerClearState ensures inner ClearState performs close out properly, even if rejects.
func TestInnerClearState(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -2546,6 +2496,7 @@ itxn_submit
// allowed to use more than 700 (MaxAppProgramCost)
func TestInnerClearStateBadCallee(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -2647,6 +2598,7 @@ skip:
// be called with less than 700 (MaxAppProgramCost)) OpcodeBudget.
func TestInnerClearStateBadCaller(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -2768,6 +2720,7 @@ itxn_submit
// v30, but not in vFuture. (Test should add v31 after it exists.)
func TestClearStateInnerPay(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
tests := []struct {
consensus protocol.ConsensusVersion
@@ -2783,7 +2736,7 @@ func TestClearStateInnerPay(t *testing.T) {
t.Run(fmt.Sprintf("i=%d", i), func(t *testing.T) {
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedgerWithConsensusVersion(t, genBalances, test.consensus)
+ l := newSimpleLedgerWithConsensusVersion(t, genBalances, test.consensus)
defer l.Close()
app0 := txntest.Txn{
@@ -2880,6 +2833,7 @@ itxn_submit
// calls when using inners.
func TestGlobalChangesAcrossApps(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -2988,6 +2942,7 @@ check:
// calls when using inners.
func TestLocalChangesAcrossApps(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -3101,8 +3056,8 @@ func TestForeignAppAccountsAccessible(t *testing.T) {
partitiontest.PartitionTest(t)
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- testConsensusRange(t, 32, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 32, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
appA := txntest.Txn{
@@ -3167,8 +3122,8 @@ func TestForeignAppAccountsImmutable(t *testing.T) {
partitiontest.PartitionTest(t)
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- testConsensusRange(t, 32, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 32, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
appA := txntest.Txn{
@@ -3221,8 +3176,8 @@ func TestForeignAppAccountsMutable(t *testing.T) {
partitiontest.PartitionTest(t)
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- testConsensusRange(t, 32, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 32, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
appA := txntest.Txn{
@@ -3302,9 +3257,8 @@ func TestReloadWithTxns(t *testing.T) {
partitiontest.PartitionTest(t)
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- testConsensusRange(t, 34, 0, func(t *testing.T, ver int) {
- fmt.Printf("testConsensus %d\n", ver)
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 34, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
dl.fullBlock() // So that the `block` opcode has a block to inspect
@@ -3320,3 +3274,484 @@ func TestReloadWithTxns(t *testing.T) {
dl.reloadLedgers()
})
}
+
+// TestEvalAppState ensures txns in a group can't violate app state schema
+// limits. It ensures that commitToParent -> applyChild copies child's cow state
+// usage counts into parent and the usage counts are correctly propagated from
+// parent cow to child cow and back. When limits are not violated, the test
+// ensures that the updates are correct.
+func TestEvalAppState(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // v24 = apps
+ ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ appcall1 := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[0],
+ GlobalStateSchema: basics.StateSchema{NumByteSlice: 1},
+ ApprovalProgram: `#pragma version 2
+ txn ApplicationID
+ bz create
+ byte "caller"
+ txn Sender
+ app_global_put
+ b ok
+create:
+ byte "creator"
+ txn Sender
+ app_global_put
+ok:
+ int 1`,
+ ClearStateProgram: "#pragma version 2\nint 1",
+ }
+
+ appcall2 := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[0],
+ ApplicationID: 1,
+ }
+
+ dl.beginBlock()
+ dl.txgroup("store bytes count 2 exceeds schema bytes count 1", &appcall1, &appcall2)
+
+ appcall1.GlobalStateSchema = basics.StateSchema{NumByteSlice: 2}
+ dl.txgroup("", &appcall1, &appcall2)
+ vb := dl.endBlock()
+ deltas := vb.Delta()
+
+ params, ok := deltas.Accts.GetAppParams(addrs[0], 1)
+ require.True(t, ok)
+ state := params.Params.GlobalState
+ require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addrs[0][:])}, state["caller"])
+ require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addrs[0][:])}, state["creator"])
+ })
+}
+
+func TestGarbageClearState(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // v24 = apps
+ ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ createTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: "int 1",
+ ClearStateProgram: []byte{},
+ }
+
+ dl.txn(&createTxn, "invalid program (empty)")
+
+ createTxn.ClearStateProgram = []byte{0xfe} // bad uvarint
+ dl.txn(&createTxn, "invalid version")
+ })
+}
+
+func TestRewardsInAD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // v15 put rewards into ApplyData
+ ledgertesting.TestConsensusRange(t, 11, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ payTxn := txntest.Txn{Type: protocol.PaymentTx, Sender: addrs[0], Receiver: addrs[1]}
+ nonpartTxn := txntest.Txn{Type: protocol.KeyRegistrationTx, Sender: addrs[2], Nonparticipation: true}
+ payNonPart := txntest.Txn{Type: protocol.PaymentTx, Sender: addrs[0], Receiver: addrs[2]}
+
+ if ver < 18 { // Nonpart keyreg happens in v18
+ dl.txn(&nonpartTxn, "tries to mark an account as nonparticipating")
+ } else {
+ dl.fullBlock(&nonpartTxn)
+ }
+
+ // Build up Residue in RewardsState so it's ready to pay
+ for i := 1; i < 10; i++ {
+ dl.fullBlock()
+ }
+
+ vb := dl.fullBlock(&payTxn, &payNonPart)
+ payInBlock := vb.Block().Payset[0]
+ nonPartInBlock := vb.Block().Payset[1]
+ if ver >= 15 {
+ require.Greater(t, payInBlock.ApplyData.SenderRewards.Raw, uint64(1000))
+ require.Greater(t, payInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
+ require.Equal(t, payInBlock.ApplyData.SenderRewards, payInBlock.ApplyData.ReceiverRewards)
+ // Sender is not due for more, and Receiver is nonpart
+ require.Zero(t, nonPartInBlock.ApplyData.SenderRewards)
+ if ver < 18 {
+ require.Greater(t, nonPartInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
+ } else {
+ require.Zero(t, nonPartInBlock.ApplyData.ReceiverRewards)
+ }
+ } else {
+ require.Zero(t, payInBlock.ApplyData.SenderRewards)
+ require.Zero(t, payInBlock.ApplyData.ReceiverRewards)
+ require.Zero(t, nonPartInBlock.ApplyData.SenderRewards)
+ require.Zero(t, nonPartInBlock.ApplyData.ReceiverRewards)
+ }
+ })
+}
+
+// TestDeleteNonExistantKeys checks if the EvalDeltas from deleting missing keys are correct
+func TestDeleteNonExistantKeys(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // AVM v2 (apps)
+ ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ const appID basics.AppIndex = 1
+
+ createTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
+byte "missing_global"
+app_global_del
+int 0
+byte "missing_local"
+app_local_del
+`),
+ }
+
+ optInTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appID,
+ OnCompletion: transactions.OptInOC,
+ }
+
+ vb := dl.fullBlock(&createTxn, &optInTxn)
+ require.Len(t, vb.Block().Payset[1].EvalDelta.GlobalDelta, 0)
+ // For a while, we encoded an empty localdelta
+ deltas := 1
+ if ver >= 27 {
+ deltas = 0
+ }
+ require.Len(t, vb.Block().Payset[1].EvalDelta.LocalDeltas, deltas)
+ })
+}
+
+func TestDuplicates(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ ledgertesting.TestConsensusRange(t, 11, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ pay := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: addrs[1],
+ Amount: 10,
+ }
+ dl.txn(&pay)
+ dl.txn(&pay, "transaction already in ledger")
+
+ // Test same transaction in a later block
+ dl.txn(&pay, "transaction already in ledger")
+
+ // Change the note so it can go in again
+ pay.Note = []byte("1")
+ dl.txn(&pay)
+
+ // Change note again, but try the txn twice in same group
+ if dl.generator.GenesisProto().MaxTxGroupSize > 1 {
+ pay.Note = []byte("2")
+ dl.txgroup("transaction already in ledger", &pay, &pay)
+ }
+ })
+}
+
+// TestHeaderAccess tests FirstValidTime and `block` which can access previous
+// block headers.
+func TestHeaderAccess(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // Added in v34
+ ledgertesting.TestConsensusRange(t, 34, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ fvt := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ FirstValid: 0,
+ ApprovalProgram: "txn FirstValidTime",
+ }
+ dl.txn(&fvt, "round 0 is not available")
+
+ // advance current to 2
+ pay := txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]}
+ dl.fullBlock(&pay)
+
+ fvt.FirstValid = 1
+ dl.txn(&fvt, "round 0 is not available")
+
+ fvt.FirstValid = 2
+ dl.txn(&fvt) // current becomes 3
+
+ // Advance current round far enough to test access MaxTxnLife ago
+ for i := 0; i < int(config.Consensus[cv].MaxTxnLife); i++ {
+ dl.fullBlock()
+ }
+
+ // current should be 1003. Confirm.
+ require.EqualValues(t, 1002, dl.generator.Latest())
+ require.EqualValues(t, 1002, dl.validator.Latest())
+
+ fvt.FirstValid = 1003
+ fvt.LastValid = 1010
+ dl.txn(&fvt) // success advances the round
+ // now we're confident current is 1004, so construct a txn that is as
+ // old as possible, and confirm access.
+ fvt.FirstValid = 1004 - basics.Round(config.Consensus[cv].MaxTxnLife)
+ fvt.LastValid = 1004
+ dl.txn(&fvt)
+ })
+
+}
+
+// TestLogsInBlock ensures that logs appear in the block properly
+func TestLogsInBlock(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // Run tests from v30 onward
+ ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ createTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: "byte \"APP\"\n log\n int 1",
+ // Fail the clear state
+ ClearStateProgram: "byte \"CLR\"\n log\n int 0",
+ }
+ vb := dl.fullBlock(&createTxn)
+ createInBlock := vb.Block().Payset[0]
+ appID := createInBlock.ApplyData.ApplicationID
+ require.Equal(t, "APP", createInBlock.ApplyData.EvalDelta.Logs[0])
+
+ optInTxn := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[1],
+ ApplicationID: appID,
+ OnCompletion: transactions.OptInOC,
+ }
+ vb = dl.fullBlock(&optInTxn)
+ optInInBlock := vb.Block().Payset[0]
+ require.Equal(t, "APP", optInInBlock.ApplyData.EvalDelta.Logs[0])
+
+ clearTxn := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[1],
+ ApplicationID: appID,
+ OnCompletion: transactions.ClearStateOC,
+ }
+ vb = dl.fullBlock(&clearTxn)
+ clearInBlock := vb.Block().Payset[0]
+ // Logs do not appear if the ClearState failed
+ require.Len(t, clearInBlock.ApplyData.EvalDelta.Logs, 0)
+ })
+}
+
+// TestUnfundedSenders confirms that accounts that don't even exist
+// can be the Sender in some situations. If some other transaction
+// covers the fee, and the transaction itself does not require an
+// asset or a min balance, it's fine.
+func TestUnfundedSenders(t *testing.T) {
+ /*
+ In a 0-fee transaction from unfunded sender, we still call balances.Move
+ to “pay” the fee. Move() does not short-circuit a Move of 0 (for good
+ reason, it allows compounding rewards). Therefore, in Move, we do
+ rewards processing on the unfunded account. Before
+ proto.UnfundedSenders, the rewards processing would set the RewardsBase,
+ which would require the account be written to DB, and therefore the MBR
+ check would kick in (and fail). Now it skips the update if the account
+ has less than RewardsUnit, as the update is meaningless anyway.
+ */
+
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+
+ ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ asaIndex := basics.AssetIndex(1)
+
+ ghost := basics.Address{0x01}
+
+ asaCreate := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{
+ Total: 10,
+ Clawback: ghost,
+ Freeze: ghost,
+ Manager: ghost,
+ },
+ }
+
+ appCreate := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ }
+
+ dl.fullBlock(&asaCreate, &appCreate)
+
+ // Advance so that rewardsLevel increases
+ for i := 1; i < 10; i++ {
+ dl.fullBlock()
+ }
+
+ benefactor := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: addrs[0],
+ Fee: 2000,
+ }
+
+ ephemeral := []txntest.Txn{
+ {
+ Type: "pay",
+ Amount: 0,
+ Sender: ghost,
+ Receiver: ghost,
+ Fee: 0,
+ },
+ { // Axfer of 0
+ Type: "axfer",
+ AssetAmount: 0,
+ Sender: ghost,
+ AssetReceiver: basics.Address{0x02},
+ XferAsset: basics.AssetIndex(1),
+ Fee: 0,
+ },
+ { // Clawback
+ Type: "axfer",
+ AssetAmount: 0,
+ Sender: ghost,
+ AssetReceiver: addrs[0],
+ AssetSender: addrs[1],
+ XferAsset: asaIndex,
+ Fee: 0,
+ },
+ { // Freeze
+ Type: "afrz",
+ Sender: ghost,
+ FreezeAccount: addrs[0], // creator, therefore is opted in
+ FreezeAsset: asaIndex,
+ AssetFrozen: true,
+ Fee: 0,
+ },
+ { // Unfreeze
+ Type: "afrz",
+ Sender: ghost,
+ FreezeAccount: addrs[0], // creator, therefore is opted in
+ FreezeAsset: asaIndex,
+ AssetFrozen: false,
+ Fee: 0,
+ },
+ { // App call
+ Type: "appl",
+ Sender: ghost,
+ ApplicationID: basics.AppIndex(2),
+ Fee: 0,
+ },
+ { // App creation (only works because it's also deleted)
+ Type: "appl",
+ Sender: ghost,
+ OnCompletion: transactions.DeleteApplicationOC,
+ Fee: 0,
+ },
+ }
+
+ // v34 is the likely version for UnfundedSenders. Change if that doesn't happen.
+ var problem string
+ if ver < 34 {
+ // In the old days, balances.Move would try to increase the rewardsState on the unfunded account
+ problem = "balance 0 below min"
+ }
+ for i, e := range ephemeral {
+ dl.txgroup(problem, benefactor.Noted(strconv.Itoa(i)), &e)
+ }
+ })
+}
+
+// TestAppCallAppDuringInit is similar to TestUnfundedSenders test, but now the
+// unfunded sender is a newly created app. The fee has been paid by the outer
+// transaction, so the app should be able to make an app call as that requires
+// no min balance.
+func TestAppCallAppDuringInit(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ approve := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ }
+
+ // construct a simple app
+ vb := dl.fullBlock(&approve)
+
+ // now make a new app that calls it during init
+ approveID := vb.Block().Payset[0].ApplicationID
+
+ // Advance so that rewardsLevel increases
+ for i := 1; i < 10; i++ {
+ dl.fullBlock()
+ }
+
+ callInInit := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: `
+ itxn_begin
+ int appl
+ itxn_field TypeEnum
+ txn Applications 1
+ itxn_field ApplicationID
+ itxn_submit
+ int 1
+ `,
+ ForeignApps: []basics.AppIndex{approveID},
+ Fee: 2000, // Enough to have the inner fee paid for
+ }
+ // v34 is the likely version for UnfundedSenders. Change if that doesn't happen.
+ var problem string
+ if ver < 34 {
+ // In the old days, balances.Move would try to increase the rewardsState on the unfunded account
+ problem = "balance 0 below min"
+ }
+ dl.txn(&callInInit, problem)
+ })
+}
diff --git a/ledger/boxtxn_test.go b/ledger/boxtxn_test.go
new file mode 100644
index 000000000..92f188632
--- /dev/null
+++ b/ledger/boxtxn_test.go
@@ -0,0 +1,688 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "bytes"
+ "encoding/binary"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/txntest"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+var boxAppSource = main(`
+ txn ApplicationArgs 0
+ byte "create" // create box named arg[1]
+ ==
+ txn ApplicationArgs 0
+ byte "recreate"
+ ==
+ ||
+ bz del
+ txn ApplicationArgs 1
+ int 24
+ txn NumAppArgs
+ int 2
+ ==
+ bnz default
+ pop // get rid of 24
+ txn ApplicationArgs 2
+ btoi
+ default:
+ txn ApplicationArgs 0
+ byte "recreate"
+ ==
+ bz first
+ box_create
+ !
+ assert
+ b end
+ first:
+ box_create
+ assert
+ b end
+ del: // delete box arg[1]
+ txn ApplicationArgs 0; byte "delete"; ==
+ bz set
+ txn ApplicationArgs 1
+ box_del
+ assert
+ b end
+ set: // put arg[2] at start of box arg[1]
+ txn ApplicationArgs 0; byte "set"; ==
+ bz put
+ txn ApplicationArgs 1
+ int 0
+ txn ApplicationArgs 2
+ box_replace
+ b end
+ put: // box_put arg[2] as replacement for box arg[1]
+ txn ApplicationArgs 0; byte "put"; ==
+ bz get
+ txn ApplicationArgs 1
+ txn ApplicationArgs 2
+ box_put
+ b end
+ get: // log box arg[1], after getting it with box_get
+ txn ApplicationArgs 0; byte "get"; ==
+ bz check
+ txn ApplicationArgs 1
+ box_get
+ assert
+ log
+ b end
+ check: // fail unless arg[2] is the prefix of box arg[1]
+ txn ApplicationArgs 0; byte "check"; ==
+ bz bad
+ txn ApplicationArgs 1
+ int 0
+ txn ApplicationArgs 2
+ len
+ box_extract
+ txn ApplicationArgs 2
+ ==
+ assert
+ b end
+ bad:
+ err
+`)
+
+// Call the app in txn.Applications[1] the same way I was called.
+var passThruSource = main(`
+ itxn_begin
+ txn Applications 1; itxn_field ApplicationID
+ txn TypeEnum; itxn_field TypeEnum
+ // copy my app args into itxn app args (too lazy to write a loop), these are
+ // always called with 2 or 3 args.
+ txn ApplicationArgs 0; itxn_field ApplicationArgs
+ txn ApplicationArgs 1; itxn_field ApplicationArgs
+ txn NumAppArgs; int 2; ==; bnz skip
+ txn ApplicationArgs 2; itxn_field ApplicationArgs
+ skip:
+ itxn_submit
+`)
+
+const boxVersion = 36
+
+func boxFee(p config.ConsensusParams, nameAndValueSize uint64) uint64 {
+ return p.BoxFlatMinBalance + p.BoxByteMinBalance*(nameAndValueSize)
+}
+
+// TestBoxCreate tests MBR changes around allocation, deallocation
+func TestBoxCreate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ // increment for a size 24 box with 4 letter name
+ proto := config.Consensus[cv]
+ mbr := boxFee(proto, 28)
+
+ appIndex := dl.fundedApp(addrs[0], proto.MinBalance+3*mbr, boxAppSource)
+
+ call := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: appIndex,
+ }
+
+ adam := call.Args("create", "adam")
+ dl.txn(adam, "invalid Box reference adam")
+ adam.Boxes = []transactions.BoxRef{{Index: 0, Name: []byte("adam")}}
+
+ dl.beginBlock()
+ dl.txn(adam)
+ vb := dl.endBlock()
+
+		// confirm the deltas have the creation
+ require.Len(t, vb.Delta().KvMods, 1)
+ for _, kvDelta := range vb.Delta().KvMods { // There's only one
+ require.Nil(t, kvDelta.OldData) // A creation has nil OldData
+ require.Len(t, kvDelta.Data, 24)
+ }
+
+ dl.txn(adam.Args("check", "adam", "\x00\x00"))
+ dl.txgroup("box_create\nassert", adam.Noted("one"), adam.Noted("two"))
+
+ bobo := call.Args("create", "bobo")
+ dl.txn(bobo, "invalid Box reference bobo")
+ bobo.Boxes = []transactions.BoxRef{{Index: 0, Name: []byte("bobo")}}
+ dl.txn(bobo)
+ dl.txgroup("box_create\nassert", bobo.Noted("one"), bobo.Noted("two"))
+
+ dl.beginBlock()
+ chaz := call.Args("create", "chaz")
+ chaz.Boxes = []transactions.BoxRef{{Index: 0, Name: []byte("chaz")}}
+ dl.txn(chaz)
+ dl.txn(chaz.Noted("again"), "box_create\nassert")
+ dl.endBlock()
+
+ // new block
+ dl.txn(chaz.Noted("again"), "box_create\nassert")
+ dogg := call.Args("create", "dogg")
+ dogg.Boxes = []transactions.BoxRef{{Index: 0, Name: []byte("dogg")}}
+ dl.txn(dogg, "below min")
+ dl.txn(chaz.Args("delete", "chaz"))
+ dl.txn(chaz.Args("delete", "chaz").Noted("again"), "box_del\nassert")
+ dl.txn(dogg)
+ dl.txn(bobo.Args("delete", "bobo"))
+
+ // empty name is illegal
+ empty := call.Args("create", "")
+ dl.txn(empty, "box names may not be zero")
+ // and, of course, that's true even if there's a box ref with the empty name
+ empty.Boxes = []transactions.BoxRef{{}}
+ dl.txn(empty, "box names may not be zero")
+ })
+}
+
+// TestBoxRecreate tests behavior when box_create is called for a box that already exists
+func TestBoxRecreate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ // increment for a size 4 box with 4 letter name
+ proto := config.Consensus[cv]
+ mbr := boxFee(proto, 8)
+
+ appIndex := dl.fundedApp(addrs[0], proto.MinBalance+mbr, boxAppSource)
+
+ call := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: appIndex,
+ Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("adam")}},
+ }
+
+ create := call.Args("create", "adam", "\x04") // box value size is 4 bytes
+ recreate := call.Args("recreate", "adam", "\x04")
+
+ dl.txn(recreate, "box_create\n!\nassert")
+ dl.txn(create)
+ dl.txn(recreate)
+ dl.txn(call.Args("set", "adam", "\x01\x02\x03\x04"))
+ dl.txn(call.Args("check", "adam", "\x01\x02\x03\x04"))
+ dl.txn(recreate.Noted("again"))
+ // a recreate does not change the value
+ dl.txn(call.Args("check", "adam", "\x01\x02\x03\x04").Noted("after recreate"))
+ // recreating with a smaller size fails
+ dl.txn(call.Args("recreate", "adam", "\x03"), "box size mismatch 4 3")
+ // recreating with a larger size fails
+ dl.txn(call.Args("recreate", "adam", "\x05"), "box size mismatch 4 5")
+ dl.txn(call.Args("check", "adam", "\x01\x02\x03\x04").Noted("after failed recreates"))
+
+ // delete and actually create again
+ dl.txn(call.Args("delete", "adam"))
+ dl.txn(call.Args("create", "adam", "\x03"))
+
+ dl.txn(call.Args("set", "adam", "\x03\x02\x01"))
+ dl.txn(call.Args("check", "adam", "\x03\x02\x01"))
+ dl.txn(recreate.Noted("after delete"), "box size mismatch 3 4")
+ dl.txn(call.Args("recreate", "adam", "\x03"))
+ dl.txn(call.Args("check", "adam", "\x03\x02\x01").Noted("after delete and recreate"))
+ })
+}
+
+func TestBoxCreateAvailability(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ accessInCreate := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: 0, // This is a create
+ Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("hello")}},
+ ApprovalProgram: `
+ byte "hello"
+ int 10
+ box_create
+`,
+ }
+
+ // We know box_create worked because we finished and checked MBR
+ dl.txn(&accessInCreate, "balance 0 below min")
+
+ // But let's fund it and be sure. This is "psychic". We're going to fund
+ // the app address that we know the app will get. So this is a nice
+		// test, but unrealistic way to actually create a box.
+ psychic := basics.AppIndex(2)
+
+ proto := config.Consensus[cv]
+ dl.txn(&txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: psychic.Address(),
+ Amount: proto.MinBalance + boxFee(proto, 15),
+ })
+ dl.txn(&accessInCreate)
+
+ // Now, a more realistic, though tricky, way to get a box created during
+ // the app's first txgroup in existence is to create it in tx0, and then
+ // in tx1 fund it using an inner tx, then invoke it with an inner
+ // transaction. During that invocation, the app will have access to the
+ // boxes supplied as "0 refs", since they were resolved to the app ID
+ // during creation.
+
+ accessWhenCalled := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: 0, // This is a create
+ Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("hello")}},
+ // Note that main() wraps the program so it does not run at creation time.
+ ApprovalProgram: main(`
+ byte "hello"
+ int 10
+ box_create
+ assert
+ byte "we did it"
+ log
+`),
+ }
+
+ trampoline := dl.fundedApp(addrs[0], 1_000_000, main(`
+ // Fund the app created in the txn behind me.
+ txn GroupIndex
+ int 1
+ -
+ gtxns CreatedApplicationID
+ dup // copy for use when calling
+ dup // test copy
+ assert
+ app_params_get AppAddress
+ assert
+
+ itxn_begin
+ itxn_field Receiver
+ int 500000
+ itxn_field Amount
+ int pay
+ itxn_field TypeEnum
+ itxn_submit
+
+  // Now invoke it, so it can initialize (and create the "hello" box)
+ itxn_begin
+ itxn_field ApplicationID
+ int appl
+ itxn_field TypeEnum
+ itxn_submit
+`))
+
+ call := txntest.Txn{
+ Sender: addrs[0],
+ Type: "appl",
+ ApplicationID: trampoline,
+ }
+
+ dl.beginBlock()
+ dl.txgroup("", &accessWhenCalled, &call)
+ vb := dl.endBlock()
+
+ // Make sure that we actually did it.
+ require.Equal(t, "we did it", vb.Block().Payset[1].ApplyData.EvalDelta.InnerTxns[1].EvalDelta.Logs[0])
+ })
+}
+
+// TestBoxRW tests reading writing boxes in consecutive transactions
+func TestBoxRW(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ t.Parallel()
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ var bufNewLogger bytes.Buffer
+ log := logging.NewLogger()
+ log.SetOutput(&bufNewLogger)
+
+ appIndex := dl.fundedApp(addrs[0], 1_000_000, boxAppSource)
+ call := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: appIndex,
+ Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("x")}},
+ }
+
+ dl.txn(call.Args("create", "x", "\x10")) // 16
+ dl.beginBlock()
+ dl.txn(call.Args("set", "x", "ABCDEFGHIJ")) // 10 long
+ vb := dl.endBlock()
+		// confirm the deltas have the change, including the old value
+ require.Len(t, vb.Delta().KvMods, 1)
+ for _, kvDelta := range vb.Delta().KvMods { // There's only one
+ require.Equal(t, kvDelta.OldData,
+ []byte(strings.Repeat("\x00", 16)))
+ require.Equal(t, kvDelta.Data,
+ []byte("ABCDEFGHIJ\x00\x00\x00\x00\x00\x00"))
+ }
+
+ dl.txn(call.Args("check", "x", "ABCDE"))
+ dl.txn(call.Args("check", "x", "ABCDEFGHIJ"))
+ dl.txn(call.Args("check", "x", "ABCDEFGHIJ\x00"))
+
+ dl.txn(call.Args("delete", "x"))
+ dl.txn(call.Args("check", "x", "ABC"), "no such box")
+ dl.txn(call.Args("create", "x", "\x08"))
+ dl.txn(call.Args("check", "x", "\x00")) // it was cleared
+ dl.txn(call.Args("set", "x", "ABCDEFGHIJ"), "replacement end 10")
+ dl.txn(call.Args("check", "x", "\x00")) // still clear
+ dl.txn(call.Args("set", "x", "ABCDEFGH"))
+ dl.txn(call.Args("check", "x", "ABCDEFGH\x00"), "extraction end 9")
+ dl.txn(call.Args("check", "x", "ABCDEFGH"))
+ dl.txn(call.Args("set", "x", "ABCDEFGHI"), "replacement end 9")
+
+ // Advance more than 320 rounds, ensure box is still there
+ for i := 0; i < 330; i++ {
+ dl.fullBlock()
+ }
+ time.Sleep(5 * time.Second) // balancesFlushInterval, so commit happens
+ dl.fullBlock(call.Args("check", "x", "ABCDEFGH"))
+ time.Sleep(100 * time.Millisecond) // give commit time to run, and prune au caches
+ dl.fullBlock(call.Args("check", "x", "ABCDEFGH"))
+
+ dl.txn(call.Args("create", "yy"), "invalid Box reference yy")
+ withBr := call.Args("create", "yy")
+ withBr.Boxes = append(withBr.Boxes, transactions.BoxRef{Index: 1, Name: []byte("yy")})
+ require.Error(dl.t, withBr.Txn().WellFormed(transactions.SpecialAddresses{}, dl.generator.GenesisProto()))
+ withBr.Boxes[1].Index = 0
+ dl.txn(withBr)
+ })
+}
+
+// TestBoxAccountData tests that an account's data changes when boxes are created
+func TestBoxAccountData(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ uint64ToArgStr := func(i uint64) string {
+ encoded := make([]byte, 8)
+ binary.BigEndian.PutUint64(encoded, i)
+ return string(encoded)
+ }
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ proto := config.Consensus[cv]
+
+ var bufNewLogger bytes.Buffer
+ log := logging.NewLogger()
+ log.SetOutput(&bufNewLogger)
+
+ appIndex := dl.fundedApp(addrs[0], 1_000_000, boxAppSource)
+ call := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: appIndex,
+ Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("x")}, {Index: 0, Name: []byte("y")}},
+ }
+
+ verifyAppSrc := main(`
+txn ApplicationArgs 0
+btoi
+txn Accounts 1
+acct_params_get AcctMinBalance
+assert
+==
+assert
+
+txn ApplicationArgs 1
+btoi
+txn Accounts 1
+acct_params_get AcctTotalBoxes
+assert
+==
+assert
+
+txn ApplicationArgs 2
+btoi
+txn Accounts 1
+acct_params_get AcctTotalBoxBytes
+assert
+==
+assert
+`)
+ verifyAppIndex := dl.fundedApp(addrs[0], 0, verifyAppSrc)
+ verifyAppCall := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: verifyAppIndex,
+ Accounts: []basics.Address{appIndex.Address()},
+ }
+
+ // The app account has no box data initially
+ dl.txn(verifyAppCall.Args(uint64ToArgStr(proto.MinBalance), "\x00", "\x00"))
+
+ dl.txn(call.Args("create", "x", "\x10")) // 16
+
+ // It gets updated when a new box is created
+ dl.txn(verifyAppCall.Args(uint64ToArgStr(proto.MinBalance+proto.BoxFlatMinBalance+17*proto.BoxByteMinBalance), "\x01", "\x11"))
+
+ dl.txn(call.Args("create", "y", "\x05"))
+
+ // And again
+ dl.txn(verifyAppCall.Args(uint64ToArgStr(proto.MinBalance+2*proto.BoxFlatMinBalance+23*proto.BoxByteMinBalance), "\x02", "\x17"))
+
+ // Advance more than 320 rounds, ensure box is still there
+ for i := 0; i < 330; i++ {
+ dl.fullBlock()
+ }
+ time.Sleep(5 * time.Second) // balancesFlushInterval, so commit happens
+ dl.fullBlock(call.Args("check", "x", string(make([]byte, 16))))
+ time.Sleep(100 * time.Millisecond) // give commit time to run, and prune au caches
+ dl.fullBlock(call.Args("check", "x", string(make([]byte, 16))))
+
+ // Still the same after caches are flushed
+ dl.txn(verifyAppCall.Args(uint64ToArgStr(proto.MinBalance+2*proto.BoxFlatMinBalance+23*proto.BoxByteMinBalance), "\x02", "\x17"))
+
+ dl.txns(call.Args("delete", "x"), call.Args("delete", "y"))
+
+ // Data gets removed after boxes are deleted
+ dl.txn(verifyAppCall.Args(uint64ToArgStr(proto.MinBalance), "\x00", "\x00"))
+ })
+}
+
+func TestBoxIOBudgets(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ appIndex := dl.fundedApp(addrs[0], 0, boxAppSource)
+ call := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: appIndex,
+ Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("x")}},
+ }
+ dl.txn(call.Args("create", "x", "\x10\x00"), // 4096
+ "write budget (1024) exceeded")
+ call.Boxes = append(call.Boxes, transactions.BoxRef{})
+ dl.txn(call.Args("create", "x", "\x10\x00"), // 4096
+ "write budget (2048) exceeded")
+ call.Boxes = append(call.Boxes, transactions.BoxRef{})
+ dl.txn(call.Args("create", "x", "\x10\x00"), // 4096
+ "write budget (3072) exceeded")
+ call.Boxes = append(call.Boxes, transactions.BoxRef{})
+ dl.txn(call.Args("create", "x", "\x10\x00"), // now there are 4 box refs
+ "below min") // big box would need more balance
+ dl.txn(call.Args("create", "x", "\x10\x01"), // 4097
+ "write budget (4096) exceeded")
+
+ // Create 4,096 byte box
+ proto := config.Consensus[cv]
+ fundApp := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: appIndex.Address(),
+ Amount: proto.MinBalance + boxFee(proto, 4096+1), // remember key len!
+ }
+ create := call.Args("create", "x", "\x10\x00")
+
+ // Slight detour - Prove insufficient funding fails creation.
+ fundApp.Amount--
+ dl.txgroup("below min", &fundApp, create)
+ fundApp.Amount++
+
+ // Confirm desired creation happens.
+ dl.txgroup("", &fundApp, create)
+
+ // Now that we've created a 4,096 byte box, test READ budget
+ // It works at the start, because call still has 4 brs.
+ dl.txn(call.Args("check", "x", "\x00"))
+ call.Boxes = call.Boxes[:3]
+ dl.txn(call.Args("check", "x", "\x00"),
+ "box read budget (3072) exceeded")
+
+ // Give a budget over 32768, confirm failure anyway
+ empties := [32]transactions.BoxRef{}
+ // These tests skip WellFormed, so the huge Boxes is ok
+ call.Boxes = append(call.Boxes, empties[:]...)
+ dl.txn(call.Args("create", "x", "\x80\x01"), "box size too large") // 32769
+ })
+}
+
+// TestBoxInners tries various box manipulations through inner transactions
+func TestBoxInners(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ // Advance the creatable counter, so we don't have very low app ids that
+ // could be mistaken for indices into ForeignApps.
+ dl.txn(&txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]})
+ dl.txn(&txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]})
+ dl.txn(&txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]})
+ dl.txn(&txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]})
+
+ boxIndex := dl.fundedApp(addrs[0], 2_000_000, boxAppSource) // there are some big boxes made
+ passIndex := dl.fundedApp(addrs[0], 120_000, passThruSource) // lowish, show it's not paying for boxes
+ call := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: passIndex,
+ ForeignApps: []basics.AppIndex{boxIndex},
+ Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("x")}},
+ }
+ // The current Boxes gives top-level access to "x", not the inner app
+ dl.txn(call.Args("create", "x", "\x10"), // 8
+ "invalid Box reference x")
+
+ // This isn't right: Index should be index into ForeignApps
+ call.Boxes = []transactions.BoxRef{{Index: uint64(boxIndex), Name: []byte("x")}}
+ require.Error(t, call.Txn().WellFormed(transactions.SpecialAddresses{}, dl.generator.genesisProto))
+
+ call.Boxes = []transactions.BoxRef{{Index: 1, Name: []byte("x")}}
+ dl.txn(call.Args("create", "x", "\x10\x00"), // 4096
+ "write budget (1024) exceeded")
+ dl.txn(call.Args("create", "x", "\x04\x00")) // 1024
+ call.Boxes = append(call.Boxes, transactions.BoxRef{Index: 1, Name: []byte("y")})
+ dl.txn(call.Args("create", "y", "\x08\x00")) // 2048
+
+ require.Len(t, call.Boxes, 2)
+ setX := call.Args("set", "x", "A")
+ dl.txn(setX, "read budget") // Boxes has x and y, their combined length is too big
+ setX.Boxes = []transactions.BoxRef{{Index: 1, Name: []byte("x")}}
+ dl.txn(setX)
+
+ setY := call.Args("set", "y", "B")
+ dl.txn(setY, "read budget") // Boxes has x and y, their combined length is too big
+ setY.Boxes = []transactions.BoxRef{{Index: 1, Name: []byte("y")}}
+ dl.txn(setY, "read budget") // Y is bigger needs more than 1 br
+ // We recommend "empty" br, but a duplicate is also ok
+ setY.Boxes = append(setY.Boxes, transactions.BoxRef{Index: 1, Name: []byte("y")})
+ dl.txn(setY) // len(y) = 2048, io budget is 2*1024 right now
+
+ // non-existent box also works
+ setY.Boxes = []transactions.BoxRef{{Index: 1, Name: []byte("y")}, {Index: 0, Name: []byte("nope")}}
+ dl.txn(setY) // len(y) = 2048, io budget is 2*1024 right now
+
+ // now show can read both boxes based on brs in tx1
+ checkX := call.Args("check", "x", "A")
+ checkX.Boxes = nil
+ checkY := call.Args("check", "y", "B")
+ require.Len(t, checkY.Boxes, 2)
+ // can't see x and y because read budget is only 2*1024
+ dl.txgroup("box read budget", checkX, checkY)
+ checkY.Boxes = append(checkY.Boxes, transactions.BoxRef{})
+ dl.txgroup("", checkX, checkY)
+
+ require.Len(t, setY.Boxes, 2) // recall that setY has ("y", "nope") right now. no "x"
+ dl.txgroup("invalid Box reference x", checkX, setY)
+
+ setY.Boxes = append(setY.Boxes, transactions.BoxRef{Index: 1, Name: []byte("x")})
+ dl.txgroup("", checkX, setY)
+
+ // Cleanup
+ dl.txn(call.Args("del", "x"), "read budget")
+ dl.txn(call.Args("del", "y"), "read budget")
+ // surprising but correct: they work when combined, because both txns
+ // have both box refs, so the read budget goes up.
+ dl.txgroup("", call.Args("delete", "x"), call.Args("delete", "y"))
+
+ // Try some get/put action
+ dl.txn(call.Args("put", "x", "john doe"))
+ vb := dl.fullBlock(call.Args("get", "x"))
+ // we are passing this thru to the underlying box app which logs the get
+ require.Equal(t, "john doe", vb.Block().Payset[0].ApplyData.EvalDelta.InnerTxns[0].EvalDelta.Logs[0])
+ dl.txn(call.Args("check", "x", "john"))
+
+ // bad change because of length
+ dl.txn(call.Args("put", "x", "steve doe"), "box_put wrong size")
+ vb = dl.fullBlock(call.Args("get", "x"))
+ require.Equal(t, "john doe", vb.Block().Payset[0].ApplyData.EvalDelta.InnerTxns[0].EvalDelta.Logs[0])
+
+ // good change
+ dl.txn(call.Args("put", "x", "mark doe"))
+ dl.txn(call.Args("check", "x", "mark d"))
+ })
+}
diff --git a/ledger/catchpointfileheader.go b/ledger/catchpointfileheader.go
index 741e13aa0..3b4600270 100644
--- a/ledger/catchpointfileheader.go
+++ b/ledger/catchpointfileheader.go
@@ -33,6 +33,7 @@ type CatchpointFileHeader struct {
Totals ledgercore.AccountTotals `codec:"accountTotals"`
TotalAccounts uint64 `codec:"accountsCount"`
TotalChunks uint64 `codec:"chunksCount"`
+ TotalKVs uint64 `codec:"kvsCount"`
Catchpoint string `codec:"catchpoint"`
BlockHeaderDigest crypto.Digest `codec:"blockHeaderDigest"`
}
diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go
index 5cfe0f3c4..584a9447d 100644
--- a/ledger/catchpointtracker.go
+++ b/ledger/catchpointtracker.go
@@ -18,6 +18,7 @@ package ledger
import (
"archive/tar"
+ "bytes"
"compress/gzip"
"context"
"database/sql"
@@ -62,9 +63,6 @@ const (
trieRebuildAccountChunkSize = 16384
// trieRebuildCommitFrequency defines the number of accounts that would get added before we call evict to commit the changes and adjust the memory cache.
trieRebuildCommitFrequency = 65536
- // trieAccumulatedChangesFlush defines the number of pending changes that would be applied to the merkle trie before
- // we attempt to commit them to disk while writing a batch of rounds balances to disk.
- trieAccumulatedChangesFlush = 256
// CatchpointDirName represents the directory name in which all the catchpoints files are stored
CatchpointDirName = "catchpoints"
@@ -212,17 +210,18 @@ func (ct *catchpointTracker) GetLastCatchpointLabel() string {
func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basics.Round, updatingBalancesDuration time.Duration) error {
ct.log.Infof("finishing catchpoint's first stage dbRound: %d", dbRound)
+ var totalKVs uint64
var totalAccounts uint64
var totalChunks uint64
var biggestChunkLen uint64
if ct.enableGeneratingCatchpointFiles {
- // Generate the catchpoint file. This need to be done inline so that it will
- // block any new accounts that from being written. generateCatchpointData()
- // expects that the accounts data would not be modified in the background during
- // it's execution.
+ // Generate the catchpoint file. This is done inline so that it will
+ // block any new accounts from being written. generateCatchpointData()
+ // expects that the accounts data would not be modified in the
+ // background during its execution.
var err error
- totalAccounts, totalChunks, biggestChunkLen, err = ct.generateCatchpointData(
+ totalKVs, totalAccounts, totalChunks, biggestChunkLen, err = ct.generateCatchpointData(
ctx, dbRound, updatingBalancesDuration)
atomic.StoreInt32(&ct.catchpointDataWriting, 0)
if err != nil {
@@ -231,7 +230,7 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic
}
f := func(ctx context.Context, tx *sql.Tx) error {
- err := ct.recordFirstStageInfo(ctx, tx, dbRound, totalAccounts, totalChunks, biggestChunkLen)
+ err := ct.recordFirstStageInfo(ctx, tx, dbRound, totalKVs, totalAccounts, totalChunks, biggestChunkLen)
if err != nil {
return err
}
@@ -340,7 +339,7 @@ func (ct *catchpointTracker) loadFromDisk(l ledgerForTracker, dbRound basics.Rou
close(ct.catchpointDataSlowWriting)
err = ct.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- return ct.accountsInitializeHashes(ctx, tx, dbRound)
+ return ct.initializeHashes(ctx, tx, dbRound)
})
if err != nil {
return err
@@ -534,7 +533,7 @@ func (ct *catchpointTracker) commitRound(ctx context.Context, tx *sql.Tx, dcc *d
dcc.stats.MerkleTrieUpdateDuration = time.Duration(time.Now().UnixNano())
}
- err = ct.accountsUpdateBalances(dcc.compactAccountDeltas, dcc.compactResourcesDeltas)
+ err = ct.accountsUpdateBalances(dcc.compactAccountDeltas, dcc.compactResourcesDeltas, dcc.compactKvDeltas, dcc.oldBase, dcc.newBase)
if err != nil {
return err
}
@@ -647,6 +646,11 @@ func doRepackCatchpoint(ctx context.Context, header CatchpointFileHeader, bigges
}
}
+// repackCatchpoint takes the header (that must be made "late" in order to have
+// the latest blockhash) and the (snappy compressed) catchpoint data from
+// dataPath and regurgitates it to look like catchpoints have always looked - a
+// tar file with the header in the first "file" and the catchpoint data in file
+// chunks, all compressed with gzip instead of snappy.
func repackCatchpoint(ctx context.Context, header CatchpointFileHeader, biggestChunkLen uint64, dataPath string, outPath string) error {
// Initialize streams.
fin, err := os.OpenFile(dataPath, os.O_RDONLY, 0666)
@@ -758,6 +762,7 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound
BlocksRound: round,
Totals: dataInfo.Totals,
TotalAccounts: dataInfo.TotalAccounts,
+ TotalKVs: dataInfo.TotalKVs,
TotalChunks: dataInfo.TotalChunks,
Catchpoint: label,
BlockHeaderDigest: blockHash,
@@ -796,6 +801,7 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound
ct.log.With("accountsRound", accountsRound).
With("writingDuration", uint64(time.Since(startTime).Nanoseconds())).
With("accountsCount", dataInfo.TotalAccounts).
+ With("kvsCount", dataInfo.TotalKVs).
With("fileSize", fileInfo.Size()).
With("catchpointLabel", label).
Infof("Catchpoint file was created")
@@ -925,7 +931,7 @@ func (ct *catchpointTracker) close() {
}
// accountsUpdateBalances applies the given compactAccountDeltas to the merkle trie
-func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccountDeltas, resourcesDeltas compactResourcesDeltas) (err error) {
+func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccountDeltas, resourcesDeltas compactResourcesDeltas, kvDeltas map[string]modifiedKvValue, oldBase basics.Round, newBase basics.Round) (err error) {
if !ct.catchpointEnabled() {
return nil
}
@@ -965,15 +971,10 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun
resDelta := resourcesDeltas.getByIdx(i)
addr := resDelta.address
if !resDelta.oldResource.data.IsEmpty() {
- var ctype basics.CreatableType
- if resDelta.oldResource.data.IsAsset() {
- ctype = basics.AssetCreatable
- } else if resDelta.oldResource.data.IsApp() {
- ctype = basics.AppCreatable
- } else {
- return fmt.Errorf("unknown old creatable for addr %s (%d), aidx %d, data %v", addr.String(), resDelta.oldResource.addrid, resDelta.oldResource.aidx, resDelta.oldResource.data)
+ deleteHash, err := resourcesHashBuilderV6(&resDelta.oldResource.data, addr, resDelta.oldResource.aidx, resDelta.oldResource.data.UpdateRound, protocol.Encode(&resDelta.oldResource.data))
+ if err != nil {
+ return err
}
- deleteHash := resourcesHashBuilderV6(addr, resDelta.oldResource.aidx, ctype, uint64(resDelta.oldResource.data.UpdateRound), protocol.Encode(&resDelta.oldResource.data))
deleted, err = ct.balancesTrie.Delete(deleteHash)
if err != nil {
return fmt.Errorf("failed to delete resource hash '%s' from merkle trie for account %v: %w", hex.EncodeToString(deleteHash), addr, err)
@@ -986,15 +987,10 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun
}
if !resDelta.newResource.IsEmpty() {
- var ctype basics.CreatableType
- if resDelta.newResource.IsAsset() {
- ctype = basics.AssetCreatable
- } else if resDelta.newResource.IsApp() {
- ctype = basics.AppCreatable
- } else {
- return fmt.Errorf("unknown new creatable for addr %s, aidx %d, data %v", addr.String(), resDelta.oldResource.aidx, resDelta.newResource)
+ addHash, err := resourcesHashBuilderV6(&resDelta.newResource, addr, resDelta.oldResource.aidx, resDelta.newResource.UpdateRound, protocol.Encode(&resDelta.newResource))
+ if err != nil {
+ return err
}
- addHash := resourcesHashBuilderV6(addr, resDelta.oldResource.aidx, ctype, uint64(resDelta.newResource.UpdateRound), protocol.Encode(&resDelta.newResource))
added, err = ct.balancesTrie.Add(addHash)
if err != nil {
return fmt.Errorf("attempted to add duplicate resource hash '%s' to merkle trie for account %v: %w", hex.EncodeToString(addHash), addr, err)
@@ -1007,19 +1003,68 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun
}
}
- if accumulatedChanges >= trieAccumulatedChangesFlush {
- accumulatedChanges = 0
- _, err = ct.balancesTrie.Commit()
- if err != nil {
- return
+ for key, mv := range kvDeltas {
+ if mv.oldData == nil && mv.data == nil { // Came and went within the delta span
+ continue
+ }
+ if mv.oldData != nil {
+ // reminder: check mv.data for nil here, b/c bytes.Equal conflates nil and "".
+ if mv.data != nil && bytes.Equal(mv.oldData, mv.data) {
+ continue // changed back within the delta span
+ }
+ deleteHash := kvHashBuilderV6(key, mv.oldData)
+ deleted, err = ct.balancesTrie.Delete(deleteHash)
+ if err != nil {
+ return fmt.Errorf("failed to delete kv hash '%s' from merkle trie for key %v: %w", hex.EncodeToString(deleteHash), key, err)
+ }
+ if !deleted {
+ ct.log.Warnf("failed to delete kv hash '%s' from merkle trie for key %v", hex.EncodeToString(deleteHash), key)
+ } else {
+ accumulatedChanges++
+ }
+ }
+
+ if mv.data != nil {
+ addHash := kvHashBuilderV6(key, mv.data)
+ added, err = ct.balancesTrie.Add(addHash)
+ if err != nil {
+ return fmt.Errorf("attempted to add duplicate kv hash '%s' from merkle trie for key %v: %w", hex.EncodeToString(addHash), key, err)
+ }
+ if !added {
+ ct.log.Warnf("attempted to add duplicate kv hash '%s' from merkle trie for key %v", hex.EncodeToString(addHash), key)
+ } else {
+ accumulatedChanges++
+ }
}
}
// write it all to disk.
+ var cstats merkletrie.CommitStats
if accumulatedChanges > 0 {
- _, err = ct.balancesTrie.Commit()
+ cstats, err = ct.balancesTrie.Commit()
}
+ if ct.log.GetTelemetryEnabled() {
+ root, rootErr := ct.balancesTrie.RootHash()
+ if rootErr != nil {
+ ct.log.Infof("accountsUpdateBalances: error retrieving balances trie root: %v", rootErr)
+ return
+ }
+ ct.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.CatchpointRootUpdateEvent, telemetryspec.CatchpointRootUpdateEventDetails{
+ Root: root.String(),
+ OldBase: uint64(oldBase),
+ NewBase: uint64(newBase),
+ NewPageCount: cstats.NewPageCount,
+ NewNodeCount: cstats.NewNodeCount,
+ UpdatedPageCount: cstats.UpdatedPageCount,
+ UpdatedNodeCount: cstats.UpdatedNodeCount,
+ DeletedPageCount: cstats.DeletedPageCount,
+ FanoutReallocatedNodeCount: cstats.FanoutReallocatedNodeCount,
+ PackingReallocatedNodeCount: cstats.PackingReallocatedNodeCount,
+ LoadedPages: cstats.LoadedPages,
+ })
+
+ }
return
}
@@ -1030,7 +1075,7 @@ func (ct *catchpointTracker) IsWritingCatchpointDataFile() bool {
}
// Generates a (first stage) catchpoint data file.
-func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, accountsRound basics.Round, updatingBalancesDuration time.Duration) (uint64 /*totalAccounts*/, uint64 /*totalChunks*/, uint64 /*biggestChunkLen*/, error) {
+func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, accountsRound basics.Round, updatingBalancesDuration time.Duration) (totalKVs, totalAccounts, totalChunks, biggestChunkLen uint64, err error) {
ct.log.Debugf("catchpointTracker.generateCatchpointData() writing catchpoint accounts for round %d", accountsRound)
startTime := time.Now()
@@ -1056,13 +1101,13 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account
var catchpointWriter *catchpointWriter
start := time.Now()
ledgerGeneratecatchpointCount.Inc(nil)
- err := ct.dbs.Rdb.Atomic(func(dbCtx context.Context, tx *sql.Tx) (err error) {
- catchpointWriter, err = makeCatchpointWriter(ctx, catchpointDataFilePath, tx, DefaultMaxResourcesPerChunk)
+ err = ct.dbs.Rdb.AtomicContext(ctx, func(dbCtx context.Context, tx *sql.Tx) (err error) {
+ catchpointWriter, err = makeCatchpointWriter(dbCtx, catchpointDataFilePath, tx, ResourcesPerCatchpointFileChunk)
if err != nil {
return
}
for more {
- stepCtx, stepCancelFunction := context.WithTimeout(ctx, chunkExecutionDuration)
+ stepCtx, stepCancelFunction := context.WithTimeout(dbCtx, chunkExecutionDuration)
writeStepStartTime := time.Now()
more, err = catchpointWriter.WriteStep(stepCtx)
// accumulate the actual time we've spent writing in this step.
@@ -1084,7 +1129,7 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account
if chunkExecutionDuration > longChunkExecutionDuration {
chunkExecutionDuration = longChunkExecutionDuration
}
- case <-ctx.Done():
+ case <-dbCtx.Done():
//retryCatchpointCreation = true
err2 := catchpointWriter.Abort()
if err2 != nil {
@@ -1111,26 +1156,28 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account
ledgerGeneratecatchpointMicros.AddMicrosecondsSince(start, nil)
if err != nil {
ct.log.Warnf("catchpointTracker.generateCatchpointData() %v", err)
- return 0, 0, 0, err
+ return 0, 0, 0, 0, err
}
- catchpointGenerationStats.FileSize = uint64(catchpointWriter.GetSize())
+ catchpointGenerationStats.FileSize = uint64(catchpointWriter.writtenBytes)
catchpointGenerationStats.WritingDuration = uint64(time.Since(startTime).Nanoseconds())
- catchpointGenerationStats.AccountsCount = catchpointWriter.GetTotalAccounts()
+ catchpointGenerationStats.AccountsCount = catchpointWriter.totalAccounts
+ catchpointGenerationStats.KVsCount = catchpointWriter.totalKVs
ct.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.CatchpointGenerationEvent, catchpointGenerationStats)
ct.log.With("accountsRound", accountsRound).
With("writingDuration", catchpointGenerationStats.WritingDuration).
With("CPUTime", catchpointGenerationStats.CPUTime).
With("balancesWriteTime", catchpointGenerationStats.BalancesWriteTime).
With("accountsCount", catchpointGenerationStats.AccountsCount).
+ With("kvsCount", catchpointGenerationStats.KVsCount).
With("fileSize", catchpointGenerationStats.FileSize).
With("catchpointLabel", catchpointGenerationStats.CatchpointLabel).
Infof("Catchpoint data file was generated")
- return catchpointWriter.GetTotalAccounts(), catchpointWriter.GetTotalChunks(), catchpointWriter.GetBiggestChunkLen(), nil
+ return catchpointWriter.totalKVs, catchpointWriter.totalAccounts, catchpointWriter.chunkNum, catchpointWriter.biggestChunkLen, nil
}
-func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx *sql.Tx, accountsRound basics.Round, totalAccounts uint64, totalChunks uint64, biggestChunkLen uint64) error {
+func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx *sql.Tx, accountsRound basics.Round, totalKVs uint64, totalAccounts uint64, totalChunks uint64, biggestChunkLen uint64) error {
accountTotals, err := accountsTotals(ctx, tx, false)
if err != nil {
return err
@@ -1159,6 +1206,7 @@ func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx *sql.T
info := catchpointFirstStageInfo{
Totals: accountTotals,
TotalAccounts: totalAccounts,
+ TotalKVs: totalKVs,
TotalChunks: totalChunks,
BiggestChunkLen: biggestChunkLen,
TrieBalancesHash: trieBalancesHash,
@@ -1357,47 +1405,98 @@ func removeSingleCatchpointFileFromDisk(dbDirectory, fileToDelete string) (err e
return nil
}
+func hashBufV6(affinity uint64, kind hashKind) []byte {
+ hash := make([]byte, 4+crypto.DigestSize)
+ // write out the lowest 32 bits of the affinity value. This should improve
+ // the caching of the trie by allowing recent updates to be in-cache, and
+ // "older" nodes will be left alone.
+ for i, prefix := 3, affinity; i >= 0; i, prefix = i-1, prefix>>8 {
+ // the following takes the prefix & 255 -> hash[i]
+ hash[i] = byte(prefix)
+ }
+ hash[hashKindEncodingIndex] = byte(kind)
+ return hash
+}
+
+func finishV6(v6hash []byte, prehash []byte) []byte {
+ entryHash := crypto.Hash(prehash)
+ copy(v6hash[5:], entryHash[1:])
+ return v6hash[:]
+
+}
+
// accountHashBuilderV6 calculates the hash key used for the trie by combining the account address and the account data
func accountHashBuilderV6(addr basics.Address, accountData *baseAccountData, encodedAccountData []byte) []byte {
- hash := make([]byte, 4+crypto.DigestSize)
hashIntPrefix := accountData.UpdateRound
if hashIntPrefix == 0 {
hashIntPrefix = accountData.RewardsBase
}
+ hash := hashBufV6(hashIntPrefix, accountHK)
// write out the lowest 32 bits of the reward base. This should improve the caching of the trie by allowing
	// recent updates to be in-cache, and "older" nodes will be left alone.
- for i, prefix := 3, hashIntPrefix; i >= 0; i, prefix = i-1, prefix>>8 {
- // the following takes the prefix & 255 -> hash[i]
- hash[i] = byte(prefix)
- }
- hash[4] = 0 // set the 5th byte to zero to indicate it's a account base record hash
prehash := make([]byte, crypto.DigestSize+len(encodedAccountData))
copy(prehash[:], addr[:])
copy(prehash[crypto.DigestSize:], encodedAccountData[:])
- entryHash := crypto.Hash(prehash)
- copy(hash[5:], entryHash[1:])
- return hash[:]
+
+ return finishV6(hash, prehash)
}
-// accountHashBuilderV6 calculates the hash key used for the trie by combining the account address and the account data
-func resourcesHashBuilderV6(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType, updateRound uint64, encodedResourceData []byte) []byte {
- hash := make([]byte, 4+crypto.DigestSize)
- // write out the lowest 32 bits of the reward base. This should improve the caching of the trie by allowing
- // recent updated to be in-cache, and "older" nodes will be left alone.
- for i, prefix := 3, updateRound; i >= 0; i, prefix = i-1, prefix>>8 {
- // the following takes the prefix & 255 -> hash[i]
- hash[i] = byte(prefix)
+// hashKind enumerates the possible data types hashed into a catchpoint merkle
+// trie. Each merkle trie hash includes the hashKind byte at a known offset.
+// By encoding hashKind at a known offset, it's possible for hash readers to
+// disambiguate the hashed resource.
+//go:generate stringer -type=hashKind
+type hashKind byte
+
+// Defines known kinds of hashes. Changing an enum ordinal value is a
+// breaking change.
+const (
+ accountHK hashKind = iota
+ assetHK
+ appHK
+ kvHK
+)
+
+// hashKindEncodingIndex defines the []byte offset where the hash kind is
+// encoded.
+const hashKindEncodingIndex = 4
+
+func rdGetCreatableHashKind(rd *resourcesData, a basics.Address, ci basics.CreatableIndex) (hashKind, error) {
+ if rd.IsAsset() {
+ return assetHK, nil
+ } else if rd.IsApp() {
+ return appHK, nil
+ }
+ return accountHK, fmt.Errorf("unknown creatable for addr %s, aidx %d, data %v", a.String(), ci, rd)
+}
+
+// resourcesHashBuilderV6 calculates the hash key used for the trie by combining the creatable's resource data and its index
+func resourcesHashBuilderV6(rd *resourcesData, addr basics.Address, cidx basics.CreatableIndex, updateRound uint64, encodedResourceData []byte) ([]byte, error) {
+ hk, err := rdGetCreatableHashKind(rd, addr, cidx)
+ if err != nil {
+ return nil, err
}
- hash[4] = byte(ctype + 1) // set the 5th byte to one or two ( asset / application ) so we could differentiate the hashes.
+
+ hash := hashBufV6(updateRound, hk)
prehash := make([]byte, 8+crypto.DigestSize+len(encodedResourceData))
copy(prehash[:], addr[:])
binary.LittleEndian.PutUint64(prehash[crypto.DigestSize:], uint64(cidx))
copy(prehash[crypto.DigestSize+8:], encodedResourceData[:])
- entryHash := crypto.Hash(prehash)
- copy(hash[5:], entryHash[1:])
- return hash[:]
+
+ return finishV6(hash, prehash), nil
+}
+
+// kvHashBuilderV6 calculates the hash key used for the trie by combining the key and value
+func kvHashBuilderV6(key string, value []byte) []byte {
+ hash := hashBufV6(0, kvHK)
+
+ prehash := make([]byte, len(key)+len(value))
+ copy(prehash[:], key)
+ copy(prehash[len(key):], value)
+
+ return finishV6(hash, prehash)
}
// accountHashBuilder calculates the hash key used for the trie by combining the account address and the account data
@@ -1418,9 +1517,9 @@ func (ct *catchpointTracker) catchpointEnabled() bool {
return ct.catchpointInterval != 0
}
-// accountsInitializeHashes initializes account hashes.
+// initializeHashes initializes account/resource/kv hashes.
// as part of the initialization, it tests if a hash table matches to account base and updates the former.
-func (ct *catchpointTracker) accountsInitializeHashes(ctx context.Context, tx *sql.Tx, rnd basics.Round) error {
+func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx *sql.Tx, rnd basics.Round) error {
hashRound, err := accountsHashRound(ctx, tx)
if err != nil {
return err
@@ -1442,24 +1541,24 @@ func (ct *catchpointTracker) accountsInitializeHashes(ctx context.Context, tx *s
// create the merkle trie for the balances
committer, err := MakeMerkleCommitter(tx, false)
if err != nil {
- return fmt.Errorf("accountsInitialize was unable to makeMerkleCommitter: %v", err)
+ return fmt.Errorf("initializeHashes was unable to makeMerkleCommitter: %v", err)
}
trie, err := merkletrie.MakeTrie(committer, TrieMemoryConfig)
if err != nil {
- return fmt.Errorf("accountsInitialize was unable to MakeTrie: %v", err)
+ return fmt.Errorf("initializeHashes was unable to MakeTrie: %v", err)
}
// we might have a database that was previously initialized, and now we're adding the balances trie. In that case, we need to add all the existing balances to this trie.
// we can figure this out by examining the hash of the root:
rootHash, err := trie.RootHash()
if err != nil {
- return fmt.Errorf("accountsInitialize was unable to retrieve trie root hash: %v", err)
+ return fmt.Errorf("initializeHashes was unable to retrieve trie root hash: %v", err)
}
if rootHash.IsZero() {
- ct.log.Infof("accountsInitialize rebuilding merkle trie for round %d", rnd)
- accountBuilderIt := makeOrderedAccountsIter(tx, trieRebuildAccountChunkSize, DefaultMaxResourcesPerChunk)
+ ct.log.Infof("initializeHashes rebuilding merkle trie for round %d", rnd)
+ accountBuilderIt := makeOrderedAccountsIter(tx, trieRebuildAccountChunkSize)
defer accountBuilderIt.Close(ctx)
startTrieBuildTime := time.Now()
trieHashCount := 0
@@ -1481,16 +1580,16 @@ func (ct *catchpointTracker) accountsInitializeHashes(ctx context.Context, tx *s
for _, acct := range accts {
added, err := trie.Add(acct.digest)
if err != nil {
- return fmt.Errorf("accountsInitialize was unable to add changes to trie: %v", err)
+ return fmt.Errorf("initializeHashes was unable to add acct to trie: %v", err)
}
if !added {
- // we need to transalate the "addrid" into actual account address so that
+ // we need to translate the "addrid" into actual account address so that
// we can report the failure.
addr, err := lookupAccountAddressFromAddressID(ctx, tx, acct.addrid)
if err != nil {
- ct.log.Warnf("accountsInitialize attempted to add duplicate hash '%s' to merkle trie for account id %d : %v", hex.EncodeToString(acct.digest), acct.addrid, err)
+ ct.log.Warnf("initializeHashes attempted to add duplicate acct hash '%s' to merkle trie for account id %d : %v", hex.EncodeToString(acct.digest), acct.addrid, err)
} else {
- ct.log.Warnf("accountsInitialize attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(acct.digest), addr)
+ ct.log.Warnf("initializeHashes attempted to add duplicate acct hash '%s' to merkle trie for account %v", hex.EncodeToString(acct.digest), addr)
}
}
}
@@ -1500,14 +1599,14 @@ func (ct *catchpointTracker) accountsInitializeHashes(ctx context.Context, tx *s
// if anything goes wrong, it will still get rolled back.
_, err = trie.Evict(true)
if err != nil {
- return fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err)
+ return fmt.Errorf("initializeHashes was unable to commit changes to trie: %v", err)
}
pendingTrieHashes = 0
}
if time.Since(lastRebuildTime) > 5*time.Second {
// let the user know that the trie is still being rebuilt.
- ct.log.Infof("accountsInitialize still building the trie, and processed so far %d trie entries", trieHashCount)
+ ct.log.Infof("initializeHashes still building the trie, and processed so far %d trie entries", trieHashCount)
lastRebuildTime = time.Now()
}
} else if processedRows > 0 {
@@ -1515,7 +1614,7 @@ func (ct *catchpointTracker) accountsInitializeHashes(ctx context.Context, tx *s
// if it's not ordered, we can ignore it for now; we'll just increase the counters and emit logs periodically.
if time.Since(lastRebuildTime) > 5*time.Second {
// let the user know that the trie is still being rebuilt.
- ct.log.Infof("accountsInitialize still building the trie, and hashed so far %d accounts", totalOrderedAccounts)
+ ct.log.Infof("initializeHashes still building the trie, and hashed so far %d accounts", totalOrderedAccounts)
lastRebuildTime = time.Now()
}
}
@@ -1525,16 +1624,59 @@ func (ct *catchpointTracker) accountsInitializeHashes(ctx context.Context, tx *s
// if anything goes wrong, it will still get rolled back.
_, err = trie.Evict(true)
if err != nil {
- return fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err)
+ return fmt.Errorf("initializeHashes was unable to commit changes to trie: %v", err)
+ }
+
+ // Now add the kvstore hashes
+ pendingTrieHashes = 0
+ kvs, err := tx.QueryContext(ctx, "SELECT key, value FROM kvstore")
+ if err != nil {
+ return err
+ }
+ defer kvs.Close()
+ for kvs.Next() {
+ var k []byte
+ var v []byte
+ err := kvs.Scan(&k, &v)
+ if err != nil {
+ return err
+ }
+ hash := kvHashBuilderV6(string(k), v)
+ trieHashCount++
+ pendingTrieHashes++
+ added, err := trie.Add(hash)
+ if err != nil {
+ return fmt.Errorf("initializeHashes was unable to add kv (key=%s) to trie: %v", hex.EncodeToString(k), err)
+ }
+ if !added {
+ ct.log.Warnf("initializeHashes attempted to add duplicate kv hash '%s' to merkle trie for key %s", hex.EncodeToString(hash), k)
+ }
+ if pendingTrieHashes >= trieRebuildCommitFrequency {
+ // this trie Evict will commit using the current transaction.
+ // if anything goes wrong, it will still get rolled back.
+ _, err = trie.Evict(true)
+ if err != nil {
+ return fmt.Errorf("initializeHashes was unable to commit changes to trie: %v", err)
+ }
+ pendingTrieHashes = 0
+ }
+ // We could insert code to report things every 5 seconds, like was done for accounts.
+ }
+
+ // this trie Evict will commit using the current transaction.
+ // if anything goes wrong, it will still get rolled back.
+ _, err = trie.Evict(true)
+ if err != nil {
+ return fmt.Errorf("initializeHashes was unable to commit changes to trie: %v", err)
}
// we've just updated the merkle trie, update the hashRound to reflect that.
err = updateAccountsHashRound(ctx, tx, rnd)
if err != nil {
- return fmt.Errorf("accountsInitialize was unable to update the account hash round to %d: %v", rnd, err)
+ return fmt.Errorf("initializeHashes was unable to update the account hash round to %d: %v", rnd, err)
}
- ct.log.Infof("accountsInitialize rebuilt the merkle trie with %d entries in %v", trieHashCount, time.Since(startTrieBuildTime))
+ ct.log.Infof("initializeHashes rebuilt the merkle trie with %d entries in %v", trieHashCount, time.Since(startTrieBuildTime))
}
ct.balancesTrie = trie
return nil
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go
index a12e6fa9a..7e9b4d19a 100644
--- a/ledger/catchpointtracker_test.go
+++ b/ledger/catchpointtracker_test.go
@@ -19,6 +19,7 @@ package ledger
import (
"context"
"database/sql"
+ "encoding/hex"
"errors"
"fmt"
"os"
@@ -296,7 +297,7 @@ func TestRecordCatchpointFile(t *testing.T) {
for _, round := range []basics.Round{2000000, 3000010, 3000015, 3000020} {
accountsRound := round - 1
- _, _, biggestChunkLen, err := ct.generateCatchpointData(
+ _, _, _, biggestChunkLen, err := ct.generateCatchpointData(
context.Background(), accountsRound, time.Second)
require.NoError(t, err)
@@ -362,7 +363,7 @@ func BenchmarkLargeCatchpointDataWriting(b *testing.B) {
i++
}
- _, _, err = accountsNewRound(tx, updates, compactResourcesDeltas{}, nil, proto, basics.Round(1))
+ _, _, _, err = accountsNewRound(tx, updates, compactResourcesDeltas{}, nil, nil, proto, basics.Round(1))
if err != nil {
return
}
@@ -1340,3 +1341,140 @@ func TestSecondStageDeletesUnfinishedCatchpointRecordAfterRestart(t *testing.T)
require.NoError(t, err)
require.Empty(t, unfinishedCatchpoints)
}
+
+// TestHashContract confirms the account, resource, and KV hashing algorithm
+// remains unchanged by comparing a newly calculated hash against a
+// known-to-be-correct hex-encoded hash.
+//
+// When the test fails a hash equality check, it implies a hash calculated
+// before the change != hash calculated now. Accepting the new hash risks
+// breaking backwards compatibility.
+//
+// The test also confirms each hashKind has at least 1 test case. The check
+// defends against the addition of a hashed data type without test coverage.
+func TestHashContract(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ type testCase struct {
+ genHash func() []byte
+ expectedHex string
+ expectedHashKind hashKind
+ }
+
+ accountCase := func(genHash func() []byte, expectedHex string) testCase {
+ return testCase{
+ genHash, expectedHex, accountHK,
+ }
+ }
+
+ resourceAssetCase := func(genHash func() []byte, expectedHex string) testCase {
+ return testCase{
+ genHash, expectedHex, assetHK,
+ }
+ }
+
+ resourceAppCase := func(genHash func() []byte, expectedHex string) testCase {
+ return testCase{
+ genHash, expectedHex, appHK,
+ }
+ }
+
+ kvCase := func(genHash func() []byte, expectedHex string) testCase {
+ return testCase{
+ genHash, expectedHex, kvHK,
+ }
+ }
+
+ a := basics.Address{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21}
+
+ accounts := []testCase{
+ accountCase(
+ func() []byte {
+ b := baseAccountData{
+ UpdateRound: 1024,
+ }
+ return accountHashBuilderV6(a, &b, protocol.Encode(&b))
+ },
+ "0000040000c3c39a72c146dc6bcb87b499b63ef730145a8fe4a187c96e9a52f74ef17f54",
+ ),
+ accountCase(
+ func() []byte {
+ b := baseAccountData{
+ RewardsBase: 10000,
+ }
+ return accountHashBuilderV6(a, &b, protocol.Encode(&b))
+ },
+ "0000271000804b58bcc81190c3c7343c1db9c737621ff0438104bdd20a25d12aa4e9b6e5",
+ ),
+ }
+
+ resourceAssets := []testCase{
+ resourceAssetCase(
+ func() []byte {
+ r := resourcesData{
+ Amount: 1000,
+ Decimals: 3,
+ AssetName: "test",
+ Manager: a,
+ }
+
+ bytes, err := resourcesHashBuilderV6(&r, a, 7, 1024, protocol.Encode(&r))
+ require.NoError(t, err)
+ return bytes
+ },
+ "0000040001ca4157130516bd7f120cef4b3a28715e464d9a29f7575db9b2173b4eccd18e",
+ ),
+ }
+
+ resourceApps := []testCase{
+ resourceAppCase(
+ func() []byte {
+ r := resourcesData{
+ ApprovalProgram: []byte{1, 3, 10, 15},
+ ClearStateProgram: []byte{15, 10, 3, 1},
+ LocalStateSchemaNumUint: 2,
+ GlobalStateSchemaNumUint: 2,
+ }
+
+ bytes, err := resourcesHashBuilderV6(&r, a, 7, 1024, protocol.Encode(&r))
+ require.NoError(t, err)
+ return bytes
+ },
+ "00000400023547567f3234873b48fd4152f296a92ae260b024b93c2408f35caccff57c32",
+ ),
+ }
+
+ kvs := []testCase{
+ kvCase(
+ func() []byte {
+ return kvHashBuilderV6("sample key", []byte("sample value"))
+ },
+ "0000000003cca3d1a8d7d724daa445c795ad277a7a64b351b4b9407f738841282f9c348b",
+ ),
+ }
+
+ allCases := append(append(append(accounts, resourceAssets...), resourceApps...), kvs...)
+ for i, tc := range allCases {
+ t.Run(fmt.Sprintf("index=%d", i), func(t *testing.T) {
+ h := tc.genHash()
+ require.Equal(t, byte(tc.expectedHashKind), h[hashKindEncodingIndex])
+ require.Equal(t, tc.expectedHex, hex.EncodeToString(h))
+ })
+ }
+
+ hasTestCoverageForKind := func(hk hashKind) bool {
+ for _, c := range allCases {
+ if c.expectedHashKind == hk {
+ return true
+ }
+ }
+ return false
+ }
+
+ for i := byte(0); i < 255; i++ {
+ if !strings.HasPrefix(hashKind(i).String(), "hashKind(") {
+ require.True(t, hasTestCoverageForKind(hashKind(i)), fmt.Sprintf("Missing test coverage for hashKind ordinal value = %d", i))
+ }
+ }
+}
diff --git a/ledger/catchpointwriter.go b/ledger/catchpointwriter.go
index 6f1e11dfe..c7f87961b 100644
--- a/ledger/catchpointwriter.go
+++ b/ledger/catchpointwriter.go
@@ -36,9 +36,15 @@ const (
// note that the last chunk would typically be less than this number.
BalancesPerCatchpointFileChunk = 512
- // DefaultMaxResourcesPerChunk defines the max number of resources that go in a singular chunk
- // 300000 resources * 300B/resource => roughly max 100MB per chunk
- DefaultMaxResourcesPerChunk = 300000
+ // ResourcesPerCatchpointFileChunk defines the max number of resources that go in a singular chunk
+ // 100,000 resources * 20KB/resource => roughly max 2GB per chunk if all of them are max'ed out apps.
+ // In reality most entries are asset holdings, and they are very small.
+ ResourcesPerCatchpointFileChunk = 100_000
+
+ // resourcesPerCatchpointFileChunkBackwardCompatible is the old value for ResourcesPerCatchpointFileChunk.
+ // Size of a single resource entry was underestimated to 300 bytes that holds only for assets and not for apps.
+	// It is safe to remove after April, 2023 since we are only supporting catchpoints that are 6 months old.
+ resourcesPerCatchpointFileChunkBackwardCompatible = 300_000
)
// catchpointWriter is the struct managing the persistence of accounts data into the catchpoint file.
@@ -50,24 +56,25 @@ type catchpointWriter struct {
tx *sql.Tx
filePath string
totalAccounts uint64
- totalChunks uint64
+ totalKVs uint64
file *os.File
tar *tar.Writer
compressor io.WriteCloser
- balancesChunk catchpointFileBalancesChunkV6
- balancesChunkNum uint64
- numAccountsProcessed uint64
+ chunk catchpointFileChunkV6
+ chunkNum uint64
writtenBytes int64
biggestChunkLen uint64
accountsIterator encodedAccountsBatchIter
maxResourcesPerChunk int
+ accountsDone bool
+ kvRows *sql.Rows
}
type encodedBalanceRecordV5 struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
Address basics.Address `codec:"pk,allocbound=crypto.DigestSize"`
- AccountData msgp.Raw `codec:"ad,allocbound=basics.MaxEncodedAccountDataSize"`
+ AccountData msgp.Raw `codec:"ad"` // encoding of basics.AccountData
}
type catchpointFileBalancesChunkV5 struct {
@@ -75,7 +82,7 @@ type catchpointFileBalancesChunkV5 struct {
Balances []encodedBalanceRecordV5 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"`
}
-// SortUint64 re-export this sort, which is implmented in basics, and being used by the msgp when
+// SortUint64 re-export this sort, which is implemented in basics, and being used by the msgp when
// encoding the resources map below.
type SortUint64 = basics.SortUint64
@@ -83,17 +90,43 @@ type encodedBalanceRecordV6 struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
Address basics.Address `codec:"a,allocbound=crypto.DigestSize"`
- AccountData msgp.Raw `codec:"b,allocbound=basics.MaxEncodedAccountDataSize"`
- Resources map[uint64]msgp.Raw `codec:"c,allocbound=basics.MaxEncodedAccountDataSize"`
+ AccountData msgp.Raw `codec:"b"` // encoding of baseAccountData
+ Resources map[uint64]msgp.Raw `codec:"c,allocbound=resourcesPerCatchpointFileChunkBackwardCompatible"` // map of resourcesData
// flag indicating whether there are more records for the same account coming up
ExpectingMoreEntries bool `codec:"e"`
}
-type catchpointFileBalancesChunkV6 struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
+// Adjust these to be big enough for boxes, but not directly tied to box values.
+const (
+ // For boxes: "bx:<8 bytes><64 byte name>"
+ encodedKVRecordV6MaxKeyLength = 128
+
+ // For boxes: MaxBoxSize
+ encodedKVRecordV6MaxValueLength = 32768
+
+ // MaxEncodedKVDataSize is the max size of serialized KV entry, checked with TestEncodedKVDataSize.
+ // Exact value is 32906 that is 10 bytes more than 32768 + 128
+ MaxEncodedKVDataSize = 33000
+)
+
+type encodedKVRecordV6 struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Key []byte `codec:"k,allocbound=encodedKVRecordV6MaxKeyLength"`
+ Value []byte `codec:"v,allocbound=encodedKVRecordV6MaxValueLength"`
+}
+
+type catchpointFileChunkV6 struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
Balances []encodedBalanceRecordV6 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"`
numAccounts uint64
+ KVs []encodedKVRecordV6 `codec:"kv,allocbound=BalancesPerCatchpointFileChunk"`
+}
+
+func (chunk catchpointFileChunkV6) empty() bool {
+ return len(chunk.Balances) == 0 && len(chunk.KVs) == 0
}
func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx, maxResourcesPerChunk int) (*catchpointWriter, error) {
@@ -102,6 +135,11 @@ func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx, maxR
return nil, err
}
+ totalKVs, err := totalKVs(ctx, tx)
+ if err != nil {
+ return nil, err
+ }
+
err = os.MkdirAll(filepath.Dir(filePath), 0700)
if err != nil {
return nil, err
@@ -121,7 +159,7 @@ func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx, maxR
tx: tx,
filePath: filePath,
totalAccounts: totalAccounts,
- totalChunks: (totalAccounts + BalancesPerCatchpointFileChunk - 1) / BalancesPerCatchpointFileChunk,
+ totalKVs: totalKVs,
file: file,
compressor: compressor,
tar: tar,
@@ -138,93 +176,110 @@ func (cw *catchpointWriter) Abort() error {
return os.Remove(cw.filePath)
}
+// WriteStep works for a short period of time (determined by stepCtx) to get
+// some more data (accounts/resources/kvpairs) by using readDatabaseStep, and
+// write that data to the open tar file in cw.tar. The writing is done in
+// asyncWriter, so that it can proceed concurrently with reading the data from
+// the db. asyncWriter only runs long enough to process the data read during a
+// single call to WriteStep, and WriteStep ensures that asyncWriter has finished
+// writing by waiting for it in a defer block, collecting any errors that may
+// have occurred during writing. Therefore, WriteStep looks like a simple
+// synchronous function to its callers.
func (cw *catchpointWriter) WriteStep(stepCtx context.Context) (more bool, err error) {
// have we timed-out / canceled by that point ?
if more, err = hasContextDeadlineExceeded(stepCtx); more || err != nil {
return
}
- writerRequest := make(chan catchpointFileBalancesChunkV6, 1)
+ writerRequest := make(chan catchpointFileChunkV6, 1)
writerResponse := make(chan error, 2)
- go cw.asyncWriter(writerRequest, writerResponse, cw.balancesChunkNum, cw.numAccountsProcessed)
+ go cw.asyncWriter(writerRequest, writerResponse, cw.chunkNum)
defer func() {
+ // For simplicity, all cleanup is done once, here. The writerRequest is
+ // closed, signaling asyncWriter that it can exit, and then
+ // writerResponse is drained, ensuring any problems from asyncWriter are
+ // noted (and that the writing is done).
close(writerRequest)
- // wait for the writerResponse to close.
+ drain:
for {
select {
case writerError, open := <-writerResponse:
if open {
err = writerError
} else {
- return
+ break drain
}
}
}
+ if !more {
+ // If we're done, close up the tar file and report on size
+ cw.tar.Close()
+ cw.compressor.Close()
+ cw.file.Close()
+ fileInfo, statErr := os.Stat(cw.filePath)
+ if statErr != nil {
+ err = statErr
+ }
+ cw.writtenBytes = fileInfo.Size()
+
+		// These don't HAVE to be closed, since the "owning" tx will be committed/rolled back
+ cw.accountsIterator.Close()
+ if cw.kvRows != nil {
+ cw.kvRows.Close()
+ cw.kvRows = nil
+ }
+ }
}()
for {
- // have we timed-out / canceled by that point ?
+ // have we timed-out or been canceled ?
if more, err = hasContextDeadlineExceeded(stepCtx); more || err != nil {
return
}
- if len(cw.balancesChunk.Balances) == 0 {
+ if cw.chunk.empty() {
err = cw.readDatabaseStep(cw.ctx, cw.tx)
if err != nil {
return
}
+ // readDatabaseStep yielded nothing, we're done
+ if cw.chunk.empty() {
+ return false, nil
+ }
}
- // have we timed-out / canceled by that point ?
+ // have we timed-out or been canceled ?
if more, err = hasContextDeadlineExceeded(stepCtx); more || err != nil {
return
}
// check if we had any error on the writer from previous iterations.
+ // this should not be required for correctness, since we'll find the
+ // error in the defer block. But this might notice earlier.
select {
case err := <-writerResponse:
- // we ran into an error. wait for the channel to close before returning with the error.
- <-writerResponse
return false, err
default:
}
- // write to disk.
- if len(cw.balancesChunk.Balances) > 0 {
- cw.numAccountsProcessed += cw.balancesChunk.numAccounts
- cw.balancesChunkNum++
- writerRequest <- cw.balancesChunk
- if cw.numAccountsProcessed == cw.totalAccounts {
- cw.accountsIterator.Close()
- // if we're done, wait for the writer to complete it's writing.
- err, opened := <-writerResponse
- if opened {
- // we ran into an error. wait for the channel to close before returning with the error.
- <-writerResponse
- return false, err
- }
- // channel is closed. we're done writing and no issues detected.
- return false, nil
- }
- cw.balancesChunk.Balances = nil
- }
+ // send the chunk to the asyncWriter channel
+ cw.chunkNum++
+ writerRequest <- cw.chunk
+ // indicate that we need a readDatabaseStep
+ cw.chunk = catchpointFileChunkV6{}
}
}
-func (cw *catchpointWriter) asyncWriter(balances chan catchpointFileBalancesChunkV6, response chan error, initialBalancesChunkNum uint64, initialNumAccounts uint64) {
+func (cw *catchpointWriter) asyncWriter(chunks chan catchpointFileChunkV6, response chan error, chunkNum uint64) {
defer close(response)
- balancesChunkNum := initialBalancesChunkNum
- numAccountsProcessed := initialNumAccounts
- for bc := range balances {
- balancesChunkNum++
- numAccountsProcessed += bc.numAccounts
- if len(bc.Balances) == 0 {
+ for chk := range chunks {
+ chunkNum++
+ if chk.empty() {
break
}
-
- encodedChunk := protocol.Encode(&bc)
+ encodedChunk := protocol.Encode(&chk)
err := cw.tar.WriteHeader(&tar.Header{
- Name: fmt.Sprintf("balances.%d.%d.msgpack", balancesChunkNum, cw.totalChunks),
+ Name: fmt.Sprintf("balances.%d.msgpack", chunkNum),
Mode: 0600,
Size: int64(len(encodedChunk)),
})
@@ -240,43 +295,53 @@ func (cw *catchpointWriter) asyncWriter(balances chan catchpointFileBalancesChun
if chunkLen := uint64(len(encodedChunk)); cw.biggestChunkLen < chunkLen {
cw.biggestChunkLen = chunkLen
}
- if numAccountsProcessed == cw.totalAccounts {
- cw.tar.Close()
- cw.compressor.Close()
- cw.file.Close()
- var fileInfo os.FileInfo
- fileInfo, err = os.Stat(cw.filePath)
- if err != nil {
- response <- err
- break
- }
- cw.writtenBytes = fileInfo.Size()
- break
- }
}
}
-func (cw *catchpointWriter) readDatabaseStep(ctx context.Context, tx *sql.Tx) (err error) {
- cw.balancesChunk.Balances, cw.balancesChunk.numAccounts, err = cw.accountsIterator.Next(ctx, tx, BalancesPerCatchpointFileChunk, cw.maxResourcesPerChunk)
- return
-}
-
-// GetSize returns the number of bytes that have been written to the file.
-func (cw *catchpointWriter) GetSize() int64 {
- return cw.writtenBytes
-}
-
-// GetBalancesCount returns the number of balances written to this catchpoint file.
-func (cw *catchpointWriter) GetTotalAccounts() uint64 {
- return cw.totalAccounts
-}
+// readDatabaseStep places the next chunk of records into cw.chunk. It yields
+// all of the account chunks first, and then the kv chunks. Even if the accounts
+// are evenly divisible by BalancesPerCatchpointFileChunk, it must not return an
+// empty chunk between accounts and kvs.
+func (cw *catchpointWriter) readDatabaseStep(ctx context.Context, tx *sql.Tx) error {
+ if !cw.accountsDone {
+ balances, numAccounts, err := cw.accountsIterator.Next(ctx, tx, BalancesPerCatchpointFileChunk, cw.maxResourcesPerChunk)
+ if err != nil {
+ return err
+ }
+ if len(balances) > 0 {
+ cw.chunk = catchpointFileChunkV6{Balances: balances, numAccounts: numAccounts}
+ return nil
+ }
+ // It might seem reasonable, but do not close accountsIterator here,
+ // else it will start over on the next iteration
+ // cw.accountsIterator.Close()
+ cw.accountsDone = true
+ }
-func (cw *catchpointWriter) GetTotalChunks() uint64 {
- return cw.totalChunks
-}
+ // Create the *Rows iterator JIT
+ if cw.kvRows == nil {
+ rows, err := tx.QueryContext(ctx, "SELECT key, value FROM kvstore")
+ if err != nil {
+ return err
+ }
+ cw.kvRows = rows
+ }
-func (cw *catchpointWriter) GetBiggestChunkLen() uint64 {
- return cw.biggestChunkLen
+ kvrs := make([]encodedKVRecordV6, 0, BalancesPerCatchpointFileChunk)
+ for cw.kvRows.Next() {
+ var k []byte
+ var v []byte
+ err := cw.kvRows.Scan(&k, &v)
+ if err != nil {
+ return err
+ }
+ kvrs = append(kvrs, encodedKVRecordV6{Key: k, Value: v})
+ if len(kvrs) == BalancesPerCatchpointFileChunk {
+ break
+ }
+ }
+ cw.chunk = catchpointFileChunkV6{KVs: kvrs}
+ return nil
}
// hasContextDeadlineExceeded examine the given context and see if it was canceled or timed-out.
diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go
index 8c8493372..5b7563b6e 100644
--- a/ledger/catchpointwriter_test.go
+++ b/ledger/catchpointwriter_test.go
@@ -24,175 +24,85 @@ import (
"database/sql"
"fmt"
"io"
+ "math"
"os"
"path/filepath"
- "runtime"
+ "strconv"
"testing"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/algorand/go-algorand/crypto/merkletrie"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/data/txntest"
"github.com/algorand/go-algorand/ledger/ledgercore"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/db"
+ "github.com/algorand/msgp/msgp"
)
-func makeString(len int) string {
- s := ""
- for i := 0; i < len; i++ {
- s += string(byte(i))
- }
- return s
-}
+func TestCatchpointFileBalancesChunkEncoding(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
-func makeTestEncodedBalanceRecordV5(t *testing.T) encodedBalanceRecordV5 {
- er := encodedBalanceRecordV5{}
- hash := crypto.Hash([]byte{1, 2, 3})
- copy(er.Address[:], hash[:])
- oneTimeSecrets := crypto.GenerateOneTimeSignatureSecrets(0, 1)
- vrfSecrets := crypto.GenerateVRFSecrets()
- var stateProofID merklesignature.Verifier
- crypto.RandBytes(stateProofID.Commitment[:])
-
- ad := basics.AccountData{
- Status: basics.NotParticipating,
- MicroAlgos: basics.MicroAlgos{},
- RewardsBase: 0x1234123412341234,
- RewardedMicroAlgos: basics.MicroAlgos{},
- VoteID: oneTimeSecrets.OneTimeSignatureVerifier,
- SelectionID: vrfSecrets.PK,
- StateProofID: stateProofID.Commitment,
- VoteFirstValid: basics.Round(0x1234123412341234),
- VoteLastValid: basics.Round(0x1234123412341234),
- VoteKeyDilution: 0x1234123412341234,
- AssetParams: make(map[basics.AssetIndex]basics.AssetParams),
- Assets: make(map[basics.AssetIndex]basics.AssetHolding),
- AuthAddr: basics.Address(crypto.Hash([]byte{1, 2, 3, 4})),
- }
- currentConsensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
- maxAssetsPerAccount := currentConsensusParams.MaxAssetsPerAccount
- // if the number of supported assets is unlimited, create only 1000 for the purpose of this unit test.
- if maxAssetsPerAccount == 0 {
- maxAssetsPerAccount = config.Consensus[protocol.ConsensusV30].MaxAssetsPerAccount
- }
- for assetCreatorAssets := 0; assetCreatorAssets < maxAssetsPerAccount; assetCreatorAssets++ {
- ap := basics.AssetParams{
- Total: 0x1234123412341234,
- Decimals: 0x12341234,
- DefaultFrozen: true,
- UnitName: makeString(currentConsensusParams.MaxAssetUnitNameBytes),
- AssetName: makeString(currentConsensusParams.MaxAssetNameBytes),
- URL: makeString(currentConsensusParams.MaxAssetURLBytes),
- Manager: basics.Address(crypto.Hash([]byte{1, byte(assetCreatorAssets)})),
- Reserve: basics.Address(crypto.Hash([]byte{2, byte(assetCreatorAssets)})),
- Freeze: basics.Address(crypto.Hash([]byte{3, byte(assetCreatorAssets)})),
- Clawback: basics.Address(crypto.Hash([]byte{4, byte(assetCreatorAssets)})),
- }
- copy(ap.MetadataHash[:], makeString(32))
- ad.AssetParams[basics.AssetIndex(0x1234123412341234-assetCreatorAssets)] = ap
- }
+ // check a low number of balances/kvs/resources
+ // otherwise it would take forever to serialize/deserialize
+ const numChunkEntries = BalancesPerCatchpointFileChunk / 50
+ require.Greater(t, numChunkEntries, 1)
- for assetHolderAssets := 0; assetHolderAssets < maxAssetsPerAccount; assetHolderAssets++ {
- ah := basics.AssetHolding{
- Amount: 0x1234123412341234,
- Frozen: true,
- }
- ad.Assets[basics.AssetIndex(0x1234123412341234-assetHolderAssets)] = ah
- }
+ const numResources = ResourcesPerCatchpointFileChunk / 10000
+ require.Greater(t, numResources, 1)
- maxApps := currentConsensusParams.MaxAppsCreated
- maxOptIns := currentConsensusParams.MaxAppsOptedIn
- if maxApps == 0 {
- maxApps = config.Consensus[protocol.ConsensusV30].MaxAppsCreated
- }
- if maxOptIns == 0 {
- maxOptIns = config.Consensus[protocol.ConsensusV30].MaxAppsOptedIn
- }
- maxKeyBytesLen := currentConsensusParams.MaxAppKeyLen
- maxSumBytesLen := currentConsensusParams.MaxAppSumKeyValueLens
+ baseAD := randomBaseAccountData()
+ encodedBaseAD := baseAD.MarshalMsg(nil)
- genKey := func() (string, basics.TealValue) {
- len := int(crypto.RandUint64() % uint64(maxKeyBytesLen))
- if len == 0 {
- return "k", basics.TealValue{Type: basics.TealUintType, Uint: 0}
- }
- key := make([]byte, maxSumBytesLen-len)
- crypto.RandBytes(key)
- return string(key), basics.TealValue{Type: basics.TealUintType, Bytes: string(key)}
+ resources := make(map[uint64]msgp.Raw, numResources/10)
+ rdApp := randomAppResourceData()
+ encodedResourceData := rdApp.MarshalMsg(nil)
+ for i := uint64(0); i < numResources; i++ {
+ resources[i] = encodedResourceData
}
- startIndex := crypto.RandUint64() % 100000
- ad.AppParams = make(map[basics.AppIndex]basics.AppParams, maxApps)
- for aidx := startIndex; aidx < startIndex+uint64(maxApps); aidx++ {
- ap := basics.AppParams{}
- ap.GlobalState = make(basics.TealKeyValue)
- for i := uint64(0); i < currentConsensusParams.MaxGlobalSchemaEntries/4; i++ {
- k, v := genKey()
- ap.GlobalState[k] = v
- }
- ad.AppParams[basics.AppIndex(aidx)] = ap
- optins := maxApps
- if maxApps > maxOptIns {
- optins = maxOptIns
- }
- ad.AppLocalStates = make(map[basics.AppIndex]basics.AppLocalState, optins)
- keys := currentConsensusParams.MaxLocalSchemaEntries / 4
- lkv := make(basics.TealKeyValue, keys)
- for i := 0; i < optins; i++ {
- for j := uint64(0); j < keys; j++ {
- k, v := genKey()
- lkv[k] = v
- }
- }
- ad.AppLocalStates[basics.AppIndex(aidx)] = basics.AppLocalState{KeyValue: lkv}
+ balance := encodedBalanceRecordV6{
+ Address: ledgertesting.RandomAddress(),
+ AccountData: encodedBaseAD,
+ Resources: resources,
}
+ balances := make([]encodedBalanceRecordV6, numChunkEntries)
+ kv := encodedKVRecordV6{
+ Key: make([]byte, encodedKVRecordV6MaxKeyLength),
+ Value: make([]byte, encodedKVRecordV6MaxValueLength),
+ }
+ crypto.RandBytes(kv.Key[:])
+ crypto.RandBytes(kv.Value[:])
+ kvs := make([]encodedKVRecordV6, numChunkEntries)
- encodedAd := ad.MarshalMsg(nil)
- er.AccountData = encodedAd
- return er
-}
-
-func TestEncodedBalanceRecordEncoding(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- er := makeTestEncodedBalanceRecordV5(t)
- encodedBr := er.MarshalMsg(nil)
-
- var er2 encodedBalanceRecordV5
- _, err := er2.UnmarshalMsg(encodedBr)
- require.NoError(t, err)
-
- require.Equal(t, er, er2)
-}
-
-func TestCatchpointFileBalancesChunkEncoding(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- // The next operations are heavy on the memory.
- // Garbage collection helps prevent trashing
- runtime.GC()
-
- fbc := catchpointFileBalancesChunkV5{}
- for i := 0; i < 512; i++ {
- fbc.Balances = append(fbc.Balances, makeTestEncodedBalanceRecordV5(t))
+ for i := 0; i < numChunkEntries; i++ {
+ balances[i] = balance
+ kvs[i] = kv
}
- encodedFbc := fbc.MarshalMsg(nil)
- var fbc2 catchpointFileBalancesChunkV5
- _, err := fbc2.UnmarshalMsg(encodedFbc)
+ chunk1 := catchpointFileChunkV6{}
+ chunk1.Balances = balances
+ chunk1.KVs = kvs
+ encodedChunk := chunk1.MarshalMsg(nil)
+
+ var chunk2 catchpointFileChunkV6
+ _, err := chunk2.UnmarshalMsg(encodedChunk)
require.NoError(t, err)
- require.Equal(t, fbc, fbc2)
- // Garbage collection helps prevent trashing
- // for next tests
- runtime.GC()
+ require.Equal(t, chunk1, chunk2)
}
func TestBasicCatchpointWriter(t *testing.T) {
partitiontest.PartitionTest(t)
+ // t.Parallel() NO! config.Consensus is modified
// create new protocol version, which has lower lookback
testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestBasicCatchpointWriter")
@@ -219,7 +129,7 @@ func TestBasicCatchpointWriter(t *testing.T) {
readDb := ml.trackerDB().Rdb
err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- writer, err := makeCatchpointWriter(context.Background(), fileName, tx, DefaultMaxResourcesPerChunk)
+ writer, err := makeCatchpointWriter(context.Background(), fileName, tx, ResourcesPerCatchpointFileChunk)
if err != nil {
return err
}
@@ -262,51 +172,29 @@ func TestBasicCatchpointWriter(t *testing.T) {
}
}
- require.Equal(t, "balances.1.1.msgpack", header.Name)
+ require.Equal(t, "balances.1.msgpack", header.Name)
- var balances catchpointFileBalancesChunkV6
- err = protocol.Decode(balancesBlockBytes, &balances)
+ var chunk catchpointFileChunkV6
+ err = protocol.Decode(balancesBlockBytes, &chunk)
require.NoError(t, err)
- require.Equal(t, uint64(len(accts)), uint64(len(balances.Balances)))
+ require.Equal(t, uint64(len(accts)), uint64(len(chunk.Balances)))
_, err = tarReader.Next()
require.Equal(t, io.EOF, err)
}
-func TestFullCatchpointWriter(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- // create new protocol version, which has lower lookback
- testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestFullCatchpointWriter")
- protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
- protoParams.CatchpointLookback = 32
- config.Consensus[testProtocolVersion] = protoParams
- temporaryDirectory := t.TempDir()
- defer func() {
- delete(config.Consensus, testProtocolVersion)
- }()
-
- accts := ledgertesting.RandomAccounts(BalancesPerCatchpointFileChunk*3, false)
- ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, []map[basics.Address]basics.AccountData{accts})
- defer ml.Close()
-
- conf := config.GetDefaultLocal()
- conf.CatchpointInterval = 1
- conf.Archival = true
- au, _ := newAcctUpdates(t, ml, conf)
- err := au.loadFromDisk(ml, 0)
- require.NoError(t, err)
- au.close()
- catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data")
- catchpointFilePath := filepath.Join(temporaryDirectory, "15.catchpoint")
- readDb := ml.trackerDB().Rdb
+func testWriteCatchpoint(t *testing.T, rdb db.Accessor, datapath string, filepath string, maxResourcesPerChunk int) CatchpointFileHeader {
var totalAccounts uint64
var totalChunks uint64
var biggestChunkLen uint64
var accountsRnd basics.Round
var totals ledgercore.AccountTotals
- err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- writer, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx, DefaultMaxResourcesPerChunk)
+ if maxResourcesPerChunk <= 0 {
+ maxResourcesPerChunk = ResourcesPerCatchpointFileChunk
+ }
+
+ err := rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ writer, err := makeCatchpointWriter(context.Background(), datapath, tx, maxResourcesPerChunk)
if err != nil {
return err
}
@@ -317,9 +205,9 @@ func TestFullCatchpointWriter(t *testing.T) {
break
}
}
- totalAccounts = writer.GetTotalAccounts()
- totalChunks = writer.GetTotalChunks()
- biggestChunkLen = writer.GetBiggestChunkLen()
+ totalAccounts = writer.totalAccounts
+ totalChunks = writer.chunkNum
+ biggestChunkLen = writer.biggestChunkLen
accountsRnd, err = accountsRound(tx)
if err != nil {
return
@@ -343,73 +231,13 @@ func TestFullCatchpointWriter(t *testing.T) {
}
err = repackCatchpoint(
context.Background(), catchpointFileHeader, biggestChunkLen,
- catchpointDataFilePath, catchpointFilePath)
+ datapath, filepath)
require.NoError(t, err)
- // create a ledger.
- var initState ledgercore.InitState
- initState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
- l, err := OpenLedger(ml.log, "TestFullCatchpointWriter", true, initState, conf)
- require.NoError(t, err)
+ l := testNewLedgerFromCatchpoint(t, filepath)
defer l.Close()
- accessor := MakeCatchpointCatchupAccessor(l, l.log)
- err = accessor.ResetStagingBalances(context.Background(), true)
- require.NoError(t, err)
-
- // load the file from disk.
- fileContent, err := os.ReadFile(catchpointFilePath)
- require.NoError(t, err)
- gzipReader, err := gzip.NewReader(bytes.NewBuffer(fileContent))
- require.NoError(t, err)
- tarReader := tar.NewReader(gzipReader)
- var catchupProgress CatchpointCatchupAccessorProgress
- defer gzipReader.Close()
- for {
- header, err := tarReader.Next()
- if err != nil {
- if err == io.EOF {
- break
- }
- require.NoError(t, err)
- break
- }
- balancesBlockBytes := make([]byte, header.Size)
- readComplete := int64(0)
-
- for readComplete < header.Size {
- bytesRead, err := tarReader.Read(balancesBlockBytes[readComplete:])
- readComplete += int64(bytesRead)
- if err != nil {
- if err == io.EOF {
- if readComplete == header.Size {
- break
- }
- require.NoError(t, err)
- }
- break
- }
- }
- err = accessor.ProgressStagingBalances(context.Background(), header.Name, balancesBlockBytes, &catchupProgress)
- require.NoError(t, err)
- }
-
- err = accessor.BuildMerkleTrie(context.Background(), nil)
- require.NoError(t, err)
-
- err = l.trackerDBs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- err := applyCatchpointStagingBalances(ctx, tx, 0, 0)
- return err
- })
- require.NoError(t, err)
-
- // verify that the account data aligns with what we originally stored :
- for addr, acct := range accts {
- acctData, validThrough, _, err := l.LookupLatest(addr)
- require.NoErrorf(t, err, "failed to lookup for account %v after restoring from catchpoint", addr)
- require.Equal(t, acct, acctData)
- require.Equal(t, basics.Round(0), validThrough)
- }
+ return catchpointFileHeader
}
func TestCatchpointReadDatabaseOverflowSingleAccount(t *testing.T) {
@@ -468,14 +296,14 @@ func TestCatchpointReadDatabaseOverflowSingleAccount(t *testing.T) {
}
// repeat this until read all accts
for totalAccountsWritten < expectedTotalAccounts {
- cw.balancesChunk.Balances = nil
+ cw.chunk.Balances = nil
err := cw.readDatabaseStep(cw.ctx, cw.tx)
if err != nil {
return err
}
- totalAccountsWritten += cw.balancesChunk.numAccounts
+ totalAccountsWritten += cw.chunk.numAccounts
numResources := 0
- for _, balance := range cw.balancesChunk.Balances {
+ for _, balance := range cw.chunk.Balances {
numResources += len(balance.Resources)
}
if numResources > maxResourcesPerChunk {
@@ -512,7 +340,7 @@ func TestCatchpointReadDatabaseOverflowAccounts(t *testing.T) {
delete(config.Consensus, testProtocolVersion)
}()
- maxResourcesPerChunk := 5
+ const maxResourcesPerChunk = 5
accts := ledgertesting.RandomAccounts(5, false)
// force each acct to have overflowing number of resources
@@ -551,20 +379,21 @@ func TestCatchpointReadDatabaseOverflowAccounts(t *testing.T) {
totalResources := 0
var expectedTotalResources int
cw, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx, maxResourcesPerChunk)
+ require.NoError(t, err)
err = cw.tx.QueryRowContext(cw.ctx, "SELECT count(1) FROM resources").Scan(&expectedTotalResources)
if err != nil {
return err
}
// repeat this until read all accts
for totalAccountsWritten < expectedTotalAccounts {
- cw.balancesChunk.Balances = nil
+ cw.chunk.Balances = nil
err := cw.readDatabaseStep(cw.ctx, cw.tx)
if err != nil {
return err
}
- totalAccountsWritten += cw.balancesChunk.numAccounts
+ totalAccountsWritten += cw.chunk.numAccounts
numResources := 0
- for _, balance := range cw.balancesChunk.Balances {
+ for _, balance := range cw.chunk.Balances {
numResources += len(balance.Resources)
}
if numResources > maxResourcesPerChunk {
@@ -609,66 +438,96 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) {
au.close()
catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data")
catchpointFilePath := filepath.Join(temporaryDirectory, "15.catchpoint")
- readDb := ml.trackerDB().Rdb
- var totalAccounts uint64
- var totalChunks uint64
- var biggestChunkLen uint64
- var accountsRnd basics.Round
- var totals ledgercore.AccountTotals
- err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- writer, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx, 5)
- if err != nil {
- return err
- }
- for {
- more, err := writer.WriteStep(context.Background())
+ const maxResourcesPerChunk = 5
+ testWriteCatchpoint(t, ml.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, maxResourcesPerChunk)
+
+ l := testNewLedgerFromCatchpoint(t, catchpointFilePath)
+ defer l.Close()
+
+ // verify that the account data aligns with what we originally stored :
+ for addr, acct := range accts {
+ acctData, validThrough, _, err := l.LookupLatest(addr)
+ require.NoErrorf(t, err, "failed to lookup for account %v after restoring from catchpoint", addr)
+ require.Equal(t, acct, acctData)
+ require.Equal(t, basics.Round(0), validThrough)
+ }
+
+ err = l.reloadLedger()
+ require.NoError(t, err)
+
+ // now manually construct the MT and ensure the reading makeOrderedAccountsIter works as expected:
+ // no errors on read, hashes match
+ ctx := context.Background()
+ tx, err := l.trackerDBs.Wdb.Handle.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable})
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ // save the existing hash
+ committer, err := MakeMerkleCommitter(tx, false)
+ require.NoError(t, err)
+ trie, err := merkletrie.MakeTrie(committer, TrieMemoryConfig)
+ require.NoError(t, err)
+
+ h1, err := trie.RootHash()
+ require.NoError(t, err)
+ require.NotEmpty(t, h1)
+
+ // reset hashes
+ err = resetAccountHashes(ctx, tx)
+ require.NoError(t, err)
+
+ // rebuild the MT
+ committer, err = MakeMerkleCommitter(tx, false)
+ require.NoError(t, err)
+ trie, err = merkletrie.MakeTrie(committer, TrieMemoryConfig)
+ require.NoError(t, err)
+
+ h, err := trie.RootHash()
+ require.NoError(t, err)
+ require.Zero(t, h)
+
+ iter := makeOrderedAccountsIter(tx, trieRebuildAccountChunkSize)
+ defer iter.Close(ctx)
+ for {
+ accts, _, err := iter.Next(ctx)
+ if err == sql.ErrNoRows {
+ // the account builder would return sql.ErrNoRows when no more data is available.
+ err = nil
+ break
+ } else if err != nil {
require.NoError(t, err)
- if !more {
- break
- }
}
- totalAccounts = writer.GetTotalAccounts()
- totalChunks = writer.GetTotalChunks()
- biggestChunkLen = writer.GetBiggestChunkLen()
- accountsRnd, err = accountsRound(tx)
- if err != nil {
- return
+
+ if len(accts) > 0 {
+ for _, acct := range accts {
+ added, err := trie.Add(acct.digest)
+ require.NoError(t, err)
+ require.True(t, added)
+ }
}
- totals, err = accountsTotals(ctx, tx, false)
- return
- })
- require.NoError(t, err)
- blocksRound := accountsRnd + 1
- blockHeaderDigest := crypto.Hash([]byte{1, 2, 3})
- catchpointLabel := fmt.Sprintf("%d#%v", blocksRound, blockHeaderDigest) // this is not a correct way to create a label, but it's good enough for this unit test
- catchpointFileHeader := CatchpointFileHeader{
- Version: CatchpointFileVersionV6,
- BalancesRound: accountsRnd,
- BlocksRound: blocksRound,
- Totals: totals,
- TotalAccounts: totalAccounts,
- TotalChunks: totalChunks,
- Catchpoint: catchpointLabel,
- BlockHeaderDigest: blockHeaderDigest,
}
- err = repackCatchpoint(
- context.Background(), catchpointFileHeader, biggestChunkLen,
- catchpointDataFilePath, catchpointFilePath)
require.NoError(t, err)
+ h2, err := trie.RootHash()
+ require.NoError(t, err)
+ require.NotEmpty(t, h2)
+ require.Equal(t, h1, h2)
+}
+
+func testNewLedgerFromCatchpoint(t *testing.T, filepath string) *Ledger {
// create a ledger.
var initState ledgercore.InitState
initState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
- l, err := OpenLedger(ml.log, "TestFullCatchpointWriter", true, initState, conf)
+ conf := config.GetDefaultLocal()
+ l, err := OpenLedger(logging.TestingLog(t), t.Name()+"FromCatchpoint", true, initState, conf)
require.NoError(t, err)
- defer l.Close()
accessor := MakeCatchpointCatchupAccessor(l, l.log)
err = accessor.ResetStagingBalances(context.Background(), true)
require.NoError(t, err)
// load the file from disk.
- fileContent, err := os.ReadFile(catchpointFilePath)
+ fileContent, err := os.ReadFile(filepath)
require.NoError(t, err)
gzipReader, err := gzip.NewReader(bytes.NewBuffer(fileContent))
require.NoError(t, err)
@@ -700,7 +559,7 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) {
break
}
}
- err = accessor.ProgressStagingBalances(context.Background(), header.Name, balancesBlockBytes, &catchupProgress)
+ err = accessor.ProcessStagingBalances(context.Background(), header.Name, balancesBlockBytes, &catchupProgress)
require.NoError(t, err)
}
@@ -712,7 +571,41 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) {
return err
})
require.NoError(t, err)
+ return l
+}
+
+func TestFullCatchpointWriter(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // t.Parallel() NO! config.Consensus is modified
+
+ // create new protocol version, which has lower lookback
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestFullCatchpointWriter")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.CatchpointLookback = 32
+ config.Consensus[testProtocolVersion] = protoParams
+ temporaryDirectory := t.TempDir()
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ accts := ledgertesting.RandomAccounts(BalancesPerCatchpointFileChunk*3, false)
+ ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, []map[basics.Address]basics.AccountData{accts})
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ conf.CatchpointInterval = 1
+ conf.Archival = true
+ au, _ := newAcctUpdates(t, ml, conf)
+ err := au.loadFromDisk(ml, 0)
+ require.NoError(t, err)
+ au.close()
+ catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data")
+ catchpointFilePath := filepath.Join(temporaryDirectory, "15.catchpoint")
+ testWriteCatchpoint(t, ml.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, 0)
+
+ l := testNewLedgerFromCatchpoint(t, catchpointFilePath)
+ defer l.Close()
// verify that the account data aligns with what we originally stored :
for addr, acct := range accts {
acctData, validThrough, _, err := l.LookupLatest(addr)
@@ -721,3 +614,172 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) {
require.Equal(t, basics.Round(0), validThrough)
}
}
+
+func TestExactAccountChunk(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ dl := NewDoubleLedger(t, genBalances, protocol.ConsensusFuture)
+ defer dl.Close()
+
+ pay := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Amount: 1_000_000,
+ }
+ // There are 12 accounts in the NewTestGenesis, so we create more so that we
+ // have exactly one chunk's worth, to make sure that works without an empty
+ // chunk between accounts and kvstore.
+ for i := 0; i < (BalancesPerCatchpointFileChunk - 12); i++ {
+ newacctpay := pay
+ newacctpay.Receiver = ledgertesting.RandomAddress()
+ dl.fullBlock(&newacctpay)
+ }
+
+ // At least 32 more blocks so that we catchpoint after the accounts exist
+ for i := 0; i < 40; i++ {
+ selfpay := pay
+ selfpay.Receiver = addrs[0]
+ selfpay.Note = ledgertesting.RandomNote()
+ dl.fullBlock(&selfpay)
+ }
+
+ tempDir := t.TempDir()
+
+ catchpointDataFilePath := filepath.Join(tempDir, t.Name()+".data")
+ catchpointFilePath := filepath.Join(tempDir, t.Name()+".catchpoint.tar.gz")
+
+ cph := testWriteCatchpoint(t, dl.validator.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, 0)
+ require.EqualValues(t, cph.TotalChunks, 1)
+
+ l := testNewLedgerFromCatchpoint(t, catchpointFilePath)
+ defer l.Close()
+}
+
+func TestCatchpointAfterTxns(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ dl := NewDoubleLedger(t, genBalances, protocol.ConsensusFuture)
+ defer dl.Close()
+
+ boxApp := dl.fundedApp(addrs[1], 1_000_000, boxAppSource)
+ callBox := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[2],
+ ApplicationID: boxApp,
+ }
+
+ makeBox := callBox.Args("create", "xxx")
+ makeBox.Boxes = []transactions.BoxRef{{Index: 0, Name: []byte("xxx")}}
+ dl.txn(makeBox)
+
+ pay := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: addrs[1],
+ Amount: 100000,
+ }
+ // There are 12 accounts in the NewTestGenesis, plus 1 app account, so we
+ // create more so that we have exactly one chunk's worth, to make sure that
+ // works without an empty chunk between accounts and kvstore.
+ for i := 0; i < (BalancesPerCatchpointFileChunk - 13); i++ {
+ newacctpay := pay
+ newacctpay.Receiver = ledgertesting.RandomAddress()
+ dl.fullBlock(&newacctpay)
+ }
+ for i := 0; i < 40; i++ {
+ dl.fullBlock(pay.Noted(strconv.Itoa(i)))
+ }
+
+ tempDir := t.TempDir()
+
+ catchpointDataFilePath := filepath.Join(tempDir, t.Name()+".data")
+ catchpointFilePath := filepath.Join(tempDir, t.Name()+".catchpoint.tar.gz")
+
+ cph := testWriteCatchpoint(t, dl.validator.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, 0)
+ require.EqualValues(t, 2, cph.TotalChunks)
+
+ l := testNewLedgerFromCatchpoint(t, catchpointFilePath)
+ defer l.Close()
+ values, err := l.LookupKeysByPrefix(l.Latest(), "bx:", 10)
+ require.NoError(t, err)
+ require.Len(t, values, 1)
+
+ // Add one more account
+ newacctpay := pay
+ last := ledgertesting.RandomAddress()
+ newacctpay.Receiver = last
+ dl.fullBlock(&newacctpay)
+
+ // Write and read back in, and ensure even the last effect exists.
+ cph = testWriteCatchpoint(t, dl.validator.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, 0)
+ require.EqualValues(t, cph.TotalChunks, 2) // Still only 2 chunks, as last was in a recent block
+
+ // Drive home the point that `last` is _not_ included in the catchpoint by inspecting balance read from catchpoint.
+ {
+ l = testNewLedgerFromCatchpoint(t, catchpointFilePath)
+ defer l.Close()
+ _, _, algos, err := l.LookupLatest(last)
+ require.NoError(t, err)
+ require.Equal(t, basics.MicroAlgos{}, algos)
+ }
+
+ for i := 0; i < 40; i++ { // Advance so catchpoint sees the txns
+ dl.fullBlock(pay.Noted(strconv.Itoa(i)))
+ }
+
+ cph = testWriteCatchpoint(t, dl.validator.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, 0)
+ require.EqualValues(t, cph.TotalChunks, 3)
+
+ l = testNewLedgerFromCatchpoint(t, catchpointFilePath)
+ defer l.Close()
+ values, err = l.LookupKeysByPrefix(l.Latest(), "bx:", 10)
+ require.NoError(t, err)
+ require.Len(t, values, 1)
+
+ // Confirm `last` balance is now available in the catchpoint.
+ {
+ // Since fast catchup consists of multiple steps and the test only performs catchpoint reads, the resulting ledger is incomplete.
+ // That's why the assertion ignores rewards and does _not_ use `LookupLatest`.
+ ad, _, err := l.LookupWithoutRewards(0, last)
+ require.NoError(t, err)
+ require.Equal(t, basics.MicroAlgos{Raw: 100_000}, ad.MicroAlgos)
+ }
+}
+
+func TestEncodedKVRecordV6Allocbounds(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ for version, params := range config.Consensus {
+ require.GreaterOrEqualf(t, uint64(encodedKVRecordV6MaxValueLength), params.MaxBoxSize, "Allocbound constant no longer valid as of consensus version %s", version)
+ longestPossibleBoxName := string(make([]byte, params.MaxAppKeyLen))
+ longestPossibleKey := logic.MakeBoxKey(basics.AppIndex(math.MaxUint64), longestPossibleBoxName)
+ require.GreaterOrEqualf(t, encodedKVRecordV6MaxValueLength, len(longestPossibleKey), "Allocbound constant no longer valid as of consensus version %s", version)
+ }
+}
+
+func TestEncodedKVDataSize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ currentConsensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ require.GreaterOrEqual(t, encodedKVRecordV6MaxKeyLength, currentConsensusParams.MaxAppKeyLen)
+ require.GreaterOrEqual(t, uint64(encodedKVRecordV6MaxValueLength), currentConsensusParams.MaxBoxSize)
+
+ kvEntry := encodedKVRecordV6{
+ Key: make([]byte, encodedKVRecordV6MaxKeyLength),
+ Value: make([]byte, encodedKVRecordV6MaxValueLength),
+ }
+
+ crypto.RandBytes(kvEntry.Key[:])
+ crypto.RandBytes(kvEntry.Value[:])
+
+ encoded := kvEntry.MarshalMsg(nil)
+ require.GreaterOrEqual(t, MaxEncodedKVDataSize, len(encoded))
+
+}
diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go
index 2a12377d3..5ca08bff7 100644
--- a/ledger/catchupaccessor.go
+++ b/ledger/catchupaccessor.go
@@ -55,11 +55,11 @@ type CatchpointCatchupAccessor interface {
// ResetStagingBalances resets the current staging balances, preparing for a new set of balances to be added
ResetStagingBalances(ctx context.Context, newCatchup bool) (err error)
- // ProgressStagingBalances deserialize the given bytes as a temporary staging balances
- ProgressStagingBalances(ctx context.Context, sectionName string, bytes []byte, progress *CatchpointCatchupAccessorProgress) (err error)
+ // ProcessStagingBalances deserialize the given bytes as a temporary staging balances
+ ProcessStagingBalances(ctx context.Context, sectionName string, bytes []byte, progress *CatchpointCatchupAccessorProgress) (err error)
// BuildMerkleTrie inserts the account hashes into the merkle trie
- BuildMerkleTrie(ctx context.Context, progressUpdates func(uint64)) (err error)
+ BuildMerkleTrie(ctx context.Context, progressUpdates func(uint64, uint64)) (err error)
// GetCatchupBlockRound returns the latest block round matching the current catchpoint
GetCatchupBlockRound(ctx context.Context) (round basics.Round, err error)
@@ -95,6 +95,7 @@ type stagingWriter interface {
writeBalances(context.Context, []normalizedAccountBalance) error
writeCreatables(context.Context, []normalizedAccountBalance) error
writeHashes(context.Context, []normalizedAccountBalance) error
+ writeKVs(context.Context, []encodedKVRecordV6) error
isShared() bool
}
@@ -108,6 +109,12 @@ func (w *stagingWriterImpl) writeBalances(ctx context.Context, balances []normal
})
}
+func (w *stagingWriterImpl) writeKVs(ctx context.Context, kvrs []encodedKVRecordV6) error {
+ return w.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ return writeCatchpointStagingKVs(ctx, tx, kvrs)
+ })
+}
+
func (w *stagingWriterImpl) writeCreatables(ctx context.Context, balances []normalizedAccountBalance) error {
return w.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
return writeCatchpointStagingCreatable(ctx, tx, balances)
@@ -263,11 +270,13 @@ func (c *catchpointCatchupAccessorImpl) ResetStagingBalances(ctx context.Context
return
}
-// CatchpointCatchupAccessorProgress is used by the caller of ProgressStagingBalances to obtain progress information
+// CatchpointCatchupAccessorProgress is used by the caller of ProcessStagingBalances to obtain progress information
type CatchpointCatchupAccessorProgress struct {
TotalAccounts uint64
ProcessedAccounts uint64
ProcessedBytes uint64
+ TotalKVs uint64
+ ProcessedKVs uint64
TotalChunks uint64
SeenHeader bool
Version uint64
@@ -280,10 +289,11 @@ type CatchpointCatchupAccessorProgress struct {
BalancesWriteDuration time.Duration
CreatablesWriteDuration time.Duration
HashesWriteDuration time.Duration
+ KVWriteDuration time.Duration
}
-// ProgressStagingBalances deserialize the given bytes as a temporary staging balances
-func (c *catchpointCatchupAccessorImpl) ProgressStagingBalances(ctx context.Context, sectionName string, bytes []byte, progress *CatchpointCatchupAccessorProgress) (err error) {
+// ProcessStagingBalances deserializes the given bytes as temporary staging balances
+func (c *catchpointCatchupAccessorImpl) ProcessStagingBalances(ctx context.Context, sectionName string, bytes []byte, progress *CatchpointCatchupAccessorProgress) (err error) {
if sectionName == "content.msgpack" {
return c.processStagingContent(ctx, bytes, progress)
}
@@ -291,7 +301,7 @@ func (c *catchpointCatchupAccessorImpl) ProgressStagingBalances(ctx context.Cont
return c.processStagingBalances(ctx, bytes, progress)
}
// we want to allow undefined sections to support backward compatibility.
- c.log.Warnf("CatchpointCatchupAccessorImpl::ProgressStagingBalances encountered unexpected section name '%s' of length %d, which would be ignored", sectionName, len(bytes))
+ c.log.Warnf("CatchpointCatchupAccessorImpl::ProcessStagingBalances encountered unexpected section name '%s' of length %d, which would be ignored", sectionName, len(bytes))
return nil
}
@@ -336,6 +346,8 @@ func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex
if err == nil {
progress.SeenHeader = true
progress.TotalAccounts = fileHeader.TotalAccounts
+ progress.TotalKVs = fileHeader.TotalKVs
+
progress.TotalChunks = fileHeader.TotalChunks
progress.Version = fileHeader.Version
c.ledger.setSynchronousMode(ctx, c.ledger.accountsRebuildSynchronousMode)
@@ -354,6 +366,7 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
var normalizedAccountBalances []normalizedAccountBalance
var expectingMoreEntries []bool
+ var chunkKVs []encodedKVRecordV6
switch progress.Version {
default:
@@ -375,21 +388,22 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
expectingMoreEntries = make([]bool, len(balances.Balances))
case CatchpointFileVersionV6:
- var balances catchpointFileBalancesChunkV6
- err = protocol.Decode(bytes, &balances)
+ var chunk catchpointFileChunkV6
+ err = protocol.Decode(bytes, &chunk)
if err != nil {
return err
}
- if len(balances.Balances) == 0 {
- return fmt.Errorf("processStagingBalances received a chunk with no accounts")
+ if len(chunk.Balances) == 0 && len(chunk.KVs) == 0 {
+ return fmt.Errorf("processStagingBalances received a chunk with no accounts or KVs")
}
- normalizedAccountBalances, err = prepareNormalizedBalancesV6(balances.Balances, c.ledger.GenesisProto())
- expectingMoreEntries = make([]bool, len(balances.Balances))
- for i, balance := range balances.Balances {
+ normalizedAccountBalances, err = prepareNormalizedBalancesV6(chunk.Balances, c.ledger.GenesisProto())
+ expectingMoreEntries = make([]bool, len(chunk.Balances))
+ for i, balance := range chunk.Balances {
expectingMoreEntries[i] = balance.ExpectingMoreEntries
}
+ chunkKVs = chunk.KVs
}
if err != nil {
@@ -468,9 +482,11 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
var errBalances error
var errCreatables error
var errHashes error
+ var errKVs error
var durBalances time.Duration
var durCreatables time.Duration
var durHashes time.Duration
+ var durKVs time.Duration
// start the balances writer
wg.Add(1)
@@ -520,6 +536,21 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
durHashes = time.Since(start)
}()
+ // on an in-memory database, wait for the writer to finish before starting the new writer
+ if c.stagingWriter.isShared() {
+ wg.Wait()
+ }
+
+ // start the kv store writer
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ start := time.Now()
+ errKVs = c.stagingWriter.writeKVs(ctx, chunkKVs)
+ durKVs = time.Since(start)
+ }()
+
wg.Wait()
if errBalances != nil {
@@ -531,13 +562,18 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
if errHashes != nil {
return errHashes
}
+ if errKVs != nil {
+ return errKVs
+ }
progress.BalancesWriteDuration += durBalances
progress.CreatablesWriteDuration += durCreatables
progress.HashesWriteDuration += durHashes
+ progress.KVWriteDuration += durKVs
ledgerProcessstagingbalancesMicros.AddMicrosecondsSince(start, nil)
progress.ProcessedBytes += uint64(len(bytes))
+ progress.ProcessedKVs += uint64(len(chunkKVs))
for _, acctBal := range normalizedAccountBalances {
progress.TotalAccountHashes += uint64(len(acctBal.accountHashes))
if !acctBal.partialBalance {
@@ -557,8 +593,24 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
return err
}
+// countHashes disambiguates the 2 hash types included in the merkle trie:
+// * accounts + creatables (assets + apps)
+// * KVs
+//
+// The function is _not_ a general purpose way to count hashes by hash kind.
+func countHashes(hashes [][]byte) (accountCount, kvCount uint64) {
+ for _, hash := range hashes {
+ if hash[hashKindEncodingIndex] == byte(kvHK) {
+ kvCount++
+ } else {
+ accountCount++
+ }
+ }
+ return accountCount, kvCount
+}
+
// BuildMerkleTrie would process the catchpointpendinghashes and insert all the items in it into the merkle trie
-func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, progressUpdates func(uint64)) (err error) {
+func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, progressUpdates func(uint64, uint64)) (err error) {
wdb := c.ledger.trackerDB().Wdb
rdb := c.ledger.trackerDB().Rdb
err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
@@ -618,11 +670,8 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
var trie *merkletrie.Trie
uncommitedHashesCount := 0
keepWriting := true
- hashesWritten := uint64(0)
+ accountHashesWritten, kvHashesWritten := uint64(0), uint64(0)
var mc *MerkleCommitter
- if progressUpdates != nil {
- progressUpdates(hashesWritten)
- }
err := wdb.Atomic(func(transactionCtx context.Context, tx *sql.Tx) (err error) {
// create the merkle trie for the balances
@@ -659,18 +708,23 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
return
}
trie.SetCommitter(mc)
- for _, accountHash := range hashesToWrite {
+ for _, hash := range hashesToWrite {
var added bool
- added, err = trie.Add(accountHash)
+ added, err = trie.Add(hash)
if !added {
- return fmt.Errorf("CatchpointCatchupAccessorImpl::BuildMerkleTrie: The provided catchpoint file contained the same account more than once. hash '%s'", hex.EncodeToString(accountHash))
+ return fmt.Errorf("CatchpointCatchupAccessorImpl::BuildMerkleTrie: The provided catchpoint file contained the same account more than once. hash = '%s' hash kind = %s", hex.EncodeToString(hash), hashKind(hash[hashKindEncodingIndex]))
}
if err != nil {
return
}
+
}
uncommitedHashesCount += len(hashesToWrite)
- hashesWritten += uint64(len(hashesToWrite))
+
+ accounts, kvs := countHashes(hashesToWrite)
+ kvHashesWritten += kvs
+ accountHashesWritten += accounts
+
return nil
})
if err != nil {
@@ -698,8 +752,9 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
continue
}
}
+
if progressUpdates != nil {
- progressUpdates(hashesWritten)
+ progressUpdates(accountHashesWritten, kvHashesWritten)
}
}
if err != nil {
@@ -956,6 +1011,8 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err
tp := trackerDBParams{
initAccounts: c.ledger.GenesisAccounts(),
initProto: c.ledger.GenesisProtoVersion(),
+ genesisHash: c.ledger.GenesisHash(),
+ fromCatchpoint: true,
catchpointEnabled: c.ledger.catchpoint.catchpointEnabled(),
dbPathPrefix: c.ledger.catchpoint.dbDirectory,
blockDb: c.ledger.blockDBs,
diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go
index 1394726bd..b95a924ce 100644
--- a/ledger/catchupaccessor_test.go
+++ b/ledger/catchupaccessor_test.go
@@ -55,8 +55,8 @@ func createTestingEncodedChunks(accountsCount uint64) (encodedAccountChunks [][]
if accounts >= accountsCount-64*1024 && last64KIndex == -1 {
last64KIndex = len(encodedAccountChunks)
}
- var balances catchpointFileBalancesChunkV6
- balances.Balances = make([]encodedBalanceRecordV6, chunkSize)
+ var chunk catchpointFileChunkV6
+ chunk.Balances = make([]encodedBalanceRecordV6, chunkSize)
for i := uint64(0); i < chunkSize; i++ {
var randomAccount encodedBalanceRecordV6
accountData := baseAccountData{}
@@ -67,9 +67,9 @@ func createTestingEncodedChunks(accountsCount uint64) (encodedAccountChunks [][]
crypto.RandBytes(randomAccount.Address[:])
}
binary.LittleEndian.PutUint64(randomAccount.Address[:], accounts+i)
- balances.Balances[i] = randomAccount
+ chunk.Balances[i] = randomAccount
}
- encodedAccountChunks = append(encodedAccountChunks, protocol.Encode(&balances))
+ encodedAccountChunks = append(encodedAccountChunks, protocol.Encode(&chunk))
accounts += chunkSize
}
return
@@ -110,7 +110,7 @@ func benchmarkRestoringFromCatchpointFileHelper(b *testing.B) {
}
encodedFileHeader := protocol.Encode(&fileHeader)
var progress CatchpointCatchupAccessorProgress
- err = catchpointAccessor.ProgressStagingBalances(context.Background(), "content.msgpack", encodedFileHeader, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(context.Background(), "content.msgpack", encodedFileHeader, &progress)
require.NoError(b, err)
// pre-create all encoded chunks.
@@ -126,7 +126,7 @@ func benchmarkRestoringFromCatchpointFileHelper(b *testing.B) {
last64KStart = time.Now()
}
- err = catchpointAccessor.ProgressStagingBalances(context.Background(), "balances.XX.msgpack", encodedAccounts, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(context.Background(), "balances.XX.msgpack", encodedAccounts, &progress)
require.NoError(b, err)
last64KIndex--
}
@@ -218,7 +218,7 @@ func TestBuildMerkleTrie(t *testing.T) {
catchpointAccessor := MakeCatchpointCatchupAccessor(l, log)
progressCallCount := 0
- progressNop := func(uint64) {
+ progressNop := func(uint64, uint64) {
progressCallCount++
}
@@ -234,22 +234,21 @@ func TestBuildMerkleTrie(t *testing.T) {
require.NoError(t, err, "ResetStagingBalances")
err = catchpointAccessor.BuildMerkleTrie(ctx, progressNop)
require.NoError(t, err)
- require.True(t, progressCallCount > 0)
+ require.False(t, progressCallCount > 0)
// process some data...
- progressCallCount = 0
err = catchpointAccessor.ResetStagingBalances(ctx, true)
require.NoError(t, err, "ResetStagingBalances")
- // TODO: catchpointAccessor.ProgressStagingBalances() like in ledgerFetcher.downloadLedger(cs.ctx, peer, round) like catchup/catchpointService.go which is the real usage of BuildMerkleTrie()
+ // TODO: call catchpointAccessor.ProcessStagingBalances() as in ledgerFetcher.downloadLedger(cs.ctx, peer, round) in catchup/catchpointService.go, which is the real usage of BuildMerkleTrie()
var blob []byte = nil // TODO: content!
var progress CatchpointCatchupAccessorProgress
- err = catchpointAccessor.ProgressStagingBalances(ctx, "ignoredContent", blob, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, "ignoredContent", blob, &progress)
require.NoError(t, err)
// this shouldn't work yet
- err = catchpointAccessor.ProgressStagingBalances(ctx, "balances.FAKE.msgpack", blob, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, "balances.FAKE.msgpack", blob, &progress)
require.Error(t, err)
// this needs content
- err = catchpointAccessor.ProgressStagingBalances(ctx, "content.msgpack", blob, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, "content.msgpack", blob, &progress)
require.Error(t, err)
// content.msgpack from this:
@@ -265,14 +264,14 @@ func TestBuildMerkleTrie(t *testing.T) {
BlockHeaderDigest: crypto.Digest{},
}
encodedFileHeader := protocol.Encode(&fileHeader)
- err = catchpointAccessor.ProgressStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
require.NoError(t, err)
// shouldn't work a second time
- err = catchpointAccessor.ProgressStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
require.Error(t, err)
// This should still fail, but slightly different coverage path
- err = catchpointAccessor.ProgressStagingBalances(ctx, "balances.FAKE.msgpack", blob, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, "balances.FAKE.msgpack", blob, &progress)
require.Error(t, err)
// create some catchpoint data
@@ -280,7 +279,7 @@ func TestBuildMerkleTrie(t *testing.T) {
for _, encodedAccounts := range encodedAccountChunks {
- err = catchpointAccessor.ProgressStagingBalances(context.Background(), "balances.XX.msgpack", encodedAccounts, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(context.Background(), "balances.XX.msgpack", encodedAccounts, &progress)
require.NoError(t, err)
}
@@ -316,13 +315,9 @@ func TestCatchupAccessorBlockdb(t *testing.T) {
}()
catchpointAccessor := MakeCatchpointCatchupAccessor(l, log)
ctx := context.Background()
- progressCallCount := 0
- progressNop := func(uint64) {
- progressCallCount++
- }
// actual testing...
- err = catchpointAccessor.BuildMerkleTrie(ctx, progressNop)
+ err = catchpointAccessor.BuildMerkleTrie(ctx, func(uint64, uint64) {})
require.Error(t, err)
}
@@ -406,10 +401,10 @@ func TestCatchupAccessorResourceCountMismatch(t *testing.T) {
BlockHeaderDigest: crypto.Digest{},
}
encodedFileHeader := protocol.Encode(&fileHeader)
- err = catchpointAccessor.ProgressStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
require.NoError(t, err)
- var balances catchpointFileBalancesChunkV6
+ var balances catchpointFileChunkV6
balances.Balances = make([]encodedBalanceRecordV6, 1)
var randomAccount encodedBalanceRecordV6
accountData := baseAccountData{}
@@ -422,7 +417,7 @@ func TestCatchupAccessorResourceCountMismatch(t *testing.T) {
encodedAccounts := protocol.Encode(&balances)
// expect error since there is a resource count mismatch
- err = catchpointAccessor.ProgressStagingBalances(ctx, "balances.XX.msgpack", encodedAccounts, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, "balances.XX.msgpack", encodedAccounts, &progress)
require.Error(t, err)
}
@@ -439,6 +434,10 @@ func (w *testStagingWriter) writeCreatables(ctx context.Context, balances []norm
return nil
}
+func (w *testStagingWriter) writeKVs(ctx context.Context, kvrs []encodedKVRecordV6) error {
+ return nil
+}
+
func (w *testStagingWriter) writeHashes(ctx context.Context, balances []normalizedAccountBalance) error {
for _, bal := range balances {
for _, hash := range bal.accountHashes {
@@ -528,7 +527,7 @@ func TestCatchupAccessorProcessStagingBalances(t *testing.T) {
}
// make chunks
- chunks := []catchpointFileBalancesChunkV6{
+ chunks := []catchpointFileChunkV6{
{
Balances: []encodedBalanceRecordV6{
encodedBalanceRecordFromBase(ledgertesting.RandomAddress(), acctA, nil, false),
diff --git a/ledger/internal/double_test.go b/ledger/double_test.go
index b33f3ca03..40e39b88c 100644
--- a/ledger/internal/double_test.go
+++ b/ledger/double_test.go
@@ -14,15 +14,15 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal_test
+package ledger
import (
"testing"
+ "github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/txntest"
- "github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/internal"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
@@ -43,8 +43,8 @@ import (
type DoubleLedger struct {
t *testing.T
- generator *ledger.Ledger
- validator *ledger.Ledger
+ generator *Ledger
+ validator *Ledger
eval *internal.BlockEvaluator
}
@@ -56,8 +56,8 @@ func (dl DoubleLedger) Close() {
// NewDoubleLedger creates a new DoubleLedger with the supplied balances and consensus version.
func NewDoubleLedger(t *testing.T, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion) DoubleLedger {
- g := newTestLedgerWithConsensusVersion(t, balances, cv)
- v := newTestLedgerFull(t, balances, cv, g.GenesisHash())
+ g := newSimpleLedgerWithConsensusVersion(t, balances, cv)
+ v := newSimpleLedgerFull(t, balances, cv, g.GenesisHash())
return DoubleLedger{t, g, v, nil}
}
@@ -124,17 +124,39 @@ func (dl *DoubleLedger) fullBlock(txs ...*txntest.Txn) *ledgercore.ValidatedBloc
func (dl *DoubleLedger) endBlock() *ledgercore.ValidatedBlock {
vb := endBlock(dl.t, dl.generator, dl.eval)
- checkBlock(dl.t, dl.validator, vb)
+ if dl.validator != nil { // Allows setting to nil while debugging, to simplify
+ checkBlock(dl.t, dl.validator, vb)
+ }
dl.eval = nil // Ensure it's not used again
return vb
}
+func (dl *DoubleLedger) fundedApp(sender basics.Address, amount uint64, source string) basics.AppIndex {
+ createapp := txntest.Txn{
+ Type: "appl",
+ Sender: sender,
+ ApprovalProgram: source,
+ }
+ vb := dl.fullBlock(&createapp)
+ appIndex := vb.Block().Payset[0].ApplyData.ApplicationID
+
+ fund := txntest.Txn{
+ Type: "pay",
+ Sender: sender,
+ Receiver: appIndex.Address(),
+ Amount: amount,
+ }
+
+ dl.txn(&fund)
+ return appIndex
+}
+
func (dl *DoubleLedger) reloadLedgers() {
require.NoError(dl.t, dl.generator.ReloadLedger())
require.NoError(dl.t, dl.validator.ReloadLedger())
}
-func checkBlock(t *testing.T, checkLedger *ledger.Ledger, vb *ledgercore.ValidatedBlock) {
+func checkBlock(t *testing.T, checkLedger *Ledger, vb *ledgercore.ValidatedBlock) {
bl := vb.Block()
msg := bl.MarshalMsg(nil)
var reconstituted bookkeeping.Block
@@ -160,9 +182,9 @@ func checkBlock(t *testing.T, checkLedger *ledger.Ledger, vb *ledgercore.Validat
err := check.TransactionGroup(group)
require.NoError(t, err, "%+v", reconstituted.Payset)
}
- check.SetGenerate(true)
+ check.SetGenerateForTesting(true)
cb := endBlock(t, checkLedger, check)
- check.SetGenerate(false)
+ check.SetGenerateForTesting(false)
require.Equal(t, vb.Block(), cb.Block())
// vb.Delta() need not actually be Equal, in the sense of require.Equal
@@ -179,7 +201,7 @@ func checkBlock(t *testing.T, checkLedger *ledger.Ledger, vb *ledgercore.Validat
// require.Equal(t, vb.Delta().Accts, cb.Delta().Accts)
}
-func nextCheckBlock(t testing.TB, ledger *ledger.Ledger, rs bookkeeping.RewardsState) *internal.BlockEvaluator {
+func nextCheckBlock(t testing.TB, ledger *Ledger, rs bookkeeping.RewardsState) *internal.BlockEvaluator {
rnd := ledger.Latest()
hdr, err := ledger.BlockHdr(rnd)
require.NoError(t, err)
diff --git a/ledger/eval_simple_test.go b/ledger/eval_simple_test.go
new file mode 100644
index 000000000..802e8de9d
--- /dev/null
+++ b/ledger/eval_simple_test.go
@@ -0,0 +1,545 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/txntest"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/execpool"
+)
+
+func TestBlockEvaluator(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genesisInitState, addrs, keys := ledgertesting.Genesis(10)
+
+ l, err := OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ genesisBlockHeader, err := l.BlockHdr(basics.Round(0))
+ require.NoError(t, err)
+ newBlock := bookkeeping.MakeBlock(genesisBlockHeader)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+
+ genHash := l.GenesisHash()
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addrs[0],
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round(),
+ GenesisHash: genHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addrs[1],
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+
+ // Correct signature should work
+ st := txn.Sign(keys[0])
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ // Broken signature should fail
+ stbad := st
+ st.Sig[2] ^= 8
+ txgroup := []transactions.SignedTxn{stbad}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+
+ // Repeat should fail
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.Error(t, err)
+
+ // out of range should fail
+ btxn := txn
+ btxn.FirstValid++
+ btxn.LastValid += 2
+ st = btxn.Sign(keys[0])
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.Error(t, err)
+
+ // bogus group should fail
+ btxn = txn
+ btxn.Group[1] = 1
+ st = btxn.Sign(keys[0])
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.Error(t, err)
+
+ // mixed fields should fail
+ btxn = txn
+ btxn.XferAsset = 3
+ st = btxn.Sign(keys[0])
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ // We don't test eval.Transaction() here because it doesn't check txn.WellFormed(), instead relying on that to have already been checked by the transaction pool.
+ // err = eval.Transaction(st, transactions.ApplyData{})
+ // require.Error(t, err)
+
+ selfTxn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addrs[2],
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round(),
+ GenesisHash: genHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addrs[2],
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+ stxn := selfTxn.Sign(keys[2])
+
+ // TestTransactionGroup() and Transaction() should have the same outcome, but work slightly different code paths.
+ txgroup = []transactions.SignedTxn{stxn}
+ err = eval.TestTransactionGroup(txgroup)
+ require.NoError(t, err)
+
+ err = eval.Transaction(stxn, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ t3 := txn
+ t3.Amount.Raw++
+ t4 := selfTxn
+ t4.Amount.Raw++
+
+ // a group without .Group should fail
+ s3 := t3.Sign(keys[0])
+ s4 := t4.Sign(keys[2])
+ txgroup = []transactions.SignedTxn{s3, s4}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ txgroupad := transactions.WrapSignedTxnsWithAD(txgroup)
+ err = eval.TransactionGroup(txgroupad)
+ require.Error(t, err)
+
+ // Test a group that should work
+ var group transactions.TxGroup
+ group.TxGroupHashes = []crypto.Digest{crypto.HashObj(t3), crypto.HashObj(t4)}
+ t3.Group = crypto.HashObj(group)
+ t4.Group = t3.Group
+ s3 = t3.Sign(keys[0])
+ s4 = t4.Sign(keys[2])
+ txgroup = []transactions.SignedTxn{s3, s4}
+ err = eval.TestTransactionGroup(txgroup)
+ require.NoError(t, err)
+
+ // disagreement on Group id should fail
+ t4bad := t4
+ t4bad.Group[3] ^= 3
+ s4bad := t4bad.Sign(keys[2])
+ txgroup = []transactions.SignedTxn{s3, s4bad}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ txgroupad = transactions.WrapSignedTxnsWithAD(txgroup)
+ err = eval.TransactionGroup(txgroupad)
+ require.Error(t, err)
+
+ // missing part of the group should fail
+ txgroup = []transactions.SignedTxn{s3}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+
+ validatedBlock, err := eval.GenerateBlock()
+ require.NoError(t, err)
+
+ accts := genesisInitState.Accounts
+ bal0 := accts[addrs[0]]
+ bal1 := accts[addrs[1]]
+ bal2 := accts[addrs[2]]
+
+ l.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+
+ bal0new, _, _, err := l.LookupAccount(newBlock.Round(), addrs[0])
+ require.NoError(t, err)
+ bal1new, _, _, err := l.LookupAccount(newBlock.Round(), addrs[1])
+ require.NoError(t, err)
+ bal2new, _, _, err := l.LookupAccount(newBlock.Round(), addrs[2])
+ require.NoError(t, err)
+
+ require.Equal(t, bal0new.MicroAlgos.Raw, bal0.MicroAlgos.Raw-minFee.Raw-100)
+ require.Equal(t, bal1new.MicroAlgos.Raw, bal1.MicroAlgos.Raw+100)
+ require.Equal(t, bal2new.MicroAlgos.Raw, bal2.MicroAlgos.Raw-minFee.Raw)
+}
+
+func TestRekeying(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // t.Parallel() NO! This test manipulates []protocol.Consensus
+
+ // Pretend rekeying is supported
+ actual := config.Consensus[protocol.ConsensusCurrentVersion]
+ pretend := actual
+ pretend.SupportRekeying = true
+ config.Consensus[protocol.ConsensusCurrentVersion] = pretend
+ defer func() {
+ config.Consensus[protocol.ConsensusCurrentVersion] = actual
+ }()
+
+ // Bring up a ledger
+ genesisInitState, addrs, keys := ledgertesting.Genesis(10)
+
+ l, err := OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ // Make a new block
+ nextRound := l.Latest() + basics.Round(1)
+ genHash := l.GenesisHash()
+
+ // Test plan
+ // Syntax: [A -> B][C, D] means transaction from A that rekeys to B with authaddr C and actual sig from D
+ makeTxn := func(sender, rekeyto, authaddr basics.Address, signer *crypto.SignatureSecrets, uniq uint8) transactions.SignedTxn {
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: minFee,
+ FirstValid: nextRound,
+ LastValid: nextRound,
+ GenesisHash: genHash,
+ RekeyTo: rekeyto,
+ Note: []byte{uniq},
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: sender,
+ },
+ }
+ sig := signer.Sign(txn)
+ return transactions.SignedTxn{Txn: txn, Sig: sig, AuthAddr: authaddr}
+ }
+
+ tryBlock := func(stxns []transactions.SignedTxn) error {
+ // We'll make a block using the evaluator.
+ // When generating a block, the evaluator doesn't check transaction sigs -- it assumes the transaction pool already did that.
+ // So the ValidatedBlock that comes out isn't necessarily actually a valid block. We'll call Validate ourselves.
+ genesisHdr, err := l.BlockHdr(basics.Round(0))
+ require.NoError(t, err)
+ newBlock := bookkeeping.MakeBlock(genesisHdr)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+
+ for _, stxn := range stxns {
+ err = eval.Transaction(stxn, transactions.ApplyData{})
+ if err != nil {
+ return err
+ }
+ }
+ validatedBlock, err := eval.GenerateBlock()
+ if err != nil {
+ return err
+ }
+
+ backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
+ defer backlogPool.Shutdown()
+ _, err = l.Validate(context.Background(), validatedBlock.Block(), backlogPool)
+ return err
+ }
+
+ // Preamble transactions, which all of the blocks in this test will start with
+ // [A -> 0][0,A] (normal transaction)
+ // [A -> B][0,A] (rekey)
+ txn0 := makeTxn(addrs[0], basics.Address{}, basics.Address{}, keys[0], 0) // Normal transaction
+ txn1 := makeTxn(addrs[0], addrs[1], basics.Address{}, keys[0], 1) // Rekey transaction
+
+ // Test 1: Do only good things
+ // (preamble)
+ // [A -> 0][B,B] (normal transaction using new key)
+ // [A -> A][B,B] (rekey back to A, transaction still signed by B)
+ // [A -> 0][0,A] (normal transaction again)
+ test1txns := []transactions.SignedTxn{
+ txn0, txn1, // (preamble)
+ makeTxn(addrs[0], basics.Address{}, addrs[1], keys[1], 2), // [A -> 0][B,B]
+ makeTxn(addrs[0], addrs[0], addrs[1], keys[1], 3), // [A -> A][B,B]
+ makeTxn(addrs[0], basics.Address{}, basics.Address{}, keys[0], 4), // [A -> 0][0,A]
+ }
+ err = tryBlock(test1txns)
+ require.NoError(t, err)
+
+ // Test 2: Use old key after rekeying
+ // (preamble)
+ // [A -> A][0,A] (rekey back to A, but signed by A instead of B)
+ test2txns := []transactions.SignedTxn{
+ txn0, txn1, // (preamble)
+ makeTxn(addrs[0], addrs[0], basics.Address{}, keys[0], 2), // [A -> A][0,A]
+ }
+ err = tryBlock(test2txns)
+ require.Error(t, err)
+
+ // TODO: More tests
+}
+
+func testEvalAppPoolingGroup(t *testing.T, schema basics.StateSchema, approvalProgram string, consensusVersion protocol.ConsensusVersion) error {
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ l := newSimpleLedgerWithConsensusVersion(t, genBalances, consensusVersion)
+ defer l.Close()
+
+ eval := nextBlock(t, l)
+
+ appcall1 := txntest.Txn{
+ Sender: addrs[0],
+ Type: protocol.ApplicationCallTx,
+ GlobalStateSchema: schema,
+ ApprovalProgram: approvalProgram,
+ }
+
+ appcall2 := txntest.Txn{
+ Sender: addrs[0],
+ Type: protocol.ApplicationCallTx,
+ ApplicationID: basics.AppIndex(1),
+ }
+
+ appcall3 := txntest.Txn{
+ Sender: addrs[1],
+ Type: protocol.ApplicationCallTx,
+ ApplicationID: basics.AppIndex(1),
+ }
+
+ return txgroup(t, l, eval, &appcall1, &appcall2, &appcall3)
+}
+
+// TestEvalAppPooledBudgetWithTxnGroup ensures 3 app call txns can successfully pool
+// budgets in a group txn and return an error if the budget is exceeded
+func TestEvalAppPooledBudgetWithTxnGroup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ source := func(n int, m int) string {
+ return "#pragma version 4\nbyte 0x1337BEEF\n" + strings.Repeat("keccak256\n", n) +
+ strings.Repeat("substring 0 4\n", m) + "pop\nint 1\n"
+ }
+
+ params := []protocol.ConsensusVersion{
+ protocol.ConsensusV29,
+ protocol.ConsensusFuture,
+ }
+
+ cases := []struct {
+ prog string
+ isSuccessV29 bool
+ isSuccessVFuture bool
+ expectedErrorV29 string
+ expectedErrorVFuture string
+ }{
+ {source(5, 47), true, true,
+ "",
+ ""},
+ {source(5, 48), false, true,
+ "pc=157 dynamic cost budget exceeded, executing pushint",
+ ""},
+ {source(16, 17), false, true,
+ "pc= 12 dynamic cost budget exceeded, executing keccak256",
+ ""},
+ {source(16, 18), false, false,
+ "pc= 12 dynamic cost budget exceeded, executing keccak256",
+ "pc= 78 dynamic cost budget exceeded, executing pushint"},
+ }
+
+ for i, param := range params {
+ for j, testCase := range cases {
+ t.Run(fmt.Sprintf("i=%d,j=%d", i, j), func(t *testing.T) {
+ err := testEvalAppPoolingGroup(t, basics.StateSchema{NumByteSlice: 3}, testCase.prog, param)
+ if !testCase.isSuccessV29 && reflect.DeepEqual(param, protocol.ConsensusV29) {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), testCase.expectedErrorV29)
+ } else if !testCase.isSuccessVFuture && reflect.DeepEqual(param, protocol.ConsensusFuture) {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), testCase.expectedErrorVFuture)
+ }
+ })
+ }
+ }
+}
+
+func TestMinBalanceChanges(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ l := newSimpleLedgerWithConsensusVersion(t, genBalances, protocol.ConsensusCurrentVersion)
+ defer l.Close()
+
+ createTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{
+ Total: 3,
+ Manager: addrs[1],
+ Reserve: addrs[2],
+ Freeze: addrs[3],
+ Clawback: addrs[4],
+ },
+ }
+
+ const expectedID basics.AssetIndex = 1
+ optInTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[5],
+ XferAsset: expectedID,
+ AssetReceiver: addrs[5],
+ }
+
+ ad0init, _, _, err := l.LookupLatest(addrs[0])
+ require.NoError(t, err)
+ ad5init, _, _, err := l.LookupLatest(addrs[5])
+ require.NoError(t, err)
+
+ eval := nextBlock(t, l)
+ txns(t, l, eval, &createTxn, &optInTxn)
+ endBlock(t, l, eval)
+
+ ad0new, _, _, err := l.LookupLatest(addrs[0])
+ require.NoError(t, err)
+ ad5new, _, _, err := l.LookupLatest(addrs[5])
+ require.NoError(t, err)
+
+ proto := l.GenesisProto()
+ // Check balance and min balance requirement changes
+ require.Equal(t, ad0init.MicroAlgos.Raw, ad0new.MicroAlgos.Raw+1000) // fee
+ require.Equal(t, ad0init.MinBalance(&proto).Raw, ad0new.MinBalance(&proto).Raw-100000) // create
+ require.Equal(t, ad5init.MicroAlgos.Raw, ad5new.MicroAlgos.Raw+1000) // fee
+ require.Equal(t, ad5init.MinBalance(&proto).Raw, ad5new.MinBalance(&proto).Raw-100000) // optin
+
+ optOutTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[5],
+ XferAsset: expectedID,
+ AssetReceiver: addrs[0],
+ AssetCloseTo: addrs[0],
+ }
+
+ closeTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[1], // The manager, not the creator
+ ConfigAsset: expectedID,
+ }
+
+ eval = nextBlock(t, l)
+ txns(t, l, eval, &optOutTxn, &closeTxn)
+ endBlock(t, l, eval)
+
+ ad0final, _, _, err := l.LookupLatest(addrs[0])
+ require.NoError(t, err)
+ ad5final, _, _, err := l.LookupLatest(addrs[5])
+ require.NoError(t, err)
+ // Check we got our balance "back"
+ require.Equal(t, ad0final.MinBalance(&proto), ad0init.MinBalance(&proto))
+ require.Equal(t, ad5final.MinBalance(&proto), ad5init.MinBalance(&proto))
+}
+
+// TestAppInsMinBalance checks that accounts with MaxAppsOptedIn are accepted by block evaluator
+// and do not cause any MaximumMinimumBalance problems
+func TestAppInsMinBalance(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ l := newSimpleLedgerWithConsensusVersion(t, genBalances, protocol.ConsensusV30)
+ defer l.Close()
+
+ const appid basics.AppIndex = 1
+
+ maxAppsOptedIn := config.Consensus[protocol.ConsensusV30].MaxAppsOptedIn
+ require.Greater(t, maxAppsOptedIn, 0)
+ maxAppsCreated := config.Consensus[protocol.ConsensusV30].MaxAppsCreated
+ require.Greater(t, maxAppsCreated, 0)
+ maxLocalSchemaEntries := config.Consensus[protocol.ConsensusV30].MaxLocalSchemaEntries
+ require.Greater(t, maxLocalSchemaEntries, uint64(0))
+
+ txnsCreate := make([]*txntest.Txn, 0, maxAppsOptedIn)
+ txnsOptIn := make([]*txntest.Txn, 0, maxAppsOptedIn)
+ appsCreated := make(map[basics.Address]int, len(addrs)-1)
+
+ acctIdx := 0
+ for i := 0; i < maxAppsOptedIn; i++ {
+ creator := addrs[acctIdx]
+ createTxn := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: creator,
+ ApprovalProgram: "int 1",
+ LocalStateSchema: basics.StateSchema{NumByteSlice: maxLocalSchemaEntries},
+ Note: ledgertesting.RandomNote(),
+ }
+ txnsCreate = append(txnsCreate, &createTxn)
+ count := appsCreated[creator]
+ count++
+ appsCreated[creator] = count
+ if count == maxAppsCreated {
+ acctIdx++
+ }
+
+ optInTxn := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[9],
+ ApplicationID: appid + basics.AppIndex(i),
+ OnCompletion: transactions.OptInOC,
+ }
+ txnsOptIn = append(txnsOptIn, &optInTxn)
+ }
+
+ eval := nextBlock(t, l)
+ txns1 := append(txnsCreate, txnsOptIn...)
+ txns(t, l, eval, txns1...)
+ vb := endBlock(t, l, eval)
+ mods := vb.Delta()
+ appAppResources := mods.Accts.GetAllAppResources()
+ appParamsCount := 0
+ appLocalStatesCount := 0
+ for _, ap := range appAppResources {
+ if ap.Params.Params != nil {
+ appParamsCount++
+ }
+ if ap.State.LocalState != nil {
+ appLocalStatesCount++
+ }
+ }
+ require.Equal(t, appLocalStatesCount, 50)
+ require.Equal(t, appParamsCount, 50)
+}
diff --git a/ledger/evalindexer.go b/ledger/evalindexer.go
index 5f11874c4..3e2d8ca34 100644
--- a/ledger/evalindexer.go
+++ b/ledger/evalindexer.go
@@ -43,6 +43,7 @@ type indexerLedgerForEval interface {
GetAssetCreator(map[basics.AssetIndex]struct{}) (map[basics.AssetIndex]FoundAddress, error)
GetAppCreator(map[basics.AppIndex]struct{}) (map[basics.AppIndex]FoundAddress, error)
LatestTotals() (ledgercore.AccountTotals, error)
+ LookupKv(basics.Round, string) ([]byte, error)
BlockHdrCached(basics.Round) (bookkeeping.BlockHeader, error)
}
@@ -78,6 +79,8 @@ type indexerLedgerConnector struct {
roundResources EvalForIndexerResources
}
+func (l indexerLedgerConnector) FlushCaches() {}
+
// BlockHdr is part of LedgerForEvaluator interface.
func (l indexerLedgerConnector) BlockHdr(round basics.Round) (bookkeeping.BlockHeader, error) {
if round != l.latestRound {
@@ -149,6 +152,15 @@ func (l indexerLedgerConnector) lookupResource(round basics.Round, address basic
return accountResourceMap[address][Creatable{aidx, ctype}], nil
}
+// LookupKv delegates to the Ledger and marks the box key as touched for post-processing
+func (l indexerLedgerConnector) LookupKv(rnd basics.Round, key string) ([]byte, error) {
+ value, err := l.il.LookupKv(rnd, key)
+ if err != nil {
+ return value, fmt.Errorf("LookupKv() in indexerLedgerConnector internal error: %w", err)
+ }
+ return value, nil
+}
+
// GetCreatorForRound is part of LedgerForEvaluator interface.
func (l indexerLedgerConnector) GetCreatorForRound(_ basics.Round, cindex basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
var foundAddress FoundAddress
diff --git a/ledger/evalindexer_test.go b/ledger/evalindexer_test.go
index 23997afa5..77b7dbde7 100644
--- a/ledger/evalindexer_test.go
+++ b/ledger/evalindexer_test.go
@@ -107,6 +107,11 @@ func (il indexerLedgerForEvalImpl) GetAppCreator(map[basics.AppIndex]struct{}) (
return nil, errors.New("GetAppCreator() not implemented")
}
+func (il indexerLedgerForEvalImpl) LookupKv(basics.Round, string) ([]byte, error) {
+ // This function is unused.
+ return nil, errors.New("LookupKv() not implemented")
+}
+
func (il indexerLedgerForEvalImpl) LatestTotals() (totals ledgercore.AccountTotals, err error) {
_, totals, err = il.l.LatestTotals()
return
diff --git a/ledger/hashkind_string.go b/ledger/hashkind_string.go
new file mode 100644
index 000000000..6549ae63b
--- /dev/null
+++ b/ledger/hashkind_string.go
@@ -0,0 +1,26 @@
+// Code generated by "stringer -type=hashKind"; DO NOT EDIT.
+
+package ledger
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[accountHK-0]
+ _ = x[assetHK-1]
+ _ = x[appHK-2]
+ _ = x[kvHK-3]
+}
+
+const _hashKind_name = "accountHKassetHKappHKkvHK"
+
+var _hashKind_index = [...]uint8{0, 9, 16, 21, 25}
+
+func (i hashKind) String() string {
+ if i >= hashKind(len(_hashKind_index)-1) {
+ return "hashKind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _hashKind_name[_hashKind_index[i]:_hashKind_index[i+1]]
+}
diff --git a/ledger/internal/appcow.go b/ledger/internal/appcow.go
index 503ddd06b..05d3d3df7 100644
--- a/ledger/internal/appcow.go
+++ b/ledger/internal/appcow.go
@@ -243,11 +243,11 @@ func (cb *roundCowState) AllocateApp(addr basics.Address, aidx basics.AppIndex,
lsd.maxCounts = &space
if global {
- cb.mods.Creatables[basics.CreatableIndex(aidx)] = ledgercore.ModifiedCreatable{
+ cb.mods.AddCreatable(basics.CreatableIndex(aidx), ledgercore.ModifiedCreatable{
Ctype: basics.AppCreatable,
Creator: addr,
Created: true,
- }
+ })
}
return nil
}
@@ -275,20 +275,15 @@ func (cb *roundCowState) DeallocateApp(addr basics.Address, aidx basics.AppIndex
lsd.kvCow = make(stateDelta)
if global {
- cb.mods.Creatables[basics.CreatableIndex(aidx)] = ledgercore.ModifiedCreatable{
+ cb.mods.AddCreatable(basics.CreatableIndex(aidx), ledgercore.ModifiedCreatable{
Ctype: basics.AppCreatable,
Creator: addr,
Created: false,
- }
+ })
}
return nil
}
-// GetKey looks for a key in {addr, aidx, global} storage
-func (cb *roundCowState) GetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) {
- return cb.getKey(addr, aidx, global, key, accountIdx)
-}
-
// getKey looks for a key in {addr, aidx, global} storage
// This is hierarchical lookup: if the key not in this cow cache, then request parent and all way down to ledger
func (cb *roundCowState) getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) {
@@ -339,8 +334,8 @@ func (cb *roundCowState) getKey(addr basics.Address, aidx basics.AppIndex, globa
return cb.lookupParent.getKey(addr, aidx, global, key, accountIdx)
}
-// SetKey creates a new key-value in {addr, aidx, global} storage
-func (cb *roundCowState) SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error {
+// setKey creates a new key-value in {addr, aidx, global} storage
+func (cb *roundCowState) setKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error {
// Enforce maximum key length
if len(key) > cb.proto.MaxAppKeyLen {
return fmt.Errorf("key too long: length was %d, maximum is %d", len(key), cb.proto.MaxAppKeyLen)
@@ -368,7 +363,7 @@ func (cb *roundCowState) SetKey(addr basics.Address, aidx basics.AppIndex, globa
}
// Fetch the old value + presence so we know how to update
- oldValue, oldOk, err := cb.GetKey(addr, aidx, global, key, accountIdx)
+ oldValue, oldOk, err := cb.getKey(addr, aidx, global, key, accountIdx)
if err != nil {
return err
}
@@ -398,8 +393,8 @@ func (cb *roundCowState) SetKey(addr basics.Address, aidx basics.AppIndex, globa
return lsd.checkCounts()
}
-// DelKey removes a key from {addr, aidx, global} storage
-func (cb *roundCowState) DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error {
+// delKey removes a key from {addr, aidx, global} storage
+func (cb *roundCowState) delKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error {
// Check that account has allocated storage
allocated, err := cb.allocated(addr, aidx, global)
if err != nil {
@@ -411,7 +406,7 @@ func (cb *roundCowState) DelKey(addr basics.Address, aidx basics.AppIndex, globa
}
// Fetch the old value + presence so we know how to update counts
- oldValue, oldOk, err := cb.GetKey(addr, aidx, global, key, accountIdx)
+ oldValue, oldOk, err := cb.getKey(addr, aidx, global, key, accountIdx)
if err != nil {
return err
}
@@ -461,7 +456,7 @@ func MakeDebugBalances(l LedgerForCowBase, round basics.Round, proto protocol.Co
func (cb *roundCowState) StatefulEval(gi int, params *logic.EvalParams, aidx basics.AppIndex, program []byte) (pass bool, evalDelta transactions.EvalDelta, err error) {
// Make a child cow to eval our program in
calf := cb.child(1)
- params.Ledger = newLogicLedger(calf)
+ params.Ledger = calf
// Eval the program
pass, cx, err := logic.EvalContract(program, gi, aidx, params)
@@ -487,7 +482,7 @@ func (cb *roundCowState) StatefulEval(gi int, params *logic.EvalParams, aidx bas
// changes from this app and any inner called apps. Instead, we now keep
// the EvalDelta built as we go, in app evaluation. So just use it.
if cb.proto.LogicSigVersion < 6 {
- evalDelta, err = calf.BuildEvalDelta(aidx, &params.TxnGroup[gi].Txn)
+ evalDelta, err = calf.buildEvalDelta(aidx, &params.TxnGroup[gi].Txn)
if err != nil {
return false, transactions.EvalDelta{}, err
}
@@ -502,9 +497,9 @@ func (cb *roundCowState) StatefulEval(gi int, params *logic.EvalParams, aidx bas
return pass, evalDelta, nil
}
-// BuildEvalDelta creates an EvalDelta by converting internal sdeltas
+// buildEvalDelta creates an EvalDelta by converting internal sdeltas
// into the (Global|Local)Delta fields.
-func (cb *roundCowState) BuildEvalDelta(aidx basics.AppIndex, txn *transactions.Transaction) (evalDelta transactions.EvalDelta, err error) {
+func (cb *roundCowState) buildEvalDelta(aidx basics.AppIndex, txn *transactions.Transaction) (evalDelta transactions.EvalDelta, err error) {
// sdeltas
foundGlobal := false
for addr, smod := range cb.sdeltas {
diff --git a/ledger/internal/appcow_test.go b/ledger/internal/appcow_test.go
index 76b4f1f8d..db659636b 100644
--- a/ledger/internal/appcow_test.go
+++ b/ledger/internal/appcow_test.go
@@ -87,7 +87,11 @@ func (ml *emptyLedger) getKey(addr basics.Address, aidx basics.AppIndex, global
return basics.TealValue{}, false, nil
}
-func (ml *emptyLedger) txnCounter() uint64 {
+func (ml *emptyLedger) kvGet(key string) ([]byte, bool, error) {
+ return nil, false, nil
+}
+
+func (ml *emptyLedger) Counter() uint64 {
return 0
}
@@ -115,7 +119,7 @@ func getCow(creatables []modsData) *roundCowState {
proto: config.Consensus[protocol.ConsensusCurrentVersion],
}
for _, e := range creatables {
- cs.mods.Creatables[e.cidx] = ledgercore.ModifiedCreatable{Ctype: e.ctype, Creator: e.addr, Created: true}
+ cs.mods.AddCreatable(e.cidx, ledgercore.ModifiedCreatable{Ctype: e.ctype, Creator: e.addr, Created: true})
}
return cs
}
@@ -287,7 +291,7 @@ func TestCowStorage(t *testing.T) {
actuallyAllocated := st.allocated(aapp)
rkey := allKeys[rand.Intn(len(allKeys))]
rval := allValues[rand.Intn(len(allValues))]
- err := cow.SetKey(addr, sptr.aidx, sptr.global, rkey, rval, 0)
+ err := cow.setKey(addr, sptr.aidx, sptr.global, rkey, rval, 0)
if actuallyAllocated {
require.NoError(t, err)
err = st.set(aapp, rkey, rval)
@@ -302,7 +306,7 @@ func TestCowStorage(t *testing.T) {
if rand.Float32() < 0.25 {
actuallyAllocated := st.allocated(aapp)
rkey := allKeys[rand.Intn(len(allKeys))]
- err := cow.DelKey(addr, sptr.aidx, sptr.global, rkey, 0)
+ err := cow.delKey(addr, sptr.aidx, sptr.global, rkey, 0)
if actuallyAllocated {
require.NoError(t, err)
err = st.del(aapp, rkey)
@@ -344,7 +348,7 @@ func TestCowStorage(t *testing.T) {
tval, tok, err := st.get(aapp, key)
require.NoError(t, err)
- cval, cok, err := cow.GetKey(addr, sptr.aidx, sptr.global, key, 0)
+ cval, cok, err := cow.getKey(addr, sptr.aidx, sptr.global, key, 0)
require.NoError(t, err)
require.Equal(t, tok, cok)
require.Equal(t, tval, cval)
@@ -383,29 +387,29 @@ func TestCowBuildDelta(t *testing.T) {
cow := roundCowState{}
cow.sdeltas = make(map[basics.Address]map[storagePtr]*storageDelta)
txn := transactions.Transaction{}
- ed, err := cow.BuildEvalDelta(aidx, &txn)
+ ed, err := cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Empty(ed)
cow.sdeltas[creator] = make(map[storagePtr]*storageDelta)
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Empty(ed)
// check global delta
cow.sdeltas[creator][storagePtr{aidx, true}] = &storageDelta{}
- ed, err = cow.BuildEvalDelta(1, &txn)
+ ed, err = cow.buildEvalDelta(1, &txn)
a.Error(err)
a.Contains(err.Error(), "found storage delta for different app")
a.Empty(ed)
cow.sdeltas[creator][storagePtr{aidx, true}] = &storageDelta{}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Equal(transactions.EvalDelta{GlobalDelta: basics.StateDelta{}}, ed)
cow.sdeltas[creator][storagePtr{aidx + 1, true}] = &storageDelta{}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.Error(err)
a.Contains(err.Error(), "found storage delta for different app")
a.Empty(ed)
@@ -413,7 +417,7 @@ func TestCowBuildDelta(t *testing.T) {
delete(cow.sdeltas[creator], storagePtr{aidx + 1, true})
cow.sdeltas[sender] = make(map[storagePtr]*storageDelta)
cow.sdeltas[sender][storagePtr{aidx, true}] = &storageDelta{}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.Error(err)
a.Contains(err.Error(), "found more than one global delta")
a.Empty(ed)
@@ -422,7 +426,7 @@ func TestCowBuildDelta(t *testing.T) {
delete(cow.sdeltas[sender], storagePtr{aidx, true})
cow.sdeltas[sender][storagePtr{aidx, false}] = &storageDelta{}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.Error(err)
a.Contains(err.Error(), "invalid Account reference ")
a.Empty(ed)
@@ -432,7 +436,7 @@ func TestCowBuildDelta(t *testing.T) {
cow.mods.Hdr = &bookkeeping.BlockHeader{
UpgradeState: bookkeeping.UpgradeState{CurrentProtocol: protocol.ConsensusV25},
}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Equal(
transactions.EvalDelta{
@@ -445,7 +449,7 @@ func TestCowBuildDelta(t *testing.T) {
// check v27 behavior for empty deltas
cow.mods.Hdr = nil
cow.proto = config.Consensus[protocol.ConsensusCurrentVersion]
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Equal(
transactions.EvalDelta{
@@ -468,7 +472,7 @@ func TestCowBuildDelta(t *testing.T) {
},
},
}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Equal(
transactions.EvalDelta{
@@ -508,7 +512,7 @@ func TestCowBuildDelta(t *testing.T) {
},
}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Equal(
transactions.EvalDelta{
@@ -542,7 +546,7 @@ func TestCowBuildDelta(t *testing.T) {
},
},
}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Equal(
transactions.EvalDelta{
@@ -573,7 +577,7 @@ func TestCowBuildDelta(t *testing.T) {
},
accountIdx: 1,
}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Equal(
transactions.EvalDelta{
@@ -601,7 +605,7 @@ func TestCowBuildDelta(t *testing.T) {
},
accountIdx: 1,
}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Equal(
transactions.EvalDelta{
@@ -1067,8 +1071,8 @@ func TestCowGetters(t *testing.T) {
ts := int64(11223344)
c.mods.PrevTimestamp = ts
- a.Equal(round, c.round())
- a.Equal(ts, c.prevTimestamp())
+ a.Equal(round, c.Round())
+ a.Equal(ts, c.PrevTimestamp())
}
func TestCowGet(t *testing.T) {
@@ -1108,7 +1112,7 @@ func TestCowGetKey(t *testing.T) {
c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{
addr: {storagePtr{aidx, true}: &storageDelta{action: deallocAction}},
}
- _, ok, err := c.GetKey(addr, aidx, true, "gkey", 0)
+ _, ok, err := c.getKey(addr, aidx, true, "gkey", 0)
a.Error(err)
a.False(ok)
a.Contains(err.Error(), "cannot fetch key")
@@ -1116,10 +1120,10 @@ func TestCowGetKey(t *testing.T) {
c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{
addr: {storagePtr{aidx, true}: &storageDelta{action: allocAction}},
}
- _, ok, err = c.GetKey(addr, aidx, true, "gkey", 0)
+ _, ok, err = c.getKey(addr, aidx, true, "gkey", 0)
a.NoError(err)
a.False(ok)
- _, ok, err = c.GetKey(addr, aidx, true, "gkey", 0)
+ _, ok, err = c.getKey(addr, aidx, true, "gkey", 0)
a.NoError(err)
a.False(ok)
@@ -1132,7 +1136,7 @@ func TestCowGetKey(t *testing.T) {
},
},
}
- _, ok, err = c.GetKey(addr, aidx, true, "gkey", 0)
+ _, ok, err = c.getKey(addr, aidx, true, "gkey", 0)
a.NoError(err)
a.False(ok)
@@ -1144,7 +1148,7 @@ func TestCowGetKey(t *testing.T) {
},
},
}
- val, ok, err := c.GetKey(addr, aidx, true, "gkey", 0)
+ val, ok, err := c.getKey(addr, aidx, true, "gkey", 0)
a.NoError(err)
a.True(ok)
a.Equal(tv, val)
@@ -1159,14 +1163,14 @@ func TestCowGetKey(t *testing.T) {
},
}
- val, ok, err = c.GetKey(addr, aidx, false, "lkey", 0)
+ val, ok, err = c.getKey(addr, aidx, false, "lkey", 0)
a.NoError(err)
a.True(ok)
a.Equal(tv, val)
// ensure other requests go down to roundCowParent
- a.Panics(func() { c.GetKey(ledgertesting.RandomAddress(), aidx, false, "lkey", 0) })
- a.Panics(func() { c.GetKey(addr, aidx+1, false, "lkey", 0) })
+ a.Panics(func() { c.getKey(ledgertesting.RandomAddress(), aidx, false, "lkey", 0) })
+ a.Panics(func() { c.getKey(addr, aidx+1, false, "lkey", 0) })
}
func TestCowSetKey(t *testing.T) {
@@ -1183,14 +1187,14 @@ func TestCowSetKey(t *testing.T) {
key := strings.Repeat("key", 100)
val := "val"
tv := basics.TealValue{Type: basics.TealBytesType, Bytes: val}
- err := c.SetKey(addr, aidx, true, key, tv, 0)
+ err := c.setKey(addr, aidx, true, key, tv, 0)
a.Error(err)
a.Contains(err.Error(), "key too long")
key = "key"
val = strings.Repeat("val", 100)
tv = basics.TealValue{Type: basics.TealBytesType, Bytes: val}
- err = c.SetKey(addr, aidx, true, key, tv, 0)
+ err = c.setKey(addr, aidx, true, key, tv, 0)
a.Error(err)
a.Contains(err.Error(), "value too long")
@@ -1199,7 +1203,7 @@ func TestCowSetKey(t *testing.T) {
c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{
addr: {storagePtr{aidx, true}: &storageDelta{action: deallocAction}},
}
- err = c.SetKey(addr, aidx, true, key, tv, 0)
+ err = c.setKey(addr, aidx, true, key, tv, 0)
a.Error(err)
a.Contains(err.Error(), "cannot set key")
@@ -1215,13 +1219,13 @@ func TestCowSetKey(t *testing.T) {
},
},
}
- err = c.SetKey(addr, aidx, true, key, tv, 0)
+ err = c.setKey(addr, aidx, true, key, tv, 0)
a.Error(err)
a.Contains(err.Error(), "exceeds schema bytes")
counts = basics.StateSchema{NumUint: 1}
maxCounts = basics.StateSchema{NumByteSlice: 1}
- err = c.SetKey(addr, aidx, true, key, tv, 0)
+ err = c.setKey(addr, aidx, true, key, tv, 0)
a.Error(err)
a.Contains(err.Error(), "exceeds schema integer")
@@ -1236,12 +1240,12 @@ func TestCowSetKey(t *testing.T) {
},
},
}
- err = c.SetKey(addr, aidx, true, key, tv, 0)
+ err = c.setKey(addr, aidx, true, key, tv, 0)
a.NoError(err)
counts = basics.StateSchema{NumUint: 1}
maxCounts = basics.StateSchema{NumByteSlice: 1, NumUint: 1}
- err = c.SetKey(addr, aidx, true, key, tv, 0)
+ err = c.setKey(addr, aidx, true, key, tv, 0)
a.NoError(err)
// check local
@@ -1256,12 +1260,12 @@ func TestCowSetKey(t *testing.T) {
},
},
}
- err = c.SetKey(addr1, aidx, false, key, tv, 0)
+ err = c.setKey(addr1, aidx, false, key, tv, 0)
a.NoError(err)
// ensure other requests go down to roundCowParent
- a.Panics(func() { c.SetKey(ledgertesting.RandomAddress(), aidx, false, key, tv, 0) })
- a.Panics(func() { c.SetKey(addr, aidx+1, false, key, tv, 0) })
+ a.Panics(func() { c.setKey(ledgertesting.RandomAddress(), aidx, false, key, tv, 0) })
+ a.Panics(func() { c.setKey(addr, aidx+1, false, key, tv, 0) })
}
func TestCowSetKeyVFuture(t *testing.T) {
@@ -1280,21 +1284,21 @@ func TestCowSetKeyVFuture(t *testing.T) {
key := strings.Repeat("key", 100)
val := "val"
tv := basics.TealValue{Type: basics.TealBytesType, Bytes: val}
- err := c.SetKey(addr, aidx, true, key, tv, 0)
+ err := c.setKey(addr, aidx, true, key, tv, 0)
a.Error(err)
a.Contains(err.Error(), "key too long")
key = "key"
val = strings.Repeat("val", 100)
tv = basics.TealValue{Type: basics.TealBytesType, Bytes: val}
- err = c.SetKey(addr, aidx, true, key, tv, 0)
+ err = c.setKey(addr, aidx, true, key, tv, 0)
a.Error(err)
a.Contains(err.Error(), "value too long")
key = strings.Repeat("k", protoF.MaxAppKeyLen)
val = strings.Repeat("v", protoF.MaxAppSumKeyValueLens-len(key)+1)
tv = basics.TealValue{Type: basics.TealBytesType, Bytes: val}
- err = c.SetKey(addr, aidx, true, key, tv, 0)
+ err = c.setKey(addr, aidx, true, key, tv, 0)
a.Error(err)
a.Contains(err.Error(), "key/value total too long")
}
@@ -1362,7 +1366,7 @@ func TestCowDelKey(t *testing.T) {
c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{
addr: {storagePtr{aidx, true}: &storageDelta{action: deallocAction}},
}
- err := c.DelKey(addr, aidx, true, key, 0)
+ err := c.delKey(addr, aidx, true, key, 0)
a.Error(err)
a.Contains(err.Error(), "cannot del key")
@@ -1378,7 +1382,7 @@ func TestCowDelKey(t *testing.T) {
},
},
}
- err = c.DelKey(addr, aidx, true, key, 0)
+ err = c.delKey(addr, aidx, true, key, 0)
a.NoError(err)
c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{
@@ -1391,10 +1395,10 @@ func TestCowDelKey(t *testing.T) {
},
},
}
- err = c.DelKey(addr, aidx, false, key, 0)
+ err = c.delKey(addr, aidx, false, key, 0)
a.NoError(err)
// ensure other requests go down to roundCowParent
- a.Panics(func() { c.DelKey(ledgertesting.RandomAddress(), aidx, false, key, 0) })
- a.Panics(func() { c.DelKey(addr, aidx+1, false, key, 0) })
+ a.Panics(func() { c.delKey(ledgertesting.RandomAddress(), aidx, false, key, 0) })
+ a.Panics(func() { c.delKey(addr, aidx+1, false, key, 0) })
}
diff --git a/ledger/internal/applications.go b/ledger/internal/applications.go
index 27d306ac9..1bf694eed 100644
--- a/ledger/internal/applications.go
+++ b/ledger/internal/applications.go
@@ -21,55 +21,25 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger/apply"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
)
-type logicLedger struct {
- cow cowForLogicLedger
-}
-
-type cowForLogicLedger interface {
- Get(addr basics.Address, withPendingRewards bool) (ledgercore.AccountData, error)
- GetAppParams(addr basics.Address, aidx basics.AppIndex) (basics.AppParams, bool, error)
- GetAssetParams(addr basics.Address, aidx basics.AssetIndex) (basics.AssetParams, bool, error)
- GetAssetHolding(addr basics.Address, aidx basics.AssetIndex) (basics.AssetHolding, bool, error)
- GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error)
- GetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error)
- BuildEvalDelta(aidx basics.AppIndex, txn *transactions.Transaction) (transactions.EvalDelta, error)
-
- SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error
- DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error
-
- round() basics.Round
- prevTimestamp() int64
- allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error)
- txnCounter() uint64
- incTxnCount()
-
- // The method should use the txtail to ensure MaxTxnLife+1 headers back are available
- blockHdrCached(round basics.Round) (bookkeeping.BlockHeader, error)
-}
+/* This file adds functions to roundCowState that make it more palatable for use
+ outside of the ledger package. The LedgerForLogic interface expects them. */
-func newLogicLedger(cow cowForLogicLedger) *logicLedger {
- return &logicLedger{
- cow: cow,
- }
-}
-
-func (al *logicLedger) AccountData(addr basics.Address) (ledgercore.AccountData, error) {
- record, err := al.cow.Get(addr, true)
+func (cs *roundCowState) AccountData(addr basics.Address) (ledgercore.AccountData, error) {
+ record, err := cs.Get(addr, true)
if err != nil {
return ledgercore.AccountData{}, err
}
return record, nil
}
-func (al *logicLedger) Authorizer(addr basics.Address) (basics.Address, error) {
- record, err := al.cow.Get(addr, false) // pending rewards unneeded
+func (cs *roundCowState) Authorizer(addr basics.Address) (basics.Address, error) {
+ record, err := cs.Get(addr, false) // pending rewards unneeded
if err != nil {
return basics.Address{}, err
}
@@ -79,25 +49,24 @@ func (al *logicLedger) Authorizer(addr basics.Address) (basics.Address, error) {
return addr, nil
}
-func (al *logicLedger) AssetHolding(addr basics.Address, assetIdx basics.AssetIndex) (basics.AssetHolding, error) {
+func (cs *roundCowState) AssetHolding(addr basics.Address, assetIdx basics.AssetIndex) (basics.AssetHolding, error) {
// Fetch the requested balance record
- holding, ok, err := al.cow.GetAssetHolding(addr, assetIdx)
+ holding, ok, err := cs.GetAssetHolding(addr, assetIdx)
if err != nil {
return basics.AssetHolding{}, err
}
// Ensure we have the requested holding
if !ok {
- err = fmt.Errorf("account %s has not opted in to asset %d", addr.String(), assetIdx)
- return basics.AssetHolding{}, err
+ return basics.AssetHolding{}, fmt.Errorf("account %s has not opted in to asset %d", addr, assetIdx)
}
return holding, nil
}
-func (al *logicLedger) AssetParams(assetIdx basics.AssetIndex) (basics.AssetParams, basics.Address, error) {
+func (cs *roundCowState) AssetParams(assetIdx basics.AssetIndex) (basics.AssetParams, basics.Address, error) {
// Find asset creator
- creator, ok, err := al.cow.GetCreator(basics.CreatableIndex(assetIdx), basics.AssetCreatable)
+ creator, ok, err := cs.GetCreator(basics.CreatableIndex(assetIdx), basics.AssetCreatable)
if err != nil {
return basics.AssetParams{}, creator, err
}
@@ -108,23 +77,22 @@ func (al *logicLedger) AssetParams(assetIdx basics.AssetIndex) (basics.AssetPara
}
// Fetch the requested balance record
- params, ok, err := al.cow.GetAssetParams(creator, assetIdx)
+ params, ok, err := cs.GetAssetParams(creator, assetIdx)
if err != nil {
return basics.AssetParams{}, creator, err
}
// Ensure account created the requested asset
if !ok {
- err = fmt.Errorf("account %s has not created asset %d", creator, assetIdx)
- return basics.AssetParams{}, creator, err
+ return basics.AssetParams{}, creator, fmt.Errorf("account %s has not created asset %d", creator, assetIdx)
}
return params, creator, nil
}
-func (al *logicLedger) AppParams(appIdx basics.AppIndex) (basics.AppParams, basics.Address, error) {
+func (cs *roundCowState) AppParams(appIdx basics.AppIndex) (basics.AppParams, basics.Address, error) {
// Find app creator
- creator, ok, err := al.cow.GetCreator(basics.CreatableIndex(appIdx), basics.AppCreatable)
+ creator, ok, err := cs.GetCreator(basics.CreatableIndex(appIdx), basics.AppCreatable)
if err != nil {
return basics.AppParams{}, creator, err
}
@@ -135,51 +103,42 @@ func (al *logicLedger) AppParams(appIdx basics.AppIndex) (basics.AppParams, basi
}
// Fetch the requested balance record
- params, ok, err := al.cow.GetAppParams(creator, appIdx)
+ params, ok, err := cs.GetAppParams(creator, appIdx)
if err != nil {
return basics.AppParams{}, creator, err
}
// Ensure account created the requested app
if !ok {
- err = fmt.Errorf("account %s has not created app %d", creator, appIdx)
- return basics.AppParams{}, creator, err
+ return basics.AppParams{}, creator, fmt.Errorf("account %s has not created app %d", creator, appIdx)
}
return params, creator, nil
}
-func (al *logicLedger) Round() basics.Round {
- return al.cow.round()
+func (cs *roundCowState) OptedIn(addr basics.Address, appIdx basics.AppIndex) (bool, error) {
+ return cs.allocated(addr, appIdx, false)
}
-func (al *logicLedger) LatestTimestamp() int64 {
- return al.cow.prevTimestamp()
+func (cs *roundCowState) GetLocal(addr basics.Address, appIdx basics.AppIndex, key string, accountIdx uint64) (basics.TealValue, bool, error) {
+ return cs.getKey(addr, appIdx, false, key, accountIdx)
}
-func (al *logicLedger) BlockHdrCached(round basics.Round) (bookkeeping.BlockHeader, error) {
- return al.cow.blockHdrCached(round)
+func (cs *roundCowState) SetLocal(addr basics.Address, appIdx basics.AppIndex, key string, value basics.TealValue, accountIdx uint64) error {
+ return cs.setKey(addr, appIdx, false, key, value, accountIdx)
}
-func (al *logicLedger) OptedIn(addr basics.Address, appIdx basics.AppIndex) (bool, error) {
- return al.cow.allocated(addr, appIdx, false)
+func (cs *roundCowState) BlockHdrCached(round basics.Round) (bookkeeping.BlockHeader, error) {
+ return cs.blockHdrCached(round)
}
-func (al *logicLedger) GetLocal(addr basics.Address, appIdx basics.AppIndex, key string, accountIdx uint64) (basics.TealValue, bool, error) {
- return al.cow.GetKey(addr, appIdx, false, key, accountIdx)
+func (cs *roundCowState) DelLocal(addr basics.Address, appIdx basics.AppIndex, key string, accountIdx uint64) error {
+ return cs.delKey(addr, appIdx, false, key, accountIdx)
}
-func (al *logicLedger) SetLocal(addr basics.Address, appIdx basics.AppIndex, key string, value basics.TealValue, accountIdx uint64) error {
- return al.cow.SetKey(addr, appIdx, false, key, value, accountIdx)
-}
-
-func (al *logicLedger) DelLocal(addr basics.Address, appIdx basics.AppIndex, key string, accountIdx uint64) error {
- return al.cow.DelKey(addr, appIdx, false, key, accountIdx)
-}
-
-func (al *logicLedger) fetchAppCreator(appIdx basics.AppIndex) (basics.Address, error) {
+func (cs *roundCowState) fetchAppCreator(appIdx basics.AppIndex) (basics.Address, error) {
// Fetch the application creator
- addr, ok, err := al.cow.GetCreator(basics.CreatableIndex(appIdx), basics.AppCreatable)
+ addr, ok, err := cs.GetCreator(basics.CreatableIndex(appIdx), basics.AppCreatable)
if err != nil {
return basics.Address{}, err
@@ -190,52 +149,158 @@ func (al *logicLedger) fetchAppCreator(appIdx basics.AppIndex) (basics.Address,
return addr, nil
}
-func (al *logicLedger) GetGlobal(appIdx basics.AppIndex, key string) (basics.TealValue, bool, error) {
- addr, err := al.fetchAppCreator(appIdx)
+func (cs *roundCowState) GetGlobal(appIdx basics.AppIndex, key string) (basics.TealValue, bool, error) {
+ creator, err := cs.fetchAppCreator(appIdx)
if err != nil {
return basics.TealValue{}, false, err
}
- return al.cow.GetKey(addr, appIdx, true, key, 0)
+ return cs.getKey(creator, appIdx, true, key, 0)
}
-func (al *logicLedger) SetGlobal(appIdx basics.AppIndex, key string, value basics.TealValue) error {
- creator, err := al.fetchAppCreator(appIdx)
+func (cs *roundCowState) SetGlobal(appIdx basics.AppIndex, key string, value basics.TealValue) error {
+ creator, err := cs.fetchAppCreator(appIdx)
if err != nil {
return err
}
- return al.cow.SetKey(creator, appIdx, true, key, value, 0)
+ return cs.setKey(creator, appIdx, true, key, value, 0)
}
-func (al *logicLedger) DelGlobal(appIdx basics.AppIndex, key string) error {
- creator, err := al.fetchAppCreator(appIdx)
+func (cs *roundCowState) DelGlobal(appIdx basics.AppIndex, key string) error {
+ creator, err := cs.fetchAppCreator(appIdx)
if err != nil {
return err
}
- return al.cow.DelKey(creator, appIdx, true, key, 0)
+ return cs.delKey(creator, appIdx, true, key, 0)
}
-func (al *logicLedger) balances() (apply.Balances, error) {
- balances, ok := al.cow.(apply.Balances)
+func (cs *roundCowState) kvGet(key string) ([]byte, bool, error) {
+ value, ok := cs.mods.KvMods[key]
if !ok {
- return nil, fmt.Errorf("cannot get a Balances object from %v", al)
+ return cs.lookupParent.kvGet(key)
}
- return balances, nil
+ // If value is nil, it's a marker for a local deletion
+ return value.Data, value.Data != nil, nil
}
-func (al *logicLedger) Perform(gi int, ep *logic.EvalParams) error {
- txn := &ep.TxnGroup[gi]
- balances, err := al.balances()
+func (cb *roundCowBase) kvGet(key string) ([]byte, bool, error) {
+ value, ok := cb.kvStore[key]
+ if !ok {
+ v, err := cb.l.LookupKv(cb.rnd, key)
+ if err != nil {
+ return nil, false, err
+ }
+ value = v
+ cb.kvStore[key] = value
+ }
+ // If value is nil, it caches a lookup that returned nothing.
+ return value, value != nil, nil
+}
+
+func (cs *roundCowState) kvPut(key string, value []byte) error {
+ cs.mods.AddKvMod(key, ledgercore.KvValueDelta{Data: value})
+ return nil
+}
+
+func (cs *roundCowState) kvDel(key string) error {
+ cs.mods.AddKvMod(key, ledgercore.KvValueDelta{Data: nil})
+ return nil
+}
+
+func (cs *roundCowState) NewBox(appIdx basics.AppIndex, key string, value []byte, appAddr basics.Address) error {
+ // Use same limit on key length as for global/local storage
+ if len(key) > cs.proto.MaxAppKeyLen {
+ return fmt.Errorf("name too long: length was %d, maximum is %d", len(key), cs.proto.MaxAppKeyLen)
+ }
+ // This rule is NOT like global/local storage, but seems like it will limit
+ // confusion, since these are standalone entities.
+ if len(key) == 0 {
+ return fmt.Errorf("box names may not be zero length")
+ }
+
+ size := uint64(len(value))
+ if size > cs.proto.MaxBoxSize {
+ return fmt.Errorf("box size too large: %d, maximum is %d", size, cs.proto.MaxBoxSize)
+ }
+
+ fullKey := logic.MakeBoxKey(appIdx, key)
+ _, exists, err := cs.kvGet(fullKey)
if err != nil {
return err
}
+ if exists {
+ return fmt.Errorf("attempt to recreate %s", key)
+ }
+
+ record, err := cs.Get(appAddr, false)
+ if err != nil {
+ return err
+ }
+ record.TotalBoxes = basics.AddSaturate(record.TotalBoxes, 1)
+ record.TotalBoxBytes = basics.AddSaturate(record.TotalBoxBytes, uint64(len(key))+size)
+ err = cs.Put(appAddr, record)
+ if err != nil {
+ return err
+ }
+
+ return cs.kvPut(fullKey, value)
+}
+
+func (cs *roundCowState) GetBox(appIdx basics.AppIndex, key string) ([]byte, bool, error) {
+ fullKey := logic.MakeBoxKey(appIdx, key)
+ return cs.kvGet(fullKey)
+}
+
+func (cs *roundCowState) SetBox(appIdx basics.AppIndex, key string, value []byte) error {
+ fullKey := logic.MakeBoxKey(appIdx, key)
+ old, ok, err := cs.kvGet(fullKey)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return fmt.Errorf("box %s does not exist for %d", key, appIdx)
+ }
+ if len(old) != len(value) {
+ return fmt.Errorf("box %s is wrong size old:%d != new:%d",
+ key, len(old), len(value))
+ }
+ return cs.kvPut(fullKey, value)
+}
+
+func (cs *roundCowState) DelBox(appIdx basics.AppIndex, key string, appAddr basics.Address) (bool, error) {
+ fullKey := logic.MakeBoxKey(appIdx, key)
+
+ value, ok, err := cs.kvGet(fullKey)
+ if err != nil {
+ return false, err
+ }
+ if !ok {
+ return false, nil
+ }
+
+ record, err := cs.Get(appAddr, false)
+ if err != nil {
+ return false, err
+ }
+ record.TotalBoxes = basics.SubSaturate(record.TotalBoxes, 1)
+ record.TotalBoxBytes = basics.SubSaturate(record.TotalBoxBytes, uint64(len(key)+len(value)))
+ err = cs.Put(appAddr, record)
+ if err != nil {
+ return false, err
+ }
+
+ return true, cs.kvDel(fullKey)
+}
+
+func (cs *roundCowState) Perform(gi int, ep *logic.EvalParams) error {
+ txn := &ep.TxnGroup[gi]
// move fee to pool
- err = balances.Move(txn.Txn.Sender, ep.Specials.FeeSink, txn.Txn.Fee, &txn.ApplyData.SenderRewards, nil)
+ err := cs.Move(txn.Txn.Sender, ep.Specials.FeeSink, txn.Txn.Fee, &txn.ApplyData.SenderRewards, nil)
if err != nil {
return err
}
- err = apply.Rekey(balances, &txn.Txn)
+ err = apply.Rekey(cs, &txn.Txn)
if err != nil {
return err
}
@@ -249,29 +314,29 @@ func (al *logicLedger) Perform(gi int, ep *logic.EvalParams) error {
// ahead of processing, we'd have to do ours *after* so that we'd
// use the next id. So either way, this would seem backwards at
// first glance.
- al.cow.incTxnCount()
+ cs.incTxnCount()
switch txn.Txn.Type {
case protocol.PaymentTx:
- err = apply.Payment(txn.Txn.PaymentTxnFields, txn.Txn.Header, balances, *ep.Specials, &txn.ApplyData)
+ err = apply.Payment(txn.Txn.PaymentTxnFields, txn.Txn.Header, cs, *ep.Specials, &txn.ApplyData)
case protocol.KeyRegistrationTx:
- err = apply.Keyreg(txn.Txn.KeyregTxnFields, txn.Txn.Header, balances, *ep.Specials, &txn.ApplyData,
- al.Round())
+ err = apply.Keyreg(txn.Txn.KeyregTxnFields, txn.Txn.Header, cs, *ep.Specials, &txn.ApplyData,
+ cs.Round())
case protocol.AssetConfigTx:
- err = apply.AssetConfig(txn.Txn.AssetConfigTxnFields, txn.Txn.Header, balances, *ep.Specials, &txn.ApplyData,
- al.cow.txnCounter())
+ err = apply.AssetConfig(txn.Txn.AssetConfigTxnFields, txn.Txn.Header, cs, *ep.Specials, &txn.ApplyData,
+ cs.Counter())
case protocol.AssetTransferTx:
- err = apply.AssetTransfer(txn.Txn.AssetTransferTxnFields, txn.Txn.Header, balances, *ep.Specials, &txn.ApplyData)
+ err = apply.AssetTransfer(txn.Txn.AssetTransferTxnFields, txn.Txn.Header, cs, *ep.Specials, &txn.ApplyData)
case protocol.AssetFreezeTx:
- err = apply.AssetFreeze(txn.Txn.AssetFreezeTxnFields, txn.Txn.Header, balances, *ep.Specials, &txn.ApplyData)
+ err = apply.AssetFreeze(txn.Txn.AssetFreezeTxnFields, txn.Txn.Header, cs, *ep.Specials, &txn.ApplyData)
case protocol.ApplicationCallTx:
- err = apply.ApplicationCall(txn.Txn.ApplicationCallTxnFields, txn.Txn.Header, balances, &txn.ApplyData,
- gi, ep, al.cow.txnCounter())
+ err = apply.ApplicationCall(txn.Txn.ApplicationCallTxnFields, txn.Txn.Header, cs, &txn.ApplyData,
+ gi, ep, cs.Counter())
default:
err = fmt.Errorf("%s tx in AVM", txn.Txn.Type)
@@ -287,9 +352,4 @@ func (al *logicLedger) Perform(gi int, ep *logic.EvalParams) error {
// modifiedAccounts().
return nil
-
-}
-
-func (al *logicLedger) Counter() uint64 {
- return al.cow.txnCounter()
}
diff --git a/ledger/internal/applications_test.go b/ledger/internal/applications_test.go
deleted file mode 100644
index ea28712c9..000000000
--- a/ledger/internal/applications_test.go
+++ /dev/null
@@ -1,365 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package internal
-
-import (
- "fmt"
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/ledger/ledgercore"
- ledgertesting "github.com/algorand/go-algorand/ledger/testing"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-type creatableLocator struct {
- cidx basics.CreatableIndex
- ctype basics.CreatableType
-}
-type storeLocator struct {
- addr basics.Address
- aidx basics.AppIndex
- global bool
-}
-type mockCowForLogicLedger struct {
- rnd basics.Round
- ts int64
- cr map[creatableLocator]basics.Address
- brs map[basics.Address]basics.AccountData
- stores map[storeLocator]basics.TealKeyValue
- txc uint64
-}
-
-func (c *mockCowForLogicLedger) Get(addr basics.Address, withPendingRewards bool) (ledgercore.AccountData, error) {
- acct, err := c.getAccount(addr, withPendingRewards)
- return ledgercore.ToAccountData(acct), err
-}
-
-func (c *mockCowForLogicLedger) getAccount(addr basics.Address, withPendingRewards bool) (basics.AccountData, error) {
- br, ok := c.brs[addr]
- if !ok {
- return basics.AccountData{}, fmt.Errorf("addr %s not in mock cow", addr.String())
- }
- return br, nil
-}
-
-func (c *mockCowForLogicLedger) MinBalance(addr basics.Address, proto *config.ConsensusParams) (res basics.MicroAlgos, err error) {
- br, ok := c.brs[addr]
- if !ok {
- return basics.MicroAlgos{}, fmt.Errorf("addr %s not in mock cow", addr.String())
- }
- return br.MinBalance(proto), nil
-}
-
-func (c *mockCowForLogicLedger) GetAppParams(addr basics.Address, aidx basics.AppIndex) (ret basics.AppParams, ok bool, err error) {
- acct, err := c.getAccount(addr, false)
- if err != nil {
- return
- }
- ret, ok = acct.AppParams[aidx]
- return
-}
-func (c *mockCowForLogicLedger) GetAssetParams(addr basics.Address, aidx basics.AssetIndex) (ret basics.AssetParams, ok bool, err error) {
- acct, err := c.getAccount(addr, false)
- if err != nil {
- return
- }
- ret, ok = acct.AssetParams[aidx]
- return
-}
-func (c *mockCowForLogicLedger) GetAssetHolding(addr basics.Address, aidx basics.AssetIndex) (ret basics.AssetHolding, ok bool, err error) {
- acct, err := c.getAccount(addr, false)
- if err != nil {
- return
- }
- ret, ok = acct.Assets[aidx]
- return
-}
-
-func (c *mockCowForLogicLedger) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
- addr, found := c.cr[creatableLocator{cidx, ctype}]
- return addr, found, nil
-}
-
-func (c *mockCowForLogicLedger) GetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) {
- kv, ok := c.stores[storeLocator{addr, aidx, global}]
- if !ok {
- return basics.TealValue{}, false, fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
- }
- tv, found := kv[key]
- return tv, found, nil
-}
-
-func (c *mockCowForLogicLedger) BuildEvalDelta(aidx basics.AppIndex, txn *transactions.Transaction) (evalDelta transactions.EvalDelta, err error) {
- return transactions.EvalDelta{}, nil
-}
-
-func (c *mockCowForLogicLedger) SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error {
- kv, ok := c.stores[storeLocator{addr, aidx, global}]
- if !ok {
- return fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
- }
- kv[key] = value
- c.stores[storeLocator{addr, aidx, global}] = kv
- return nil
-}
-
-func (c *mockCowForLogicLedger) DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error {
- kv, ok := c.stores[storeLocator{addr, aidx, global}]
- if !ok {
- return fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
- }
- delete(kv, key)
- c.stores[storeLocator{addr, aidx, global}] = kv
- return nil
-}
-
-func (c *mockCowForLogicLedger) round() basics.Round {
- return c.rnd
-}
-
-func (c *mockCowForLogicLedger) prevTimestamp() int64 {
- return c.ts
-}
-
-func (c *mockCowForLogicLedger) allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error) {
- _, found := c.stores[storeLocator{addr, aidx, global}]
- return found, nil
-}
-
-func (c *mockCowForLogicLedger) txnCounter() uint64 {
- return c.txc
-}
-
-func (c *mockCowForLogicLedger) incTxnCount() {
- c.txc++
-}
-
-// No unit tests care about this yet, so this is a lame implementation
-func (c *mockCowForLogicLedger) blockHdrCached(round basics.Round) (bookkeeping.BlockHeader, error) {
- return bookkeeping.BlockHeader{Round: round}, nil
-}
-
-func newCowMock(creatables []modsData) *mockCowForLogicLedger {
- var m mockCowForLogicLedger
- m.cr = make(map[creatableLocator]basics.Address, len(creatables))
- for _, e := range creatables {
- m.cr[creatableLocator{e.cidx, e.ctype}] = e.addr
- }
- return &m
-}
-
-func TestLogicLedgerMake(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- c := &mockCowForLogicLedger{}
- l := newLogicLedger(c)
- a.NotNil(l)
- a.Equal(c, l.cow)
-}
-
-func TestLogicLedgerBalances(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- c := newCowMock(nil)
- l := newLogicLedger(c)
- a.NotNil(l)
-
- addr1 := ledgertesting.RandomAddress()
- ble := basics.MicroAlgos{Raw: 100}
- c.brs = map[basics.Address]basics.AccountData{addr1: {MicroAlgos: ble}}
- acct, err := l.AccountData(addr1)
- a.NoError(err)
- a.Equal(ble, acct.MicroAlgos)
-}
-
-func TestLogicLedgerGetters(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := ledgertesting.RandomAddress()
- aidx := basics.AppIndex(1)
- c := newCowMock([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
- l := newLogicLedger(c)
- a.NotNil(l)
-
- round := basics.Round(1234)
- c.rnd = round
- ts := int64(11223344)
- c.ts = ts
-
- addr1 := ledgertesting.RandomAddress()
- c.stores = map[storeLocator]basics.TealKeyValue{{addr1, aidx, false}: {}}
- a.Equal(round, l.Round())
- a.Equal(ts, l.LatestTimestamp())
- a.True(l.OptedIn(addr1, aidx))
- a.False(l.OptedIn(addr, aidx))
-}
-
-func TestLogicLedgerAsset(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := ledgertesting.RandomAddress()
- addr1 := ledgertesting.RandomAddress()
- aidx := basics.AppIndex(1)
- assetIdx := basics.AssetIndex(2)
- c := newCowMock([]modsData{
- {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
- {addr1, basics.CreatableIndex(assetIdx), basics.AssetCreatable},
- })
- l := newLogicLedger(c)
- a.NotNil(l)
-
- _, _, err := l.AssetParams(basics.AssetIndex(aidx))
- a.Error(err)
- a.Contains(err.Error(), fmt.Sprintf("asset %d does not exist", aidx))
-
- c.brs = map[basics.Address]basics.AccountData{
- addr1: {AssetParams: map[basics.AssetIndex]basics.AssetParams{assetIdx: {Total: 1000}}},
- }
-
- ap, creator, err := l.AssetParams(assetIdx)
- a.NoError(err)
- a.Equal(addr1, creator)
- a.Equal(uint64(1000), ap.Total)
-
- _, err = l.AssetHolding(addr1, assetIdx)
- a.Error(err)
- a.Contains(err.Error(), "has not opted in to asset")
-
- c.brs = map[basics.Address]basics.AccountData{
- addr1: {
- AssetParams: map[basics.AssetIndex]basics.AssetParams{assetIdx: {Total: 1000}},
- Assets: map[basics.AssetIndex]basics.AssetHolding{assetIdx: {Amount: 99}},
- },
- }
-
- ah, err := l.AssetHolding(addr1, assetIdx)
- a.NoError(err)
- a.Equal(uint64(99), ah.Amount)
-}
-
-func TestLogicLedgerGetKey(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := ledgertesting.RandomAddress()
- addr1 := ledgertesting.RandomAddress()
- aidx := basics.AppIndex(1)
- assetIdx := basics.AssetIndex(2)
- c := newCowMock([]modsData{
- {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
- {addr1, basics.CreatableIndex(assetIdx), basics.AssetCreatable},
- })
- l := newLogicLedger(c)
- a.NotNil(l)
-
- _, ok, err := l.GetGlobal(basics.AppIndex(assetIdx), "gkey")
- a.Error(err)
- a.False(ok)
- a.Contains(err.Error(), fmt.Sprintf("app %d does not exist", assetIdx))
-
- tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx + 1, true}: {"gkey": tv}}
- val, ok, err := l.GetGlobal(aidx, "gkey")
- a.Error(err)
- a.False(ok)
- a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
-
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
- val, ok, err = l.GetGlobal(aidx, "gkey")
- a.NoError(err)
- a.True(ok)
- a.Equal(tv, val)
-
- // check local
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, false}: {"lkey": tv}}
- val, ok, err = l.GetLocal(addr, aidx, "lkey", 0)
- a.NoError(err)
- a.True(ok)
- a.Equal(tv, val)
-}
-
-func TestLogicLedgerSetKey(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := ledgertesting.RandomAddress()
- aidx := basics.AppIndex(1)
- c := newCowMock([]modsData{
- {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
- })
- l := newLogicLedger(c)
- a.NotNil(l)
-
- tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
- err := l.SetGlobal(aidx, "gkey", tv)
- a.Error(err)
- a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
-
- tv2 := basics.TealValue{Type: basics.TealUintType, Uint: 2}
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
- err = l.SetGlobal(aidx, "gkey", tv2)
- a.NoError(err)
-
- // check local
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, false}: {"lkey": tv}}
- err = l.SetLocal(addr, aidx, "lkey", tv2, 0)
- a.NoError(err)
-}
-
-func TestLogicLedgerDelKey(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := ledgertesting.RandomAddress()
- aidx := basics.AppIndex(1)
- c := newCowMock([]modsData{
- {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
- })
- l := newLogicLedger(c)
- a.NotNil(l)
-
- err := l.DelGlobal(aidx, "gkey")
- a.Error(err)
- a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
-
- tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
- err = l.DelGlobal(aidx, "gkey")
- a.NoError(err)
-
- addr1 := ledgertesting.RandomAddress()
- c.stores = map[storeLocator]basics.TealKeyValue{{addr1, aidx, false}: {"lkey": tv}}
- err = l.DelLocal(addr1, aidx, "lkey", 0)
- a.NoError(err)
-}
diff --git a/ledger/internal/assetcow.go b/ledger/internal/assetcow.go
index 355cd6d54..f675d823c 100644
--- a/ledger/internal/assetcow.go
+++ b/ledger/internal/assetcow.go
@@ -23,22 +23,28 @@ import (
func (cs *roundCowState) AllocateAsset(addr basics.Address, index basics.AssetIndex, global bool) error {
if global {
- cs.mods.Creatables[basics.CreatableIndex(index)] = ledgercore.ModifiedCreatable{
- Ctype: basics.AssetCreatable,
- Creator: addr,
- Created: true,
- }
+ cs.mods.AddCreatable(
+ basics.CreatableIndex(index),
+ ledgercore.ModifiedCreatable{
+ Ctype: basics.AssetCreatable,
+ Creator: addr,
+ Created: true,
+ },
+ )
}
return nil
}
func (cs *roundCowState) DeallocateAsset(addr basics.Address, index basics.AssetIndex, global bool) error {
if global {
- cs.mods.Creatables[basics.CreatableIndex(index)] = ledgercore.ModifiedCreatable{
- Ctype: basics.AssetCreatable,
- Creator: addr,
- Created: false,
- }
+ cs.mods.AddCreatable(
+ basics.CreatableIndex(index),
+ ledgercore.ModifiedCreatable{
+ Ctype: basics.AssetCreatable,
+ Creator: addr,
+ Created: false,
+ },
+ )
}
return nil
}
diff --git a/ledger/internal/cow.go b/ledger/internal/cow.go
index 0baaa9fc6..d329bfb17 100644
--- a/ledger/internal/cow.go
+++ b/ledger/internal/cow.go
@@ -51,7 +51,7 @@ type roundCowParent interface {
lookupAssetHolding(addr basics.Address, aidx basics.AssetIndex, cacheOnly bool) (ledgercore.AssetHoldingDelta, bool, error)
checkDup(basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error
- txnCounter() uint64
+ Counter() uint64
getCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error)
GetStateProofNextRound() basics.Round
BlockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error)
@@ -62,6 +62,8 @@ type roundCowParent interface {
getStorageLimits(addr basics.Address, aidx basics.AppIndex, global bool) (basics.StateSchema, error)
allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error)
getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error)
+
+ kvGet(key string) ([]byte, bool, error)
}
type roundCowState struct {
@@ -76,7 +78,7 @@ type roundCowState struct {
// storage deltas populated as side effects of AppCall transaction
// 1. Opt-in/Close actions (see Allocate/Deallocate)
- // 2. Stateful TEAL evaluation (see SetKey/DelKey)
+ // 2. Stateful TEAL evaluation (see setKey/delKey)
// must be incorporated into mods.accts before passing deltas forward
sdeltas map[basics.Address]map[storagePtr]*storageDelta
@@ -105,7 +107,7 @@ func makeRoundCowState(b roundCowParent, hdr bookkeeping.BlockHeader, proto conf
// local delta has account index as it specified in TEAL either in set/del key or prior get key calls.
// The predicate is that complex in order to cover all the block seen on testnet and mainnet.
compatibilityMode := (hdr.CurrentProtocol == protocol.ConsensusV24) &&
- (hdr.NextProtocol != protocol.ConsensusV26 || (hdr.UpgradePropose == "" && hdr.UpgradeApprove == false && hdr.Round < hdr.UpgradeState.NextProtocolVoteBefore))
+ (hdr.NextProtocol != protocol.ConsensusV26 || (hdr.UpgradePropose == "" && !hdr.UpgradeApprove && hdr.Round < hdr.UpgradeState.NextProtocolVoteBefore))
if compatibilityMode {
cb.compatibilityMode = true
cb.compatibilityGetKeyCache = make(map[basics.Address]map[storagePtr]uint64)
@@ -114,10 +116,6 @@ func makeRoundCowState(b roundCowParent, hdr bookkeeping.BlockHeader, proto conf
}
func (cb *roundCowState) deltas() ledgercore.StateDelta {
- if len(cb.sdeltas) == 0 {
- return cb.mods
- }
-
// Apply storage deltas to account deltas
for addr, smap := range cb.sdeltas {
for aapp, storeDelta := range smap {
@@ -126,6 +124,17 @@ func (cb *roundCowState) deltas() ledgercore.StateDelta {
}
}
}
+
+ // Populate old values by looking through parent
+ for key, value := range cb.mods.KvMods {
+ old, _, err := cb.lookupParent.kvGet(key) // Because of how boxes are prefetched, value will be cached
+ if err != nil {
+ panic(fmt.Errorf("Error looking up %v : %w", key, err))
+ }
+ value.OldData = old
+ cb.mods.KvMods[key] = value
+ }
+
return cb.mods
}
@@ -133,11 +142,11 @@ func (cb *roundCowState) rewardsLevel() uint64 {
return cb.mods.Hdr.RewardsLevel
}
-func (cb *roundCowState) round() basics.Round {
+func (cb *roundCowState) Round() basics.Round {
return cb.mods.Hdr.Round
}
-func (cb *roundCowState) prevTimestamp() int64 {
+func (cb *roundCowState) PrevTimestamp() int64 {
return cb.mods.PrevTimestamp
}
@@ -213,8 +222,8 @@ func (cb *roundCowState) checkDup(firstValid, lastValid basics.Round, txid trans
return cb.lookupParent.checkDup(firstValid, lastValid, txid, txl)
}
-func (cb *roundCowState) txnCounter() uint64 {
- return cb.lookupParent.txnCounter() + cb.txnCount
+func (cb *roundCowState) Counter() uint64 {
+ return cb.lookupParent.Counter() + cb.txnCount
}
func (cb *roundCowState) GetStateProofNextRound() basics.Round {
@@ -240,7 +249,7 @@ func (cb *roundCowState) addTx(txn transactions.Transaction, txid transactions.T
cb.mods.Txids[txid] = ledgercore.IncludedTransactions{LastValid: txn.LastValid, Intra: uint64(len(cb.mods.Txids))}
cb.incTxnCount()
if txn.Lease != [32]byte{} {
- cb.mods.Txleases[ledgercore.Txlease{Sender: txn.Sender, Lease: txn.Lease}] = txn.LastValid
+ cb.mods.AddTxLease(ledgercore.Txlease{Sender: txn.Sender, Lease: txn.Lease}, txn.LastValid)
}
}
@@ -274,10 +283,10 @@ func (cb *roundCowState) commitToParent() {
cb.commitParent.txnCount += cb.txnCount
for txl, expires := range cb.mods.Txleases {
- cb.commitParent.mods.Txleases[txl] = expires
+ cb.commitParent.mods.AddTxLease(txl, expires)
}
for cidx, delta := range cb.mods.Creatables {
- cb.commitParent.mods.Creatables[cidx] = delta
+ cb.commitParent.mods.AddCreatable(cidx, delta)
}
for addr, smod := range cb.sdeltas {
for aapp, nsd := range smod {
@@ -294,6 +303,10 @@ func (cb *roundCowState) commitToParent() {
}
}
cb.commitParent.mods.StateProofNext = cb.mods.StateProofNext
+
+ for key, value := range cb.mods.KvMods {
+ cb.commitParent.mods.AddKvMod(key, value)
+ }
}
func (cb *roundCowState) modifiedAccounts() []basics.Address {
diff --git a/ledger/internal/cow_test.go b/ledger/internal/cow_test.go
index 32e6a36e4..bd942d63e 100644
--- a/ledger/internal/cow_test.go
+++ b/ledger/internal/cow_test.go
@@ -85,7 +85,11 @@ func (ml *mockLedger) getKey(addr basics.Address, aidx basics.AppIndex, global b
return basics.TealValue{}, false, nil
}
-func (ml *mockLedger) txnCounter() uint64 {
+func (ml *mockLedger) kvGet(key string) ([]byte, bool, error) {
+ return nil, false, nil
+}
+
+func (ml *mockLedger) Counter() uint64 {
return 0
}
diff --git a/ledger/internal/eval.go b/ledger/internal/eval.go
index f2750d8a0..b42d24f6b 100644
--- a/ledger/internal/eval.go
+++ b/ledger/internal/eval.go
@@ -45,6 +45,7 @@ type LedgerForCowBase interface {
LookupWithoutRewards(basics.Round, basics.Address) (ledgercore.AccountData, basics.Round, error)
LookupAsset(basics.Round, basics.Address, basics.AssetIndex) (ledgercore.AssetResource, error)
LookupApplication(basics.Round, basics.Address, basics.AppIndex) (ledgercore.AppResource, error)
+ LookupKv(basics.Round, string) ([]byte, error)
GetCreatorForRound(basics.Round, basics.CreatableIndex, basics.CreatableType) (basics.Address, bool, error)
}
@@ -132,6 +133,9 @@ type roundCowBase struct {
// Similar cache for asset/app creators.
creators map[creatable]foundAddress
+
+ // Similar cache for kv entries. A nil entry means ledger has no such pair
+ kvStore map[string][]byte
}
func makeRoundCowBase(l LedgerForCowBase, rnd basics.Round, txnCount uint64, stateProofNextRnd basics.Round, proto config.ConsensusParams) *roundCowBase {
@@ -147,6 +151,7 @@ func makeRoundCowBase(l LedgerForCowBase, rnd basics.Round, txnCount uint64, sta
appLocalStates: make(map[ledgercore.AccountApp]cachedAppLocalState),
assets: make(map[ledgercore.AccountAsset]cachedAssetHolding),
creators: make(map[creatable]foundAddress),
+ kvStore: make(map[string][]byte),
}
}
@@ -320,7 +325,7 @@ func (x *roundCowBase) checkDup(firstValid, lastValid basics.Round, txid transac
return x.l.CheckDup(x.proto, x.rnd+1, firstValid, lastValid, txid, txl)
}
-func (x *roundCowBase) txnCounter() uint64 {
+func (x *roundCowBase) Counter() uint64 {
return x.txnCount
}
@@ -598,6 +603,7 @@ type LedgerForEvaluator interface {
GenesisProto() config.ConsensusParams
LatestTotals() (basics.Round, ledgercore.AccountTotals, error)
VotersForStateProof(basics.Round) (*ledgercore.VotersForRound, error)
+ FlushCaches()
}
// EvaluatorOptions defines the evaluator creation options
@@ -832,11 +838,9 @@ func (eval *BlockEvaluator) TestTransactionGroup(txgroup []transactions.SignedTx
return fmt.Errorf("group size %d exceeds maximum %d", len(txgroup), eval.proto.MaxTxGroupSize)
}
- cow := eval.state.child(len(txgroup))
-
var group transactions.TxGroup
for gi, txn := range txgroup {
- err := eval.TestTransaction(txn, cow)
+ err := eval.TestTransaction(txn)
if err != nil {
return err
}
@@ -871,7 +875,7 @@ func (eval *BlockEvaluator) TestTransactionGroup(txgroup []transactions.SignedTx
// TestTransaction performs basic duplicate detection and well-formedness checks
// on a single transaction, but does not actually add the transaction to the block
// evaluator, or modify the block evaluator state in any other visible way.
-func (eval *BlockEvaluator) TestTransaction(txn transactions.SignedTxn, cow *roundCowState) error {
+func (eval *BlockEvaluator) TestTransaction(txn transactions.SignedTxn) error {
// Transaction valid (not expired)?
err := txn.Txn.Alive(eval.block)
if err != nil {
@@ -885,7 +889,7 @@ func (eval *BlockEvaluator) TestTransaction(txn transactions.SignedTxn, cow *rou
// Transaction already in the ledger?
txid := txn.ID()
- err = cow.checkDup(txn.Txn.First(), txn.Txn.Last(), txid, ledgercore.Txlease{Sender: txn.Txn.Sender, Lease: txn.Txn.Lease})
+ err = eval.state.checkDup(txn.Txn.First(), txn.Txn.Last(), txid, ledgercore.Txlease{Sender: txn.Txn.Sender, Lease: txn.Txn.Lease})
if err != nil {
return err
}
@@ -1062,7 +1066,7 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, evalParams *
}
// Apply the transaction, updating the cow balances
- applyData, err := eval.applyTransaction(txn.Txn, cow, evalParams, gi, cow.txnCounter())
+ applyData, err := eval.applyTransaction(txn.Txn, cow, evalParams, gi, cow.Counter())
if err != nil {
return fmt.Errorf("transaction %v: %w", txid, err)
}
@@ -1125,7 +1129,7 @@ func (eval *BlockEvaluator) applyTransaction(tx transactions.Transaction, cow *r
err = apply.Payment(tx.PaymentTxnFields, tx.Header, cow, eval.specials, &ad)
case protocol.KeyRegistrationTx:
- err = apply.Keyreg(tx.KeyregTxnFields, tx.Header, cow, eval.specials, &ad, cow.round())
+ err = apply.Keyreg(tx.KeyregTxnFields, tx.Header, cow, eval.specials, &ad, cow.Round())
case protocol.AssetConfigTx:
err = apply.AssetConfig(tx.AssetConfigTxnFields, tx.Header, cow, eval.specials, &ad, ctr)
@@ -1199,7 +1203,7 @@ func (eval *BlockEvaluator) stateProofVotersAndTotal() (root crypto.GenericDiges
// TestingTxnCounter - the method returns the current evaluator transaction counter. The method is used for testing purposes only.
func (eval *BlockEvaluator) TestingTxnCounter() uint64 {
- return eval.state.txnCounter()
+ return eval.state.Counter()
}
// Call "endOfBlock" after all the block's rewards and transactions are processed.
@@ -1212,7 +1216,7 @@ func (eval *BlockEvaluator) endOfBlock() error {
}
if eval.proto.TxnCounter {
- eval.block.TxnCounter = eval.state.txnCounter()
+ eval.block.TxnCounter = eval.state.Counter()
} else {
eval.block.TxnCounter = 0
}
@@ -1255,7 +1259,7 @@ func (eval *BlockEvaluator) endOfBlock() error {
var expectedTxnCount uint64
if eval.proto.TxnCounter {
- expectedTxnCount = eval.state.txnCounter()
+ expectedTxnCount = eval.state.Counter()
}
if eval.block.TxnCounter != expectedTxnCount {
return fmt.Errorf("txn count wrong: %d != %d", eval.block.TxnCounter, expectedTxnCount)
@@ -1443,6 +1447,12 @@ func (eval *BlockEvaluator) GenerateBlock() (*ledgercore.ValidatedBlock, error)
return &vb, nil
}
+// SetGenerateForTesting is exported so that a ledger being used for testing can
+// force a block evalator to create a block and compare it to another.
+func (eval *BlockEvaluator) SetGenerateForTesting(g bool) {
+ eval.generate = g
+}
+
type evalTxValidator struct {
txcache verify.VerifiedTransactionCache
block bookkeeping.Block
@@ -1491,6 +1501,9 @@ func (validator *evalTxValidator) run() {
// AddBlock: Eval(context.Background(), l, blk, false, txcache, nil)
// tracker: Eval(context.Background(), l, blk, false, txcache, nil)
func Eval(ctx context.Context, l LedgerForEvaluator, blk bookkeeping.Block, validate bool, txcache verify.VerifiedTransactionCache, executionPool execpool.BacklogPool) (ledgercore.StateDelta, error) {
+ // flush the pending writes in the cache to make everything read so far available during eval
+ l.FlushCaches()
+
eval, err := StartEvaluator(l, blk.BlockHeader,
EvaluatorOptions{
PaysetHint: len(blk.Payset),
@@ -1549,45 +1562,57 @@ transactionGroupLoop:
if !ok {
break transactionGroupLoop
} else if txgroup.Err != nil {
- return ledgercore.StateDelta{}, txgroup.Err
+ logging.Base().Errorf("eval prefetcher error: %v", txgroup.Err)
}
- for _, br := range txgroup.Accounts {
- if _, have := base.accounts[*br.Address]; !have {
- base.accounts[*br.Address] = *br.Data
- }
- }
- for _, lr := range txgroup.Resources {
- if lr.Address == nil {
- // we attempted to look for the creator, and failed.
- base.creators[creatable{cindex: lr.CreatableIndex, ctype: lr.CreatableType}] =
- foundAddress{exists: false}
- continue
- }
- if lr.CreatableType == basics.AssetCreatable {
- if lr.Resource.AssetHolding != nil {
- base.assets[ledgercore.AccountAsset{Address: *lr.Address, Asset: basics.AssetIndex(lr.CreatableIndex)}] = cachedAssetHolding{value: *lr.Resource.AssetHolding, exists: true}
- } else {
- base.assets[ledgercore.AccountAsset{Address: *lr.Address, Asset: basics.AssetIndex(lr.CreatableIndex)}] = cachedAssetHolding{exists: false}
+ if txgroup.Err == nil {
+ for _, br := range txgroup.Accounts {
+ if _, have := base.accounts[*br.Address]; !have {
+ base.accounts[*br.Address] = *br.Data
}
- if lr.Resource.AssetParams != nil {
- base.assetParams[ledgercore.AccountAsset{Address: *lr.Address, Asset: basics.AssetIndex(lr.CreatableIndex)}] = cachedAssetParams{value: *lr.Resource.AssetParams, exists: true}
- base.creators[creatable{cindex: lr.CreatableIndex, ctype: basics.AssetCreatable}] = foundAddress{address: *lr.Address, exists: true}
- } else {
- base.assetParams[ledgercore.AccountAsset{Address: *lr.Address, Asset: basics.AssetIndex(lr.CreatableIndex)}] = cachedAssetParams{exists: false}
-
- }
- } else {
- if lr.Resource.AppLocalState != nil {
- base.appLocalStates[ledgercore.AccountApp{Address: *lr.Address, App: basics.AppIndex(lr.CreatableIndex)}] = cachedAppLocalState{value: *lr.Resource.AppLocalState, exists: true}
- } else {
- base.appLocalStates[ledgercore.AccountApp{Address: *lr.Address, App: basics.AppIndex(lr.CreatableIndex)}] = cachedAppLocalState{exists: false}
+ }
+ for _, lr := range txgroup.Resources {
+ if lr.Address == nil {
+ // we attempted to look for the creator, and failed.
+ creatableKey := creatable{cindex: lr.CreatableIndex, ctype: lr.CreatableType}
+ base.creators[creatableKey] = foundAddress{exists: false}
+ continue
}
- if lr.Resource.AppParams != nil {
- base.appParams[ledgercore.AccountApp{Address: *lr.Address, App: basics.AppIndex(lr.CreatableIndex)}] = cachedAppParams{value: *lr.Resource.AppParams, exists: true}
- base.creators[creatable{cindex: lr.CreatableIndex, ctype: basics.AppCreatable}] = foundAddress{address: *lr.Address, exists: true}
+ if lr.CreatableType == basics.AssetCreatable {
+ assetKey := ledgercore.AccountAsset{
+ Address: *lr.Address,
+ Asset: basics.AssetIndex(lr.CreatableIndex),
+ }
+
+ if lr.Resource.AssetHolding != nil {
+ base.assets[assetKey] = cachedAssetHolding{value: *lr.Resource.AssetHolding, exists: true}
+ } else {
+ base.assets[assetKey] = cachedAssetHolding{exists: false}
+ }
+ if lr.Resource.AssetParams != nil {
+ creatableKey := creatable{cindex: lr.CreatableIndex, ctype: basics.AssetCreatable}
+ base.assetParams[assetKey] = cachedAssetParams{value: *lr.Resource.AssetParams, exists: true}
+ base.creators[creatableKey] = foundAddress{address: *lr.Address, exists: true}
+ } else {
+ base.assetParams[assetKey] = cachedAssetParams{exists: false}
+ }
} else {
- base.appParams[ledgercore.AccountApp{Address: *lr.Address, App: basics.AppIndex(lr.CreatableIndex)}] = cachedAppParams{exists: false}
+ appKey := ledgercore.AccountApp{
+ Address: *lr.Address,
+ App: basics.AppIndex(lr.CreatableIndex),
+ }
+ if lr.Resource.AppLocalState != nil {
+ base.appLocalStates[appKey] = cachedAppLocalState{value: *lr.Resource.AppLocalState, exists: true}
+ } else {
+ base.appLocalStates[appKey] = cachedAppLocalState{exists: false}
+ }
+ if lr.Resource.AppParams != nil {
+ creatableKey := creatable{cindex: lr.CreatableIndex, ctype: basics.AppCreatable}
+ base.appParams[appKey] = cachedAppParams{value: *lr.Resource.AppParams, exists: true}
+ base.creators[creatableKey] = foundAddress{address: *lr.Address, exists: true}
+ } else {
+ base.appParams[appKey] = cachedAppParams{exists: false}
+ }
}
}
}
diff --git a/ledger/internal/eval_blackbox_test.go b/ledger/internal/eval_blackbox_test.go
deleted file mode 100644
index b6eb6b9c1..000000000
--- a/ledger/internal/eval_blackbox_test.go
+++ /dev/null
@@ -1,1256 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package internal_test
-
-import (
- "context"
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/agreement"
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/data/txntest"
- "github.com/algorand/go-algorand/ledger"
- "github.com/algorand/go-algorand/ledger/internal"
- "github.com/algorand/go-algorand/ledger/ledgercore"
- ledgertesting "github.com/algorand/go-algorand/ledger/testing"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/algorand/go-algorand/util/execpool"
-)
-
-var minFee basics.MicroAlgos
-
-func init() {
- params := config.Consensus[protocol.ConsensusCurrentVersion]
- minFee = basics.MicroAlgos{Raw: params.MinTxnFee}
-}
-
-func TestBlockEvaluator(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genesisInitState, addrs, keys := ledgertesting.Genesis(10)
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
-
- genesisBlockHeader, err := l.BlockHdr(basics.Round(0))
- require.NoError(t, err)
- newBlock := bookkeeping.MakeBlock(genesisBlockHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
- require.NoError(t, err)
-
- genHash := l.GenesisHash()
- txn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: addrs[0],
- Fee: minFee,
- FirstValid: newBlock.Round(),
- LastValid: newBlock.Round(),
- GenesisHash: genHash,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: addrs[1],
- Amount: basics.MicroAlgos{Raw: 100},
- },
- }
-
- // Correct signature should work
- st := txn.Sign(keys[0])
- err = eval.Transaction(st, transactions.ApplyData{})
- require.NoError(t, err)
-
- // Broken signature should fail
- stbad := st
- st.Sig[2] ^= 8
- txgroup := []transactions.SignedTxn{stbad}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
-
- // Repeat should fail
- txgroup = []transactions.SignedTxn{st}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- err = eval.Transaction(st, transactions.ApplyData{})
- require.Error(t, err)
-
- // out of range should fail
- btxn := txn
- btxn.FirstValid++
- btxn.LastValid += 2
- st = btxn.Sign(keys[0])
- txgroup = []transactions.SignedTxn{st}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- err = eval.Transaction(st, transactions.ApplyData{})
- require.Error(t, err)
-
- // bogus group should fail
- btxn = txn
- btxn.Group[1] = 1
- st = btxn.Sign(keys[0])
- txgroup = []transactions.SignedTxn{st}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- err = eval.Transaction(st, transactions.ApplyData{})
- require.Error(t, err)
-
- // mixed fields should fail
- btxn = txn
- btxn.XferAsset = 3
- st = btxn.Sign(keys[0])
- txgroup = []transactions.SignedTxn{st}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- // We don't test eval.Transaction() here because it doesn't check txn.WellFormed(), instead relying on that to have already been checked by the transaction pool.
- // err = eval.Transaction(st, transactions.ApplyData{})
- // require.Error(t, err)
-
- selfTxn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: addrs[2],
- Fee: minFee,
- FirstValid: newBlock.Round(),
- LastValid: newBlock.Round(),
- GenesisHash: genHash,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: addrs[2],
- Amount: basics.MicroAlgos{Raw: 100},
- },
- }
- stxn := selfTxn.Sign(keys[2])
-
- // TestTransactionGroup() and Transaction() should have the same outcome, but work slightly different code paths.
- txgroup = []transactions.SignedTxn{stxn}
- err = eval.TestTransactionGroup(txgroup)
- require.NoError(t, err)
-
- err = eval.Transaction(stxn, transactions.ApplyData{})
- require.NoError(t, err)
-
- t3 := txn
- t3.Amount.Raw++
- t4 := selfTxn
- t4.Amount.Raw++
-
- // a group without .Group should fail
- s3 := t3.Sign(keys[0])
- s4 := t4.Sign(keys[2])
- txgroup = []transactions.SignedTxn{s3, s4}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- txgroupad := transactions.WrapSignedTxnsWithAD(txgroup)
- err = eval.TransactionGroup(txgroupad)
- require.Error(t, err)
-
- // Test a group that should work
- var group transactions.TxGroup
- group.TxGroupHashes = []crypto.Digest{crypto.HashObj(t3), crypto.HashObj(t4)}
- t3.Group = crypto.HashObj(group)
- t4.Group = t3.Group
- s3 = t3.Sign(keys[0])
- s4 = t4.Sign(keys[2])
- txgroup = []transactions.SignedTxn{s3, s4}
- err = eval.TestTransactionGroup(txgroup)
- require.NoError(t, err)
-
- // disagreement on Group id should fail
- t4bad := t4
- t4bad.Group[3] ^= 3
- s4bad := t4bad.Sign(keys[2])
- txgroup = []transactions.SignedTxn{s3, s4bad}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- txgroupad = transactions.WrapSignedTxnsWithAD(txgroup)
- err = eval.TransactionGroup(txgroupad)
- require.Error(t, err)
-
- // missing part of the group should fail
- txgroup = []transactions.SignedTxn{s3}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
-
- validatedBlock, err := eval.GenerateBlock()
- require.NoError(t, err)
-
- accts := genesisInitState.Accounts
- bal0 := accts[addrs[0]]
- bal1 := accts[addrs[1]]
- bal2 := accts[addrs[2]]
-
- l.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
-
- bal0new, _, _, err := l.LookupAccount(newBlock.Round(), addrs[0])
- require.NoError(t, err)
- bal1new, _, _, err := l.LookupAccount(newBlock.Round(), addrs[1])
- require.NoError(t, err)
- bal2new, _, _, err := l.LookupAccount(newBlock.Round(), addrs[2])
- require.NoError(t, err)
-
- require.Equal(t, bal0new.MicroAlgos.Raw, bal0.MicroAlgos.Raw-minFee.Raw-100)
- require.Equal(t, bal1new.MicroAlgos.Raw, bal1.MicroAlgos.Raw+100)
- require.Equal(t, bal2new.MicroAlgos.Raw, bal2.MicroAlgos.Raw-minFee.Raw)
-}
-
-func TestRekeying(t *testing.T) {
- partitiontest.PartitionTest(t)
- // t.Parallel() NO! This test manipulates []protocol.Consensus
-
- // Pretend rekeying is supported
- actual := config.Consensus[protocol.ConsensusCurrentVersion]
- pretend := actual
- pretend.SupportRekeying = true
- config.Consensus[protocol.ConsensusCurrentVersion] = pretend
- defer func() {
- config.Consensus[protocol.ConsensusCurrentVersion] = actual
- }()
-
- // Bring up a ledger
- genesisInitState, addrs, keys := ledgertesting.Genesis(10)
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
-
- // Make a new block
- nextRound := l.Latest() + basics.Round(1)
- genHash := l.GenesisHash()
-
- // Test plan
- // Syntax: [A -> B][C, D] means transaction from A that rekeys to B with authaddr C and actual sig from D
- makeTxn := func(sender, rekeyto, authaddr basics.Address, signer *crypto.SignatureSecrets, uniq uint8) transactions.SignedTxn {
- txn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: sender,
- Fee: minFee,
- FirstValid: nextRound,
- LastValid: nextRound,
- GenesisHash: genHash,
- RekeyTo: rekeyto,
- Note: []byte{uniq},
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: sender,
- },
- }
- sig := signer.Sign(txn)
- return transactions.SignedTxn{Txn: txn, Sig: sig, AuthAddr: authaddr}
- }
-
- tryBlock := func(stxns []transactions.SignedTxn) error {
- // We'll make a block using the evaluator.
- // When generating a block, the evaluator doesn't check transaction sigs -- it assumes the transaction pool already did that.
- // So the ValidatedBlock that comes out isn't necessarily actually a valid block. We'll call Validate ourselves.
- genesisHdr, err := l.BlockHdr(basics.Round(0))
- require.NoError(t, err)
- newBlock := bookkeeping.MakeBlock(genesisHdr)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
- require.NoError(t, err)
-
- for _, stxn := range stxns {
- err = eval.Transaction(stxn, transactions.ApplyData{})
- if err != nil {
- return err
- }
- }
- validatedBlock, err := eval.GenerateBlock()
- if err != nil {
- return err
- }
-
- backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
- defer backlogPool.Shutdown()
- _, err = l.Validate(context.Background(), validatedBlock.Block(), backlogPool)
- return err
- }
-
- // Preamble transactions, which all of the blocks in this test will start with
- // [A -> 0][0,A] (normal transaction)
- // [A -> B][0,A] (rekey)
- txn0 := makeTxn(addrs[0], basics.Address{}, basics.Address{}, keys[0], 0) // Normal transaction
- txn1 := makeTxn(addrs[0], addrs[1], basics.Address{}, keys[0], 1) // Rekey transaction
-
- // Test 1: Do only good things
- // (preamble)
- // [A -> 0][B,B] (normal transaction using new key)
- // [A -> A][B,B] (rekey back to A, transaction still signed by B)
- // [A -> 0][0,A] (normal transaction again)
- test1txns := []transactions.SignedTxn{
- txn0, txn1, // (preamble)
- makeTxn(addrs[0], basics.Address{}, addrs[1], keys[1], 2), // [A -> 0][B,B]
- makeTxn(addrs[0], addrs[0], addrs[1], keys[1], 3), // [A -> A][B,B]
- makeTxn(addrs[0], basics.Address{}, basics.Address{}, keys[0], 4), // [A -> 0][0,A]
- }
- err = tryBlock(test1txns)
- require.NoError(t, err)
-
- // Test 2: Use old key after rekeying
- // (preamble)
- // [A -> A][0,A] (rekey back to A, but signed by A instead of B)
- test2txns := []transactions.SignedTxn{
- txn0, txn1, // (preamble)
- makeTxn(addrs[0], addrs[0], basics.Address{}, keys[0], 2), // [A -> A][0,A]
- }
- err = tryBlock(test2txns)
- require.Error(t, err)
-
- // TODO: More tests
-}
-
-// TestEvalAppState ensures txns in a group can't violate app state schema
-// limits the test ensures that commitToParent -> applyChild copies child's cow
-// state usage counts into parent and the usage counts correctly propagated from
-// parent cow to child cow and back. When limits are not violated, the test
-// ensures that the updates are correct.
-func TestEvalAppState(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // v24 = apps
- testConsensusRange(t, 24, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
- defer dl.Close()
-
- appcall1 := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: addrs[0],
- GlobalStateSchema: basics.StateSchema{NumByteSlice: 1},
- ApprovalProgram: `#pragma version 2
- txn ApplicationID
- bz create
- byte "caller"
- txn Sender
- app_global_put
- b ok
-create:
- byte "creator"
- txn Sender
- app_global_put
-ok:
- int 1`,
- ClearStateProgram: "#pragma version 2\nint 1",
- }
-
- appcall2 := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: addrs[0],
- ApplicationID: 1,
- }
-
- dl.beginBlock()
- dl.txgroup("store bytes count 2 exceeds schema bytes count 1", &appcall1, &appcall2)
-
- appcall1.GlobalStateSchema = basics.StateSchema{NumByteSlice: 2}
- dl.txgroup("", &appcall1, &appcall2)
- vb := dl.endBlock()
- deltas := vb.Delta()
-
- params, ok := deltas.Accts.GetAppParams(addrs[0], 1)
- require.True(t, ok)
- state := params.Params.GlobalState
- require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addrs[0][:])}, state["caller"])
- require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addrs[0][:])}, state["creator"])
- })
-}
-
-// nextBlock begins evaluation of a new block, after ledger creation or endBlock()
-func nextBlock(t testing.TB, ledger *ledger.Ledger) *internal.BlockEvaluator {
- rnd := ledger.Latest()
- hdr, err := ledger.BlockHdr(rnd)
- require.NoError(t, err)
-
- nextHdr := bookkeeping.MakeBlock(hdr).BlockHeader
- nextHdr.TimeStamp = hdr.TimeStamp + 1 // ensure deterministic tests
- eval, err := internal.StartEvaluator(ledger, nextHdr, internal.EvaluatorOptions{
- Generate: true,
- Validate: true, // Do the complete checks that a new txn would be subject to
- })
- require.NoError(t, err)
- return eval
-}
-
-func fillDefaults(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txn *txntest.Txn) {
- if txn.GenesisHash.IsZero() && ledger.GenesisProto().SupportGenesisHash {
- txn.GenesisHash = ledger.GenesisHash()
- }
- if txn.FirstValid == 0 {
- txn.FirstValid = eval.Round()
- }
-
- txn.FillDefaults(ledger.GenesisProto())
-}
-
-func txns(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txns ...*txntest.Txn) {
- t.Helper()
- for _, txn1 := range txns {
- txn(t, ledger, eval, txn1)
- }
-}
-
-func txn(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txn *txntest.Txn, problem ...string) {
- t.Helper()
- fillDefaults(t, ledger, eval, txn)
- err := eval.Transaction(txn.SignedTxn(), transactions.ApplyData{})
- if err != nil {
- if len(problem) == 1 && problem[0] != "" {
- require.Contains(t, err.Error(), problem[0])
- } else {
- require.NoError(t, err) // Will obviously fail
- }
- return
- }
- require.True(t, len(problem) == 0 || problem[0] == "")
-}
-
-func txgroup(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txns ...*txntest.Txn) error {
- t.Helper()
- for _, txn := range txns {
- fillDefaults(t, ledger, eval, txn)
- }
- txgroup := txntest.SignedTxns(txns...)
-
- return eval.TransactionGroup(transactions.WrapSignedTxnsWithAD(txgroup))
-}
-
-func testEvalAppPoolingGroup(t *testing.T, schema basics.StateSchema, approvalProgram string, consensusVersion protocol.ConsensusVersion) error {
- genesisInitState, addrs, _ := ledgertesting.GenesisWithProto(10, consensusVersion)
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
-
- eval := nextBlock(t, l)
-
- appcall1 := txntest.Txn{
- Sender: addrs[0],
- Type: protocol.ApplicationCallTx,
- GlobalStateSchema: schema,
- ApprovalProgram: approvalProgram,
- }
-
- appcall2 := txntest.Txn{
- Sender: addrs[0],
- Type: protocol.ApplicationCallTx,
- ApplicationID: basics.AppIndex(1),
- }
-
- appcall3 := txntest.Txn{
- Sender: addrs[1],
- Type: protocol.ApplicationCallTx,
- ApplicationID: basics.AppIndex(1),
- }
-
- return txgroup(t, l, eval, &appcall1, &appcall2, &appcall3)
-}
-
-// TestEvalAppPooledBudgetWithTxnGroup ensures 3 app call txns can successfully pool
-// budgets in a group txn and return an error if the budget is exceeded
-func TestEvalAppPooledBudgetWithTxnGroup(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- source := func(n int, m int) string {
- return "#pragma version 4\nbyte 0x1337BEEF\n" + strings.Repeat("keccak256\n", n) +
- strings.Repeat("substring 0 4\n", m) + "pop\nint 1\n"
- }
-
- params := []protocol.ConsensusVersion{
- protocol.ConsensusV29,
- protocol.ConsensusFuture,
- }
-
- cases := []struct {
- prog string
- isSuccessV29 bool
- isSuccessVFuture bool
- expectedErrorV29 string
- expectedErrorVFuture string
- }{
- {source(5, 47), true, true,
- "",
- ""},
- {source(5, 48), false, true,
- "pc=157 dynamic cost budget exceeded, executing pushint",
- ""},
- {source(16, 17), false, true,
- "pc= 12 dynamic cost budget exceeded, executing keccak256",
- ""},
- {source(16, 18), false, false,
- "pc= 12 dynamic cost budget exceeded, executing keccak256",
- "pc= 78 dynamic cost budget exceeded, executing pushint"},
- }
-
- for i, param := range params {
- for j, testCase := range cases {
- t.Run(fmt.Sprintf("i=%d,j=%d", i, j), func(t *testing.T) {
- err := testEvalAppPoolingGroup(t, basics.StateSchema{NumByteSlice: 3}, testCase.prog, param)
- if !testCase.isSuccessV29 && reflect.DeepEqual(param, protocol.ConsensusV29) {
- require.Error(t, err)
- require.Contains(t, err.Error(), testCase.expectedErrorV29)
- } else if !testCase.isSuccessVFuture && reflect.DeepEqual(param, protocol.ConsensusFuture) {
- require.Error(t, err)
- require.Contains(t, err.Error(), testCase.expectedErrorVFuture)
- }
- })
- }
- }
-}
-
-// endBlock completes the block being created, returns the ValidatedBlock for inspection
-func endBlock(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator) *ledgercore.ValidatedBlock {
- validatedBlock, err := eval.GenerateBlock()
- require.NoError(t, err)
- err = ledger.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
- require.NoError(t, err)
- // `rndBQ` gives the latest known block round added to the ledger
- // we should wait until `rndBQ` block to be committed to blockQueue,
- // in case there is a data race, noted in
- // https://github.com/algorand/go-algorand/issues/4349
- // where writing to `callTxnGroup` after `dl.fullBlock` caused data race,
- // because the underlying async goroutine `go bq.syncer()` is reading `callTxnGroup`.
- // A solution here would be wait until all new added blocks are committed,
- // then we return the result and continue the execution.
- rndBQ := ledger.Latest()
- ledger.WaitForCommit(rndBQ)
- return validatedBlock
-}
-
-// lookup gets the current accountdata for an address
-func lookup(t testing.TB, ledger *ledger.Ledger, addr basics.Address) basics.AccountData {
- ad, _, _, err := ledger.LookupLatest(addr)
- require.NoError(t, err)
- return ad
-}
-
-// micros gets the current microAlgo balance for an address
-func micros(t testing.TB, ledger *ledger.Ledger, addr basics.Address) uint64 {
- return lookup(t, ledger, addr).MicroAlgos.Raw
-}
-
-// holding gets the current balance and optin status for some asa for an address
-func holding(t testing.TB, ledger *ledger.Ledger, addr basics.Address, asset basics.AssetIndex) (uint64, bool) {
- if holding, ok := lookup(t, ledger, addr).Assets[asset]; ok {
- return holding.Amount, true
- }
- return 0, false
-}
-
-// asaParams gets the asset params for a given asa index
-func asaParams(t testing.TB, ledger *ledger.Ledger, asset basics.AssetIndex) (basics.AssetParams, error) {
- creator, ok, err := ledger.GetCreator(basics.CreatableIndex(asset), basics.AssetCreatable)
- if err != nil {
- return basics.AssetParams{}, err
- }
- if !ok {
- return basics.AssetParams{}, fmt.Errorf("no asset (%d)", asset)
- }
- if params, ok := lookup(t, ledger, creator).AssetParams[asset]; ok {
- return params, nil
- }
- return basics.AssetParams{}, fmt.Errorf("bad lookup (%d)", asset)
-}
-
-func TestGarbageClearState(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // v24 = apps
- testConsensusRange(t, 24, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
- defer dl.Close()
-
- createTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: "int 1",
- ClearStateProgram: []byte{},
- }
-
- dl.txn(&createTxn, "invalid program (empty)")
-
- createTxn.ClearStateProgram = []byte{0xfe} // bad uvarint
- dl.txn(&createTxn, "invalid version")
- })
-}
-
-func TestRewardsInAD(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // v15 put rewards into ApplyData
- testConsensusRange(t, 11, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
- defer dl.Close()
-
- payTxn := txntest.Txn{Type: protocol.PaymentTx, Sender: addrs[0], Receiver: addrs[1]}
- nonpartTxn := txntest.Txn{Type: protocol.KeyRegistrationTx, Sender: addrs[2], Nonparticipation: true}
- payNonPart := txntest.Txn{Type: protocol.PaymentTx, Sender: addrs[0], Receiver: addrs[2]}
-
- if ver < 18 { // Nonpart reyreg happens in v18
- dl.txn(&nonpartTxn, "tries to mark an account as nonparticipating")
- } else {
- dl.fullBlock(&nonpartTxn)
- }
-
- // Build up Residue in RewardsState so it's ready to pay
- for i := 1; i < 10; i++ {
- dl.fullBlock()
- }
-
- vb := dl.fullBlock(&payTxn, &payNonPart)
- payInBlock := vb.Block().Payset[0]
- nonPartInBlock := vb.Block().Payset[1]
- if ver >= 15 {
- require.Greater(t, payInBlock.ApplyData.SenderRewards.Raw, uint64(1000))
- require.Greater(t, payInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
- require.Equal(t, payInBlock.ApplyData.SenderRewards, payInBlock.ApplyData.ReceiverRewards)
- // Sender is not due for more, and Receiver is nonpart
- require.Zero(t, nonPartInBlock.ApplyData.SenderRewards)
- if ver < 18 {
- require.Greater(t, nonPartInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
- } else {
- require.Zero(t, nonPartInBlock.ApplyData.ReceiverRewards)
- }
- } else {
- require.Zero(t, payInBlock.ApplyData.SenderRewards)
- require.Zero(t, payInBlock.ApplyData.ReceiverRewards)
- require.Zero(t, nonPartInBlock.ApplyData.SenderRewards)
- require.Zero(t, nonPartInBlock.ApplyData.ReceiverRewards)
- }
- })
-}
-
-func TestMinBalanceChanges(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genesisInitState, addrs, _ := ledgertesting.Genesis(10)
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
-
- createTxn := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- AssetParams: basics.AssetParams{
- Total: 3,
- Manager: addrs[1],
- Reserve: addrs[2],
- Freeze: addrs[3],
- Clawback: addrs[4],
- },
- }
-
- const expectedID basics.AssetIndex = 1
- optInTxn := txntest.Txn{
- Type: "axfer",
- Sender: addrs[5],
- XferAsset: expectedID,
- AssetReceiver: addrs[5],
- }
-
- ad0init, _, _, err := l.LookupLatest(addrs[0])
- require.NoError(t, err)
- ad5init, _, _, err := l.LookupLatest(addrs[5])
- require.NoError(t, err)
-
- eval := nextBlock(t, l)
- txns(t, l, eval, &createTxn, &optInTxn)
- endBlock(t, l, eval)
-
- ad0new, _, _, err := l.LookupLatest(addrs[0])
- require.NoError(t, err)
- ad5new, _, _, err := l.LookupLatest(addrs[5])
- require.NoError(t, err)
-
- proto := l.GenesisProto()
- // Check balance and min balance requirement changes
- require.Equal(t, ad0init.MicroAlgos.Raw, ad0new.MicroAlgos.Raw+1000) // fee
- require.Equal(t, ad0init.MinBalance(&proto).Raw, ad0new.MinBalance(&proto).Raw-100000) // create
- require.Equal(t, ad5init.MicroAlgos.Raw, ad5new.MicroAlgos.Raw+1000) // fee
- require.Equal(t, ad5init.MinBalance(&proto).Raw, ad5new.MinBalance(&proto).Raw-100000) // optin
-
- optOutTxn := txntest.Txn{
- Type: "axfer",
- Sender: addrs[5],
- XferAsset: expectedID,
- AssetReceiver: addrs[0],
- AssetCloseTo: addrs[0],
- }
-
- closeTxn := txntest.Txn{
- Type: "acfg",
- Sender: addrs[1], // The manager, not the creator
- ConfigAsset: expectedID,
- }
-
- eval = nextBlock(t, l)
- txns(t, l, eval, &optOutTxn, &closeTxn)
- endBlock(t, l, eval)
-
- ad0final, _, _, err := l.LookupLatest(addrs[0])
- require.NoError(t, err)
- ad5final, _, _, err := l.LookupLatest(addrs[5])
- require.NoError(t, err)
- // Check we got our balance "back"
- require.Equal(t, ad0final.MinBalance(&proto), ad0init.MinBalance(&proto))
- require.Equal(t, ad5final.MinBalance(&proto), ad5init.MinBalance(&proto))
-}
-
-// TestDeleteNonExistantKeys checks if the EvalDeltas from deleting missing keys are correct
-func TestDeleteNonExistantKeys(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // AVM v2 (apps)
- testConsensusRange(t, 24, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
- defer dl.Close()
-
- const appid basics.AppIndex = 1
-
- createTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
-byte "missing_global"
-app_global_del
-int 0
-byte "missing_local"
-app_local_del
-`),
- }
-
- optInTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: appid,
- OnCompletion: transactions.OptInOC,
- }
-
- vb := dl.fullBlock(&createTxn, &optInTxn)
- require.Len(t, vb.Block().Payset[1].EvalDelta.GlobalDelta, 0)
- // For a while, we encoded an empty localdelta
- deltas := 1
- if ver >= 27 {
- deltas = 0
- }
- require.Len(t, vb.Block().Payset[1].EvalDelta.LocalDeltas, deltas)
- })
-}
-
-// TestAppInsMinBalance checks that accounts with MaxAppsOptedIn are accepted by block evaluator
-// and do not cause any MaximumMinimumBalance problems
-func TestAppInsMinBalance(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genesisInitState, addrs, _ := ledgertesting.Genesis(10)
- genesisInitState.Block.CurrentProtocol = protocol.ConsensusV30
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
-
- const appid basics.AppIndex = 1
-
- maxAppsOptedIn := config.Consensus[protocol.ConsensusV30].MaxAppsOptedIn
- require.Greater(t, maxAppsOptedIn, 0)
- maxAppsCreated := config.Consensus[protocol.ConsensusV30].MaxAppsCreated
- require.Greater(t, maxAppsCreated, 0)
- maxLocalSchemaEntries := config.Consensus[protocol.ConsensusV30].MaxLocalSchemaEntries
- require.Greater(t, maxLocalSchemaEntries, uint64(0))
-
- txnsCreate := make([]*txntest.Txn, 0, maxAppsOptedIn)
- txnsOptIn := make([]*txntest.Txn, 0, maxAppsOptedIn)
- appsCreated := make(map[basics.Address]int, len(addrs)-1)
-
- acctIdx := 0
- for i := 0; i < maxAppsOptedIn; i++ {
- creator := addrs[acctIdx]
- createTxn := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: creator,
- ApprovalProgram: "int 1",
- LocalStateSchema: basics.StateSchema{NumByteSlice: maxLocalSchemaEntries},
- Note: ledgertesting.RandomNote(),
- }
- txnsCreate = append(txnsCreate, &createTxn)
- count := appsCreated[creator]
- count++
- appsCreated[creator] = count
- if count == maxAppsCreated {
- acctIdx++
- }
-
- optInTxn := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: addrs[9],
- ApplicationID: appid + basics.AppIndex(i),
- OnCompletion: transactions.OptInOC,
- }
- txnsOptIn = append(txnsOptIn, &optInTxn)
- }
-
- eval := nextBlock(t, l)
- txns1 := append(txnsCreate, txnsOptIn...)
- txns(t, l, eval, txns1...)
- vb := endBlock(t, l, eval)
- mods := vb.Delta()
- appAppResources := mods.Accts.GetAllAppResources()
- appParamsCount := 0
- appLocalStatesCount := 0
- for _, ap := range appAppResources {
- if ap.Params.Params != nil {
- appParamsCount++
- }
- if ap.State.LocalState != nil {
- appLocalStatesCount++
- }
- }
- require.Equal(t, appLocalStatesCount, 50)
- require.Equal(t, appParamsCount, 50)
-}
-
-func TestDuplicates(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- testConsensusRange(t, 11, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
- defer dl.Close()
-
- pay := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: addrs[1],
- Amount: 10,
- }
- dl.txn(&pay)
- dl.txn(&pay, "transaction already in ledger")
-
- // Test same transaction in a later block
- dl.txn(&pay, "transaction already in ledger")
-
- // Change the note so it can go in again
- pay.Note = []byte("1")
- dl.txn(&pay)
-
- // Change note again, but try the txn twice in same group
- if dl.generator.GenesisProto().MaxTxGroupSize > 1 {
- pay.Note = []byte("2")
- dl.txgroup("transaction already in ledger", &pay, &pay)
- }
- })
-}
-
-var consensusByNumber = []protocol.ConsensusVersion{
- "", "", "", "", "", "", "",
- protocol.ConsensusV7,
- protocol.ConsensusV8,
- protocol.ConsensusV9,
- protocol.ConsensusV10,
- protocol.ConsensusV11, // first with viable payset commit type
- protocol.ConsensusV12,
- protocol.ConsensusV13,
- protocol.ConsensusV14,
- protocol.ConsensusV15, // rewards in AD
- protocol.ConsensusV16,
- protocol.ConsensusV17,
- protocol.ConsensusV18,
- protocol.ConsensusV19,
- protocol.ConsensusV20,
- protocol.ConsensusV21,
- protocol.ConsensusV22,
- protocol.ConsensusV23,
- protocol.ConsensusV24, // AVM v2 (apps)
- protocol.ConsensusV25,
- protocol.ConsensusV26,
- protocol.ConsensusV27,
- protocol.ConsensusV28,
- protocol.ConsensusV29,
- protocol.ConsensusV30, // AVM v5 (inner txs)
- protocol.ConsensusV31, // AVM v6 (inner txs with appls)
- protocol.ConsensusV32, // unlimited assets and apps
- protocol.ConsensusV33, // 320 rounds
- protocol.ConsensusV34, // AVM v7, stateproofs
- protocol.ConsensusV35, // stateproofs stake fix
- protocol.ConsensusFuture,
-}
-
-// TestReleasedVersion ensures that the necessary tidying is done when a new
-// protocol release happens. The new version must be added to
-// consensusByNumber, and a new LogicSigVersion must be added to vFuture.
-func TestReleasedVersion(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- // This confirms that the proto before future has no ApprovedUpgrades. Once
- // it does, that new version should be added to consensusByNumber.
- require.Len(t, config.Consensus[consensusByNumber[len(consensusByNumber)-2]].ApprovedUpgrades, 0)
- // And no funny business with vFuture
- require.Equal(t, protocol.ConsensusFuture, consensusByNumber[len(consensusByNumber)-1])
-
- // Ensure that vFuture gets a new LogicSigVersion when we promote the
- // existing one. That allows TestExperimental in the logic package to
- // prevent unintended releases of experimental opcodes.
- relV := config.Consensus[consensusByNumber[len(consensusByNumber)-2]].LogicSigVersion
- futureV := config.Consensus[protocol.ConsensusFuture].LogicSigVersion
- require.Equal(t, relV+1, futureV)
-}
-
-// testConsensusRange allows for running tests against a range of consensus
-// versions. Generally `start` will be the version that introduced the feature,
-// and `stop` will be 0 to indicate it should work right on up through vFuture.
-// `stop` will be an actual version number if we're confirming that something
-// STOPS working as of a particular version. When writing the test for a new
-// feature that is currently in vFuture, use the expected version number as
-// `start`. That will correspond to vFuture until a new consensus version is
-// created and inserted in consensusByNumber. At that point, your feature is
-// probably active in that version. (If it's being held in vFuture, just
-// increment your `start`.)
-func testConsensusRange(t *testing.T, start, stop int, test func(t *testing.T, ver int)) {
- if stop == 0 { // Treat 0 as "future"
- stop = len(consensusByNumber) - 1
- }
- for i := start; i <= stop; i++ {
- var version string
- if i == len(consensusByNumber)-1 {
- version = "vFuture"
- } else {
- version = fmt.Sprintf("v%d", i)
- }
- t.Run(fmt.Sprintf("cv=%s", version), func(t *testing.T) { test(t, i) })
- }
-}
-
-func benchConsensusRange(b *testing.B, start, stop int, bench func(t *testing.B, ver int)) {
- if stop == 0 { // Treat 0 as "future"
- stop = len(consensusByNumber) - 1
- }
- for i := start; i <= stop; i++ {
- var version string
- if i == len(consensusByNumber)-1 {
- version = "vFuture"
- } else {
- version = fmt.Sprintf("v%d", i)
- }
- b.Run(fmt.Sprintf("cv=%s", version), func(b *testing.B) { bench(b, i) })
- }
-}
-
-// TestHeaderAccess tests FirstValidTime and `block` which can access previous
-// block headers.
-func TestHeaderAccess(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // Added in v34
- testConsensusRange(t, 34, 0, func(t *testing.T, ver int) {
- cv := consensusByNumber[ver]
- dl := NewDoubleLedger(t, genBalances, cv)
- defer dl.Close()
-
- fvt := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- FirstValid: 0,
- ApprovalProgram: "txn FirstValidTime",
- }
- dl.txn(&fvt, "round 0 is not available")
-
- // advance current to 2
- pay := txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]}
- dl.fullBlock(&pay)
-
- fvt.FirstValid = 1
- dl.txn(&fvt, "round 0 is not available")
-
- fvt.FirstValid = 2
- dl.txn(&fvt) // current becomes 3
-
- // Advance current round far enough to test access MaxTxnLife ago
- for i := 0; i < int(config.Consensus[cv].MaxTxnLife); i++ {
- dl.fullBlock()
- }
-
- // current should be 1003. Confirm.
- require.EqualValues(t, 1002, dl.generator.Latest())
- require.EqualValues(t, 1002, dl.validator.Latest())
-
- fvt.FirstValid = 1003
- fvt.LastValid = 1010
- dl.txn(&fvt) // success advances the round
- // now we're confident current is 1004, so construct a txn that is as
- // old as possible, and confirm access.
- fvt.FirstValid = 1004 - basics.Round(config.Consensus[cv].MaxTxnLife)
- fvt.LastValid = 1004
- dl.txn(&fvt)
- })
-
-}
-
-// TestLogsInBlock ensures that logs appear in the block properly
-func TestLogsInBlock(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // Run tests from v30 onward
- testConsensusRange(t, 30, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
- defer dl.Close()
-
- createTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: "byte \"APP\"\n log\n int 1",
- // Fail the clear state
- ClearStateProgram: "byte \"CLR\"\n log\n int 0",
- }
- vb := dl.fullBlock(&createTxn)
- createInBlock := vb.Block().Payset[0]
- appID := createInBlock.ApplyData.ApplicationID
- require.Equal(t, "APP", createInBlock.ApplyData.EvalDelta.Logs[0])
-
- optInTxn := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: addrs[1],
- ApplicationID: appID,
- OnCompletion: transactions.OptInOC,
- }
- vb = dl.fullBlock(&optInTxn)
- optInInBlock := vb.Block().Payset[0]
- require.Equal(t, "APP", optInInBlock.ApplyData.EvalDelta.Logs[0])
-
- clearTxn := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: addrs[1],
- ApplicationID: appID,
- OnCompletion: transactions.ClearStateOC,
- }
- vb = dl.fullBlock(&clearTxn)
- clearInBlock := vb.Block().Payset[0]
- // Logs do not appear if the ClearState failed
- require.Len(t, clearInBlock.ApplyData.EvalDelta.Logs, 0)
- })
-}
-
-// TestUnfundedSenders confirms that accounts that don't even exist
-// can be the Sender in some situations. If some other transaction
-// covers the fee, and the transaction itself does not require an
-// asset or a min balance, it's fine.
-func TestUnfundedSenders(t *testing.T) {
- /*
- In a 0-fee transaction from unfunded sender, we still call balances.Move
- to “pay” the fee. Move() does not short-circuit a Move of 0 (for good
- reason, it allows compounding rewards). Therefore, in Move, we do
- rewards processing on the unfunded account. Before
- proto.UnfundedSenders, the rewards procesing would set the RewardsBase,
- which would require the account be written to DB, and therefore the MBR
- check would kick in (and fail). Now it skips the update if the account
- has less than RewardsUnit, as the update is meaningless anyway.
- */
-
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
-
- testConsensusRange(t, 24, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
- defer dl.Close()
-
- asaIndex := basics.AssetIndex(1)
-
- ghost := basics.Address{0x01}
-
- asaCreate := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- AssetParams: basics.AssetParams{
- Total: 10,
- Clawback: ghost,
- Freeze: ghost,
- Manager: ghost,
- },
- }
-
- appCreate := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- }
-
- dl.fullBlock(&asaCreate, &appCreate)
-
- // Advance so that rewardsLevel increases
- for i := 1; i < 10; i++ {
- dl.fullBlock()
- }
-
- fmt.Printf("addrs[0] = %+v\n", addrs[0])
- fmt.Printf("addrs[1] = %+v\n", addrs[1])
-
- benefactor := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: addrs[0],
- Fee: 2000,
- }
-
- ephemeral := []txntest.Txn{
- {
- Type: "pay",
- Amount: 0,
- Sender: ghost,
- Receiver: ghost,
- Fee: 0,
- },
- { // Axfer of 0
- Type: "axfer",
- AssetAmount: 0,
- Sender: ghost,
- AssetReceiver: basics.Address{0x02},
- XferAsset: basics.AssetIndex(1),
- Fee: 0,
- },
- { // Clawback
- Type: "axfer",
- AssetAmount: 0,
- Sender: ghost,
- AssetReceiver: addrs[0],
- AssetSender: addrs[1],
- XferAsset: asaIndex,
- Fee: 0,
- },
- { // Freeze
- Type: "afrz",
- Sender: ghost,
- FreezeAccount: addrs[0], // creator, therefore is opted in
- FreezeAsset: asaIndex,
- AssetFrozen: true,
- Fee: 0,
- },
- { // Unfreeze
- Type: "afrz",
- Sender: ghost,
- FreezeAccount: addrs[0], // creator, therefore is opted in
- FreezeAsset: asaIndex,
- AssetFrozen: false,
- Fee: 0,
- },
- { // App call
- Type: "appl",
- Sender: ghost,
- ApplicationID: basics.AppIndex(2),
- Fee: 0,
- },
- { // App creation (only works because it's also deleted)
- Type: "appl",
- Sender: ghost,
- OnCompletion: transactions.DeleteApplicationOC,
- Fee: 0,
- },
- }
-
- // v34 is the likely version for UnfundedSenders. Change if that doesn't happen.
- var problem string
- if ver < 34 {
- // In the old days, balances.Move would try to increase the rewardsState on the unfunded account
- problem = "balance 0 below min"
- }
- for i, e := range ephemeral {
- dl.txgroup(problem, benefactor.Noted(strconv.Itoa(i)), &e)
- }
- })
-}
-
-// TestAppCallAppDuringInit is similar to TestUnfundedSenders test, but now the
-// unfunded sender is a newly created app. The fee has been paid by the outer
-// transaction, so the app should be able to make an app call as that requires
-// no min balance.
-func TestAppCallAppDuringInit(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
- defer dl.Close()
-
- approve := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- }
-
- // construct a simple app
- vb := dl.fullBlock(&approve)
-
- // now make a new app that calls it during init
- approveID := vb.Block().Payset[0].ApplicationID
-
- // Advance so that rewardsLevel increases
- for i := 1; i < 10; i++ {
- dl.fullBlock()
- }
-
- callInInit := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: `
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
- int 1
- `,
- ForeignApps: []basics.AppIndex{approveID},
- Fee: 2000, // Enough to have the inner fee paid for
- }
- // v34 is the likely version for UnfundedSenders. Change if that doesn't happen.
- var problem string
- if ver < 34 {
- // In the old days, balances.Move would try to increase the rewardsState on the unfunded account
- problem = "balance 0 below min"
- }
- dl.txn(&callInInit, problem)
- })
-}
diff --git a/ledger/internal/eval_test.go b/ledger/internal/eval_test.go
index 8f07612be..495ff6097 100644
--- a/ledger/internal/eval_test.go
+++ b/ledger/internal/eval_test.go
@@ -519,6 +519,8 @@ func (ledger *evalTestLedger) StartEvaluator(hdr bookkeeping.BlockHeader, payset
})
}
+func (ledger *evalTestLedger) FlushCaches() {}
+
// GetCreatorForRound takes a CreatableIndex and a CreatableType and tries to
// look up a creator address, setting ok to false if the query succeeded but no
// creator was found.
@@ -577,6 +579,10 @@ func (ledger *evalTestLedger) LookupAsset(rnd basics.Round, addr basics.Address,
return res, nil
}
+func (ledger *evalTestLedger) LookupKv(rnd basics.Round, key string) ([]byte, error) {
+ panic("unimplemented")
+}
+
// GenesisHash returns the genesis hash for this ledger.
func (ledger *evalTestLedger) GenesisHash() crypto.Digest {
return ledger.genesisHash
@@ -768,6 +774,10 @@ func (l *testCowBaseLedger) LookupAsset(rnd basics.Round, addr basics.Address, a
return ledgercore.AssetResource{}, errors.New("not implemented")
}
+func (l *testCowBaseLedger) LookupKv(rnd basics.Round, key string) ([]byte, error) {
+ return nil, errors.New("not implemented")
+}
+
func (l *testCowBaseLedger) GetCreatorForRound(_ basics.Round, cindex basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
res := l.creators[0]
l.creators = l.creators[1:]
diff --git a/ledger/internal/prefetcher/prefetcher.go b/ledger/internal/prefetcher/prefetcher.go
index 82e0d830c..b7223806f 100644
--- a/ledger/internal/prefetcher/prefetcher.go
+++ b/ledger/internal/prefetcher/prefetcher.go
@@ -76,7 +76,7 @@ type LoadedTransactionGroup struct {
type accountPrefetcher struct {
ledger Ledger
rnd basics.Round
- groups [][]transactions.SignedTxnWithAD
+ txnGroups [][]transactions.SignedTxnWithAD
feeSinkAddr basics.Address
consensusParams config.ConsensusParams
outChan chan LoadedTransactionGroup
@@ -84,14 +84,14 @@ type accountPrefetcher struct {
// PrefetchAccounts loads the account data for the provided transaction group list. It also loads the feeSink account and add it to the first returned transaction group.
// The order of the transaction groups returned by the channel is identical to the one in the input array.
-func PrefetchAccounts(ctx context.Context, l Ledger, rnd basics.Round, groups [][]transactions.SignedTxnWithAD, feeSinkAddr basics.Address, consensusParams config.ConsensusParams) <-chan LoadedTransactionGroup {
+func PrefetchAccounts(ctx context.Context, l Ledger, rnd basics.Round, txnGroups [][]transactions.SignedTxnWithAD, feeSinkAddr basics.Address, consensusParams config.ConsensusParams) <-chan LoadedTransactionGroup {
prefetcher := &accountPrefetcher{
ledger: l,
rnd: rnd,
- groups: groups,
+ txnGroups: txnGroups,
feeSinkAddr: feeSinkAddr,
consensusParams: consensusParams,
- outChan: make(chan LoadedTransactionGroup, len(groups)),
+ outChan: make(chan LoadedTransactionGroup, len(txnGroups)),
}
go prefetcher.prefetch(ctx)
@@ -117,6 +117,9 @@ type groupTask struct {
resources []LoadedResourcesEntry
// resourcesCount is the number of resources that nees to be loaded per transaction group
resourcesCount int
+
+ // error while processing this group task
+ err *GroupTaskError
}
// preloaderTask manage the loading of a single element, whether it's a resource or an account address.
@@ -128,9 +131,9 @@ type preloaderTask struct {
// resource type
creatableType basics.CreatableType
// a list of transaction group tasks that depends on this address or resource
- groups []*groupTask
+ groupTasks []*groupTask
// a list of indices into the groupTask.balances or groupTask.resources where the address would be stored
- groupIndices []int
+ groupTasksIndices []int
}
// preloaderTaskQueue is a dynamic linked list of enqueued entries, optimized for non-syncronized insertion and
@@ -198,18 +201,18 @@ func loadAccountsAddAccountTask(addr *basics.Address, wt *groupTask, accountTask
}
if task, have := accountTasks[*addr]; !have {
task := &preloaderTask{
- address: addr,
- groups: make([]*groupTask, 1, 4),
- groupIndices: make([]int, 1, 4),
+ address: addr,
+ groupTasks: make([]*groupTask, 1, 4),
+ groupTasksIndices: make([]int, 1, 4),
}
- task.groups[0] = wt
- task.groupIndices[0] = wt.balancesCount
+ task.groupTasks[0] = wt
+ task.groupTasksIndices[0] = wt.balancesCount
accountTasks[*addr] = task
queue.enqueue(task)
} else {
- task.groups = append(task.groups, wt)
- task.groupIndices = append(task.groupIndices, wt.balancesCount)
+ task.groupTasks = append(task.groupTasks, wt)
+ task.groupTasksIndices = append(task.groupTasksIndices, wt.balancesCount)
}
wt.balancesCount++
}
@@ -226,20 +229,20 @@ func loadAccountsAddResourceTask(addr *basics.Address, cidx basics.CreatableInde
}
if task, have := resourceTasks[key]; !have {
task := &preloaderTask{
- address: addr,
- groups: make([]*groupTask, 1, 4),
- groupIndices: make([]int, 1, 4),
- creatableIndex: cidx,
- creatableType: ctype,
+ address: addr,
+ groupTasks: make([]*groupTask, 1, 4),
+ groupTasksIndices: make([]int, 1, 4),
+ creatableIndex: cidx,
+ creatableType: ctype,
}
- task.groups[0] = wt
- task.groupIndices[0] = wt.resourcesCount
+ task.groupTasks[0] = wt
+ task.groupTasksIndices[0] = wt.resourcesCount
resourceTasks[key] = task
queue.enqueue(task)
} else {
- task.groups = append(task.groups, wt)
- task.groupIndices = append(task.groupIndices, wt.resourcesCount)
+ task.groupTasks = append(task.groupTasks, wt)
+ task.groupTasksIndices = append(task.groupTasksIndices, wt.resourcesCount)
}
wt.resourcesCount++
}
@@ -250,6 +253,7 @@ func loadAccountsAddResourceTask(addr *basics.Address, cidx basics.CreatableInde
func (p *accountPrefetcher) prefetch(ctx context.Context) {
defer close(p.outChan)
accountTasks := make(map[basics.Address]*preloaderTask)
+ resourceTasks := make(map[accountCreatableKey]*preloaderTask)
var maxTxnGroupEntries int
if p.consensusParams.Application {
@@ -260,21 +264,21 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) {
maxTxnGroupEntries = p.consensusParams.MaxTxGroupSize * 8
}
- tasksQueue := allocPreloaderQueue(len(p.groups), maxTxnGroupEntries)
+ tasksQueue := allocPreloaderQueue(len(p.txnGroups), maxTxnGroupEntries)
// totalBalances counts the total number of balances over all the transaction groups
totalBalances := 0
totalResources := 0
- groupsReady := make([]groupTask, len(p.groups))
+ groupsReady := make([]groupTask, len(p.txnGroups))
// Add fee sink to the first group
- if len(p.groups) > 0 {
+ if len(p.txnGroups) > 0 {
// the feeSinkAddr is known to be non-empty
feeSinkPreloader := &preloaderTask{
- address: &p.feeSinkAddr,
- groups: []*groupTask{&groupsReady[0]},
- groupIndices: []int{0},
+ address: &p.feeSinkAddr,
+ groupTasks: []*groupTask{&groupsReady[0]},
+ groupTasksIndices: []int{0},
}
groupsReady[0].balancesCount = 1
accountTasks[p.feeSinkAddr] = feeSinkPreloader
@@ -283,21 +287,64 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) {
// iterate over the transaction groups and add all their account addresses to the list
queue := &tasksQueue
- for i := range p.groups {
+ for i := range p.txnGroups {
task := &groupsReady[i]
- for j := range p.groups[i] {
- stxn := &p.groups[i][j]
+ for j := range p.txnGroups[i] {
+ stxn := &p.txnGroups[i][j]
switch stxn.Txn.Type {
case protocol.PaymentTx:
loadAccountsAddAccountTask(&stxn.Txn.Receiver, task, accountTasks, queue)
loadAccountsAddAccountTask(&stxn.Txn.CloseRemainderTo, task, accountTasks, queue)
case protocol.AssetConfigTx:
+ loadAccountsAddResourceTask(nil, basics.CreatableIndex(stxn.Txn.ConfigAsset), basics.AssetCreatable, task, resourceTasks, queue)
case protocol.AssetTransferTx:
+ if !stxn.Txn.AssetSender.IsZero() {
+ loadAccountsAddResourceTask(nil, basics.CreatableIndex(stxn.Txn.XferAsset), basics.AssetCreatable, task, resourceTasks, queue)
+ loadAccountsAddResourceTask(&stxn.Txn.AssetSender, basics.CreatableIndex(stxn.Txn.XferAsset), basics.AssetCreatable, task, resourceTasks, queue)
+ } else {
+ if stxn.Txn.AssetAmount == 0 && (stxn.Txn.AssetReceiver == stxn.Txn.Sender) { // opt in
+ loadAccountsAddResourceTask(nil, basics.CreatableIndex(stxn.Txn.XferAsset), basics.AssetCreatable, task, resourceTasks, queue)
+ }
+ if stxn.Txn.AssetAmount != 0 { // zero transfer is noop
+ loadAccountsAddResourceTask(&stxn.Txn.Sender, basics.CreatableIndex(stxn.Txn.XferAsset), basics.AssetCreatable, task, resourceTasks, queue)
+ }
+ }
+ if !stxn.Txn.AssetReceiver.IsZero() {
+ if stxn.Txn.AssetAmount != 0 || (stxn.Txn.AssetReceiver == stxn.Txn.Sender) {
+ // if not zero transfer or opt in then prefetch
+ loadAccountsAddResourceTask(&stxn.Txn.AssetReceiver, basics.CreatableIndex(stxn.Txn.XferAsset), basics.AssetCreatable, task, resourceTasks, queue)
+ }
+ }
+ if !stxn.Txn.AssetCloseTo.IsZero() {
+ loadAccountsAddResourceTask(&stxn.Txn.AssetCloseTo, basics.CreatableIndex(stxn.Txn.XferAsset), basics.AssetCreatable, task, resourceTasks, queue)
+ }
case protocol.AssetFreezeTx:
+ if !stxn.Txn.FreezeAccount.IsZero() {
+ loadAccountsAddResourceTask(nil, basics.CreatableIndex(stxn.Txn.FreezeAsset), basics.AssetCreatable, task, resourceTasks, queue)
+ loadAccountsAddResourceTask(&stxn.Txn.FreezeAccount, basics.CreatableIndex(stxn.Txn.FreezeAsset), basics.AssetCreatable, task, resourceTasks, queue)
+ loadAccountsAddAccountTask(&stxn.Txn.FreezeAccount, task, accountTasks, queue)
+ }
case protocol.ApplicationCallTx:
+ if stxn.Txn.ApplicationID != 0 {
+ // load the global - so that we'll have the program
+ loadAccountsAddResourceTask(nil, basics.CreatableIndex(stxn.Txn.ApplicationID), basics.AppCreatable, task, resourceTasks, queue)
+ // load the local - so that we'll have the local state
+ // TODO: this is something we need to decide if we want to enable, since not
+ // every application call would use local storage.
+ if (stxn.Txn.ApplicationCallTxnFields.OnCompletion == transactions.OptInOC) ||
+ (stxn.Txn.ApplicationCallTxnFields.OnCompletion == transactions.CloseOutOC) ||
+ (stxn.Txn.ApplicationCallTxnFields.OnCompletion == transactions.ClearStateOC) {
+ loadAccountsAddResourceTask(&stxn.Txn.Sender, basics.CreatableIndex(stxn.Txn.ApplicationID), basics.AppCreatable, task, resourceTasks, queue)
+ }
+ }
+
+ // do not preload Txn.ForeignApps, Txn.ForeignAssets, Txn.Accounts
+ // since they might be non-used arbitrary values
+
case protocol.StateProofTx:
case protocol.KeyRegistrationTx:
}
+
// If you add new addresses here, also add them in getTxnAddresses().
if !stxn.Txn.Sender.IsZero() {
loadAccountsAddAccountTask(&stxn.Txn.Sender, task, accountTasks, queue)
@@ -356,24 +403,20 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) {
// iterate on the transaction groups tasks. This array retains the original order.
completed := make(map[int64]bool)
- for i := int64(0); i < int64(len(p.groups)); {
+ for i := int64(0); i < int64(len(p.txnGroups)); {
wait:
incompleteCount := atomic.LoadInt64(&groupsReady[i].incompleteCount)
if incompleteCount > 0 || (incompleteCount != dependencyFreeGroup && !completed[i]) {
select {
case done := <-groupDoneCh:
if done.err != nil {
- // if there is an error, report the error to the output channel.
- p.outChan <- LoadedTransactionGroup{
- Err: &GroupTaskError{
- err: done.err,
- GroupIdx: done.groupIdx,
- Address: done.task.address,
- CreatableIndex: done.task.creatableIndex,
- CreatableType: done.task.creatableType,
- },
+ groupsReady[done.groupIdx].err = &GroupTaskError{
+ err: done.err,
+ GroupIdx: done.groupIdx,
+ Address: done.task.address,
+ CreatableIndex: done.task.creatableIndex,
+ CreatableType: done.task.creatableType,
}
- return
}
if done.groupIdx > i {
// mark future txn as ready.
@@ -388,7 +431,7 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) {
}
}
next := i
- for ; next < int64(len(p.groups)); next++ {
+ for ; next < int64(len(p.txnGroups)); next++ {
if !completed[next] {
if next > i {
i = next
@@ -399,10 +442,11 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) {
delete(completed, next)
- // if we had no error, write the result to the output channel.
+ // write the result to the output channel.
// this write will not block since we preallocated enough space on the channel.
p.outChan <- LoadedTransactionGroup{
- TxnGroup: p.groups[next],
+ Err: groupsReady[next].err,
+ TxnGroup: p.txnGroups[next],
Accounts: groupsReady[next].balances,
Resources: groupsReady[next].resources,
}
@@ -460,15 +504,19 @@ func (p *accountPrefetcher) asyncPrefetchRoutine(queue *preloaderTaskQueue, task
// if there was an error..
if err != nil {
// there was an error loading that entry.
- break
+ for _, wt := range task.groupTasks {
+ // notify the channel of the error.
+ wt.markCompletionAcctError(err, task, groupDoneCh)
+ }
+ continue
}
br := LoadedAccountDataEntry{
Address: task.address,
Data: &acctData,
}
// update all the group tasks with the new acquired balance.
- for i, wt := range task.groups {
- wt.markCompletionAcct(task.groupIndices[i], br, groupDoneCh)
+ for i, wt := range task.groupTasks {
+ wt.markCompletionAcct(task.groupTasksIndices[i], br, groupDoneCh)
}
continue
}
@@ -479,7 +527,11 @@ func (p *accountPrefetcher) asyncPrefetchRoutine(queue *preloaderTaskQueue, task
creator, ok, err = p.ledger.GetCreatorForRound(p.rnd, task.creatableIndex, task.creatableType)
if err != nil {
// there was an error loading that entry.
- break
+ for _, wt := range task.groupTasks {
+ // notify the channel of the error.
+ wt.markCompletionAcctError(err, task, groupDoneCh)
+ }
+ continue
}
if !ok {
re := LoadedResourcesEntry{
@@ -487,8 +539,8 @@ func (p *accountPrefetcher) asyncPrefetchRoutine(queue *preloaderTaskQueue, task
CreatableType: task.creatableType,
}
// update all the group tasks with the new acquired balance.
- for i, wt := range task.groups {
- wt.markCompletionResource(task.groupIndices[i], re, groupDoneCh)
+ for i, wt := range task.groupTasks {
+ wt.markCompletionResource(task.groupTasksIndices[i], re, groupDoneCh)
}
continue
}
@@ -508,7 +560,11 @@ func (p *accountPrefetcher) asyncPrefetchRoutine(queue *preloaderTaskQueue, task
}
if err != nil {
// there was an error loading that entry.
- break
+ for _, wt := range task.groupTasks {
+ // notify the channel of the error.
+ wt.markCompletionAcctError(err, task, groupDoneCh)
+ }
+ continue
}
re := LoadedResourcesEntry{
Resource: &resource,
@@ -517,14 +573,8 @@ func (p *accountPrefetcher) asyncPrefetchRoutine(queue *preloaderTaskQueue, task
CreatableType: task.creatableType,
}
// update all the group tasks with the new acquired balance.
- for i, wt := range task.groups {
- wt.markCompletionResource(task.groupIndices[i], re, groupDoneCh)
+ for i, wt := range task.groupTasks {
+ wt.markCompletionResource(task.groupTasksIndices[i], re, groupDoneCh)
}
}
- // if we got here, it means that there was an error.
- // in every case we get here, the task is gurenteed to be a non-nil.
- for _, wt := range task.groups {
- // notify the channel of the error.
- wt.markCompletionAcctError(err, task, groupDoneCh)
- }
}
diff --git a/ledger/internal/prefetcher/prefetcher_alignment_test.go b/ledger/internal/prefetcher/prefetcher_alignment_test.go
index 05a672d32..2b553c974 100644
--- a/ledger/internal/prefetcher/prefetcher_alignment_test.go
+++ b/ledger/internal/prefetcher/prefetcher_alignment_test.go
@@ -145,6 +145,9 @@ func (l *prefetcherAlignmentTestLedger) LookupAsset(rnd basics.Round, addr basic
return l.assets[addr][aidx], nil
}
+func (l *prefetcherAlignmentTestLedger) LookupKv(rnd basics.Round, key string) ([]byte, error) {
+ panic("not implemented")
+}
func (l *prefetcherAlignmentTestLedger) GetCreatorForRound(_ basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
l.mu.Lock()
if l.requestedCreators == nil {
@@ -170,6 +173,7 @@ func (l *prefetcherAlignmentTestLedger) LatestTotals() (basics.Round, ledgercore
func (l *prefetcherAlignmentTestLedger) VotersForStateProof(basics.Round) (*ledgercore.VotersForRound, error) {
return nil, nil
}
+func (l *prefetcherAlignmentTestLedger) FlushCaches() {}
func parseLoadedAccountDataEntries(loadedAccountDataEntries []prefetcher.LoadedAccountDataEntry) map[basics.Address]struct{} {
if len(loadedAccountDataEntries) == 0 {
@@ -395,7 +399,6 @@ func TestEvaluatorPrefetcherAlignmentCreateAsset(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentReconfigAsset(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
addr := makeAddress(1)
@@ -448,7 +451,6 @@ func TestEvaluatorPrefetcherAlignmentReconfigAsset(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentAssetOptIn(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
assetID := basics.AssetIndex(5)
@@ -503,8 +505,7 @@ func TestEvaluatorPrefetcherAlignmentAssetOptIn(t *testing.T) {
require.Equal(t, requested, prefetched)
}
-func TestEvaluatorPrefetcherAlignmentAssetTransfer(t *testing.T) {
- t.Skip("disabled")
+func TestEvaluatorPrefetcherAlignmentAssetOptInCloseTo(t *testing.T) {
partitiontest.PartitionTest(t)
assetID := basics.AssetIndex(5)
@@ -570,8 +571,97 @@ func TestEvaluatorPrefetcherAlignmentAssetTransfer(t *testing.T) {
require.Equal(t, requested, prefetched)
}
+func TestEvaluatorPrefetcherAlignmentAssetTransfer(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ assetID := basics.AssetIndex(5)
+ l := &prefetcherAlignmentTestLedger{
+ balances: map[basics.Address]ledgercore.AccountData{
+ rewardsPool(): {
+ AccountBaseData: ledgercore.AccountBaseData{
+ MicroAlgos: basics.MicroAlgos{Raw: 1234567890},
+ },
+ },
+ makeAddress(1): {
+ AccountBaseData: ledgercore.AccountBaseData{
+ MicroAlgos: basics.MicroAlgos{Raw: 1000001},
+ TotalAssets: 1,
+ TotalAssetParams: 1,
+ },
+ },
+ makeAddress(2): {
+ AccountBaseData: ledgercore.AccountBaseData{
+ MicroAlgos: basics.MicroAlgos{Raw: 1000002},
+ },
+ },
+ makeAddress(3): {
+ AccountBaseData: ledgercore.AccountBaseData{
+ MicroAlgos: basics.MicroAlgos{Raw: 1000003},
+ },
+ },
+ },
+ assets: map[basics.Address]map[basics.AssetIndex]ledgercore.AssetResource{
+ makeAddress(1): {
+ assetID: {
+ AssetParams: &basics.AssetParams{},
+ AssetHolding: &basics.AssetHolding{},
+ },
+ },
+ makeAddress(2): {
+ assetID: {
+ AssetHolding: &basics.AssetHolding{Amount: 5},
+ },
+ },
+ makeAddress(3): {
+ assetID: {
+ AssetHolding: &basics.AssetHolding{},
+ },
+ },
+ },
+ creators: map[basics.CreatableIndex]basics.Address{
+ basics.CreatableIndex(assetID): makeAddress(1),
+ },
+ }
+
+ txn := transactions.Transaction{
+ Type: protocol.AssetTransferTx,
+ Header: transactions.Header{
+ Sender: makeAddress(2),
+ GenesisHash: genesisHash(),
+ },
+ AssetTransferTxnFields: transactions.AssetTransferTxnFields{
+ XferAsset: assetID,
+ AssetReceiver: makeAddress(3),
+ AssetAmount: 1,
+ },
+ }
+
+ requested, prefetched := run(t, l, txn)
+
+ prefetched.Accounts[rewardsPool()] = struct{}{}
+ require.Equal(t, requested, prefetched)
+
+ // zero transfer of any asset
+ txn = transactions.Transaction{
+ Type: protocol.AssetTransferTx,
+ Header: transactions.Header{
+ Sender: makeAddress(1),
+ GenesisHash: genesisHash(),
+ },
+ AssetTransferTxnFields: transactions.AssetTransferTxnFields{
+ XferAsset: assetID + 12345,
+ AssetReceiver: makeAddress(2),
+ AssetAmount: 0,
+ },
+ }
+
+ requested, prefetched = run(t, l, txn)
+
+ prefetched.Accounts[rewardsPool()] = struct{}{}
+ require.Equal(t, requested, prefetched)
+}
+
func TestEvaluatorPrefetcherAlignmentAssetClawback(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
assetID := basics.AssetIndex(5)
@@ -811,7 +901,6 @@ func TestEvaluatorPrefetcherAlignmentCreateApplication(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentDeleteApplication(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
addr := makeAddress(1)
@@ -866,7 +955,6 @@ func TestEvaluatorPrefetcherAlignmentDeleteApplication(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentApplicationOptIn(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -925,7 +1013,6 @@ func TestEvaluatorPrefetcherAlignmentApplicationOptIn(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentApplicationCloseOut(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -990,7 +1077,6 @@ func TestEvaluatorPrefetcherAlignmentApplicationCloseOut(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentApplicationClearState(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -1055,7 +1141,6 @@ func TestEvaluatorPrefetcherAlignmentApplicationClearState(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentApplicationCallAccountsDeclaration(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -1116,15 +1201,14 @@ func TestEvaluatorPrefetcherAlignmentApplicationCallAccountsDeclaration(t *testi
requested, prefetched := run(t, l, txn)
prefetched.Accounts[rewardsPool()] = struct{}{}
- // Loading accounts depends on the smart contract program. Ignore the addresses
- // not requested.
- requested.Accounts[makeAddress(5)] = struct{}{}
- requested.Accounts[makeAddress(3)] = struct{}{}
+ // Foreign accounts are not loaded, ensure they are not prefetched
+ require.NotContains(t, prefetched.Accounts, makeAddress(5))
+ require.NotContains(t, prefetched.Accounts, makeAddress(3))
+
require.Equal(t, requested, prefetched)
}
func TestEvaluatorPrefetcherAlignmentApplicationCallForeignAppsDeclaration(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -1185,15 +1269,13 @@ func TestEvaluatorPrefetcherAlignmentApplicationCallForeignAppsDeclaration(t *te
requested, prefetched := run(t, l, txn)
prefetched.Accounts[rewardsPool()] = struct{}{}
- // Loading foreign apps depends on the smart contract program. Ignore the apps
- // not requested.
- requested.Creators[creatable{cindex: 6, ctype: basics.AppCreatable}] = struct{}{}
- requested.Creators[creatable{cindex: 8, ctype: basics.AppCreatable}] = struct{}{}
+ // Foreign apps are not loaded, ensure they are not prefetched
+ require.NotContains(t, prefetched.Creators, creatable{cindex: 6, ctype: basics.AppCreatable})
+ require.NotContains(t, prefetched.Creators, creatable{cindex: 8, ctype: basics.AppCreatable})
require.Equal(t, requested, prefetched)
}
func TestEvaluatorPrefetcherAlignmentApplicationCallForeignAssetsDeclaration(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -1254,10 +1336,9 @@ func TestEvaluatorPrefetcherAlignmentApplicationCallForeignAssetsDeclaration(t *
requested, prefetched := run(t, l, txn)
prefetched.Accounts[rewardsPool()] = struct{}{}
- // Loading foreign assets depends on the smart contract program. Ignore the assets
- // not requested.
- requested.Creators[creatable{cindex: 6, ctype: basics.AssetCreatable}] = struct{}{}
- requested.Creators[creatable{cindex: 8, ctype: basics.AssetCreatable}] = struct{}{}
+ // Foreign assets are not loaded, ensure they are not prefetched
+ require.NotContains(t, prefetched.Creators, creatable{cindex: 6, ctype: basics.AssetCreatable})
+ require.NotContains(t, prefetched.Creators, creatable{cindex: 8, ctype: basics.AssetCreatable})
require.Equal(t, requested, prefetched)
}
diff --git a/ledger/internal/prefetcher/prefetcher_test.go b/ledger/internal/prefetcher/prefetcher_test.go
index 40fe6949b..555cc8f6d 100644
--- a/ledger/internal/prefetcher/prefetcher_test.go
+++ b/ledger/internal/prefetcher/prefetcher_test.go
@@ -259,7 +259,6 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
{
name: "asset config transaction for a non-existing asset",
- skip: true,
signedTxn: transactions.SignedTxn{
Txn: transactions.Transaction{
Type: protocol.AssetConfigTx,
@@ -296,7 +295,6 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
{
name: "asset config transaction for an existing asset",
- skip: true,
signedTxn: transactions.SignedTxn{
Txn: transactions.Transaction{
Type: protocol.AssetConfigTx,
@@ -333,7 +331,6 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
{
name: "asset transfer transaction",
- skip: true,
signedTxn: transactions.SignedTxn{
Txn: transactions.Transaction{
Type: protocol.AssetTransferTx,
@@ -342,6 +339,7 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
AssetTransferTxnFields: transactions.AssetTransferTxnFields{
XferAsset: 1001,
+ AssetAmount: 1,
AssetSender: makeAddress(2),
AssetReceiver: makeAddress(3),
AssetCloseTo: makeAddress(4),
@@ -384,8 +382,52 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
},
{
+ name: "asset transfer transaction zero amount",
+ signedTxn: transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.AssetTransferTx,
+ Header: transactions.Header{
+ Sender: makeAddress(1),
+ },
+ AssetTransferTxnFields: transactions.AssetTransferTxnFields{
+ XferAsset: 1001,
+ AssetSender: makeAddress(2),
+ AssetReceiver: makeAddress(3),
+ AssetCloseTo: makeAddress(4),
+ },
+ },
+ },
+ accounts: []prefetcher.LoadedAccountDataEntry{
+ {
+ Address: &feeSinkAddr,
+ Data: &ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 0}},
+ },
+ },
+ {
+ Address: makeAddressPtr(1),
+ Data: &ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 100000000}},
+ },
+ },
+ },
+ resources: []prefetcher.LoadedResourcesEntry{
+ {
+ Address: makeAddressPtr(2),
+ CreatableIndex: 1001,
+ CreatableType: basics.AssetCreatable,
+ Resource: &ledgercore.AccountResource{},
+ },
+ {
+ Address: makeAddressPtr(4),
+ CreatableIndex: 1001,
+ CreatableType: basics.AssetCreatable,
+ Resource: &ledgercore.AccountResource{},
+ },
+ },
+ },
+ {
name: "asset freeze transaction",
- skip: true,
signedTxn: transactions.SignedTxn{
Txn: transactions.Transaction{
Type: protocol.AssetFreezeTx,
@@ -435,7 +477,6 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
{
name: "application transaction",
- skip: true,
signedTxn: transactions.SignedTxn{
Txn: transactions.Transaction{
Type: protocol.ApplicationCallTx,
@@ -471,20 +512,23 @@ func TestEvaluatorPrefetcher(t *testing.T) {
AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 100000000}},
},
},
- {
- Address: makeAddressPtr(4),
- Data: &ledgercore.AccountData{
- AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 0}},
+ /*
+ {
+ Address: makeAddressPtr(4),
+ Data: &ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 0}},
+ },
},
- },
- {
- Address: makeAddressPtr(5),
- Data: &ledgercore.AccountData{
- AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 0}},
+ {
+ Address: makeAddressPtr(5),
+ Data: &ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 0}},
+ },
},
- },
+ */
},
resources: []prefetcher.LoadedResourcesEntry{
+ /* - if we'll decide that we want to prefetch the foreign apps/assets, then this should be enabled
{
Address: makeAddressPtr(2),
CreatableIndex: 1001,
@@ -503,7 +547,8 @@ func TestEvaluatorPrefetcher(t *testing.T) {
CreatableType: basics.AppCreatable,
Resource: nil,
},
- /* - if we'll decide that we want to perfetch the account local state, then this should be enabled.
+ */
+ /* - if we'll decide that we want to prefetch the account local state, then this should be enabled.
{
address: acctAddrPtr(1),
creatableIndex: 10,
@@ -545,7 +590,6 @@ func TestEvaluatorPrefetcher(t *testing.T) {
// Test for error from LookupAsset
func TestAssetLookupError(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
rnd := basics.Round(5)
@@ -568,10 +612,12 @@ func TestAssetLookupError(t *testing.T) {
}
errorReceived := false
- groups := make([][]transactions.SignedTxnWithAD, 5)
- for i := 0; i < 5; i++ {
- groups[i] = make([]transactions.SignedTxnWithAD, 2)
- for j := 0; j < 2; j++ {
+ const numGroups = 5
+ const txnPerGroup = 2
+ groups := make([][]transactions.SignedTxnWithAD, numGroups)
+ for i := 0; i < numGroups; i++ {
+ groups[i] = make([]transactions.SignedTxnWithAD, txnPerGroup)
+ for j := 0; j < txnPerGroup; j++ {
groups[i][j].SignedTxn = assetTransferTxn
if i == 2 {
// force error in asset lookup in the second txn group only
@@ -579,8 +625,12 @@ func TestAssetLookupError(t *testing.T) {
}
}
}
+
preloadedTxnGroupsCh := prefetcher.PrefetchAccounts(context.Background(), ledger, rnd+100, groups, feeSinkAddr, config.Consensus[proto])
+
+ receivedNumGroups := 0
for loadedTxnGroup := range preloadedTxnGroupsCh {
+ receivedNumGroups++
if loadedTxnGroup.Err != nil {
errorReceived = true
require.Equal(t, int64(2), loadedTxnGroup.Err.GroupIdx)
@@ -589,13 +639,14 @@ func TestAssetLookupError(t *testing.T) {
require.Equal(t, errorTriggerAssetIndex, int(loadedTxnGroup.Err.CreatableIndex))
require.Equal(t, basics.AssetCreatable, loadedTxnGroup.Err.CreatableType)
}
+ require.Equal(t, txnPerGroup, len(loadedTxnGroup.TxnGroup))
}
require.True(t, errorReceived)
+ require.Equal(t, numGroups, receivedNumGroups)
}
// Test for error from GetCreatorForRound
func TestGetCreatorForRoundError(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
rnd := basics.Round(5)
@@ -610,23 +661,33 @@ func TestGetCreatorForRoundError(t *testing.T) {
Sender: makeAddress(1),
},
AssetConfigTxnFields: transactions.AssetConfigTxnFields{
- ConfigAsset: errorTriggerCreatableIndex,
+ ConfigAsset: 101,
},
},
}
+ createAssetFailedTxn := createAssetTxn
+ createAssetFailedTxn.Txn.ConfigAsset = errorTriggerCreatableIndex
errorReceived := false
- groups := make([][]transactions.SignedTxnWithAD, 5)
- for i := 0; i < 5; i++ {
- groups[i] = make([]transactions.SignedTxnWithAD, 10)
- for j := 0; j < 10; j++ {
+ const numGroups = 5
+ const txnPerGroup = 10
+ groups := make([][]transactions.SignedTxnWithAD, numGroups)
+ for i := 0; i < numGroups; i++ {
+ groups[i] = make([]transactions.SignedTxnWithAD, txnPerGroup)
+ for j := 0; j < txnPerGroup; j++ {
groups[i][j].SignedTxn = createAssetTxn
+ // fail only the first txn in the first group
+ if i == 0 && j == 0 {
+ groups[i][j].SignedTxn = createAssetFailedTxn
+ }
}
}
preloadedTxnGroupsCh := prefetcher.PrefetchAccounts(context.Background(), ledger, rnd+100, groups, feeSinkAddr, config.Consensus[proto])
+ receivedNumGroups := 0
for loadedTxnGroup := range preloadedTxnGroupsCh {
+ receivedNumGroups++
if loadedTxnGroup.Err != nil {
errorReceived = true
require.True(t, errors.Is(loadedTxnGroup.Err, getCreatorError{}))
@@ -634,8 +695,10 @@ func TestGetCreatorForRoundError(t *testing.T) {
require.Equal(t, errorTriggerCreatableIndex, int(loadedTxnGroup.Err.CreatableIndex))
require.Equal(t, basics.AssetCreatable, loadedTxnGroup.Err.CreatableType)
}
+ require.Equal(t, txnPerGroup, len(loadedTxnGroup.TxnGroup))
}
require.True(t, errorReceived)
+ require.Equal(t, numGroups, receivedNumGroups)
}
// Test for error from LookupWithoutRewards
@@ -658,29 +721,41 @@ func TestLookupWithoutRewards(t *testing.T) {
},
},
}
+ createAssetFailedTxn := createAssetTxn
+ createAssetFailedTxn.Txn.Sender = makeAddress(10)
errorReceived := false
- groups := make([][]transactions.SignedTxnWithAD, 5)
- for i := 0; i < 5; i++ {
- groups[i] = make([]transactions.SignedTxnWithAD, 10)
- for j := 0; j < 10; j++ {
+ const numGroups = 5
+ const txnPerGroup = 10
+ groups := make([][]transactions.SignedTxnWithAD, numGroups)
+ for i := 0; i < numGroups; i++ {
+ groups[i] = make([]transactions.SignedTxnWithAD, txnPerGroup)
+ for j := 0; j < txnPerGroup; j++ {
groups[i][j].SignedTxn = createAssetTxn
+ // fail only last txn in the first group
+ if i == 0 && j == txnPerGroup-1 {
+ groups[i][j].SignedTxn = createAssetFailedTxn
+ }
}
}
- ledger.errorTriggerAddress[createAssetTxn.Txn.Sender] = true
+ ledger.errorTriggerAddress[createAssetFailedTxn.Txn.Sender] = true
preloadedTxnGroupsCh := prefetcher.PrefetchAccounts(context.Background(), ledger, rnd+100, groups, feeSinkAddr, config.Consensus[proto])
+ receivedNumGroups := 0
for loadedTxnGroup := range preloadedTxnGroupsCh {
+ receivedNumGroups++
if loadedTxnGroup.Err != nil {
errorReceived = true
require.True(t, errors.Is(loadedTxnGroup.Err, lookupError{}))
- require.Equal(t, makeAddress(1), *loadedTxnGroup.Err.Address)
+ require.Equal(t, makeAddress(10), *loadedTxnGroup.Err.Address)
require.Equal(t, 0, int(loadedTxnGroup.Err.CreatableIndex))
require.Equal(t, basics.AssetCreatable, loadedTxnGroup.Err.CreatableType)
}
+ require.Equal(t, txnPerGroup, len(loadedTxnGroup.TxnGroup))
}
require.True(t, errorReceived)
+ require.Equal(t, numGroups, receivedNumGroups)
}
func TestEvaluatorPrefetcherQueueExpansion(t *testing.T) {
diff --git a/ledger/ledger.go b/ledger/ledger.go
index 85b31ade3..09ec1b3cd 100644
--- a/ledger/ledger.go
+++ b/ledger/ledger.go
@@ -532,6 +532,23 @@ func (l *Ledger) lookupResource(rnd basics.Round, addr basics.Address, aidx basi
return res, nil
}
+// LookupKv loads a KV pair from the accounts update
+func (l *Ledger) LookupKv(rnd basics.Round, key string) ([]byte, error) {
+ l.trackerMu.RLock()
+ defer l.trackerMu.RUnlock()
+
+ return l.accts.LookupKv(rnd, key)
+}
+
+// LookupKeysByPrefix searches keys with specific prefix, up to `maxKeyNum`
+// if `maxKeyNum` == 0, then it loads all keys with such prefix
+func (l *Ledger) LookupKeysByPrefix(round basics.Round, keyPrefix string, maxKeyNum uint64) ([]string, error) {
+ l.trackerMu.RLock()
+ defer l.trackerMu.RUnlock()
+
+ return l.accts.LookupKeysByPrefix(round, keyPrefix, maxKeyNum)
+}
+
// LookupAgreement returns account data used by agreement.
func (l *Ledger) LookupAgreement(rnd basics.Round, addr basics.Address) (basics.OnlineAccountData, error) {
l.trackerMu.RLock()
@@ -795,6 +812,11 @@ func (l *Ledger) StartEvaluator(hdr bookkeeping.BlockHeader, paysetHint, maxTxnB
})
}
+// FlushCaches flushes any pending data in caches so that it is fully available during future lookups.
+func (l *Ledger) FlushCaches() {
+ l.accts.flushCaches()
+}
+
// Validate uses the ledger to validate block blk as a candidate next block.
// It returns an error if blk is not the expected next block, or if blk is
// not a valid block (e.g., it has duplicate transactions, overspends some
diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index 95a0a84e2..7482a668d 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -2230,6 +2230,8 @@ func TestLedgerReloadTxTailHistoryAccess(t *testing.T) {
tp := trackerDBParams{
initAccounts: l.GenesisAccounts(),
initProto: l.GenesisProtoVersion(),
+ genesisHash: l.GenesisHash(),
+ fromCatchpoint: true,
catchpointEnabled: l.catchpoint.catchpointEnabled(),
dbPathPrefix: l.catchpoint.dbDirectory,
blockDb: l.blockDBs,
@@ -2382,14 +2384,15 @@ int %d // 10001000
func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) {
partitiontest.PartitionTest(t)
+ prevAccountDBVersion := accountDBVersion
accountDBVersion = 6
defer func() {
- accountDBVersion = 7
+ accountDBVersion = prevAccountDBVersion
}()
dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
testProtocolVersion := protocol.ConsensusVersion("test-protocol-migrate-shrink-deltas")
proto := config.Consensus[protocol.ConsensusV31]
- proto.RewardsRateRefreshInterval = 500
+ proto.RewardsRateRefreshInterval = 200
config.Consensus[testProtocolVersion] = proto
defer func() {
delete(config.Consensus, testProtocolVersion)
@@ -2420,6 +2423,11 @@ func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) {
if err := accountsCreateCatchpointFirstStageInfoTable(ctx, tx); err != nil {
return err
}
+ // this line creates the kvstore table even though it is not required by the accountDBVersion 6 -> 7 migration;
+ // otherwise, in a later version where the kvstore table is needed, this test would fail
+ if err := accountsCreateBoxTable(ctx, tx); err != nil {
+ return err
+ }
return nil
})
require.NoError(t, err)
@@ -2446,7 +2454,7 @@ func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) {
l.trackers.acctsOnline = nil
l.acctsOnline = onlineAccounts{}
- maxBlocks := 2000
+ maxBlocks := 1000
accounts := make(map[basics.Address]basics.AccountData, len(genesisInitState.Accounts))
keys := make(map[basics.Address]*crypto.SignatureSecrets, len(initKeys))
// regular addresses: all init accounts minus pools
diff --git a/ledger/ledgercore/accountdata.go b/ledger/ledgercore/accountdata.go
index eb09706ff..bafd9f32a 100644
--- a/ledger/ledgercore/accountdata.go
+++ b/ledger/ledgercore/accountdata.go
@@ -40,12 +40,14 @@ type AccountBaseData struct {
RewardedMicroAlgos basics.MicroAlgos
AuthAddr basics.Address
- TotalAppSchema basics.StateSchema
- TotalExtraAppPages uint32
- TotalAppParams uint64
- TotalAppLocalStates uint64
- TotalAssetParams uint64
- TotalAssets uint64
+ TotalAppSchema basics.StateSchema // Totals across created globals, and opted in locals.
+ TotalExtraAppPages uint32 // Total number of extra pages across all created apps
+ TotalAppParams uint64 // Total number of apps this account has created
+ TotalAppLocalStates uint64 // Total number of apps this account is opted into.
+ TotalAssetParams uint64 // Total number of assets created by this account
+ TotalAssets uint64 // Total of asset creations and optins (i.e. number of holdings)
+ TotalBoxes uint64 // Total number of boxes associated to this account
+ TotalBoxBytes uint64 // Total bytes for this account's boxes. keys _and_ values count
}
// VotingData holds participation information
@@ -82,6 +84,8 @@ func ToAccountData(acct basics.AccountData) AccountData {
TotalAssets: uint64(len(acct.Assets)),
TotalAppParams: uint64(len(acct.AppParams)),
TotalAppLocalStates: uint64(len(acct.AppLocalStates)),
+ TotalBoxes: acct.TotalBoxes,
+ TotalBoxBytes: acct.TotalBoxBytes,
},
VotingData: VotingData{
VoteID: acct.VoteID,
@@ -112,6 +116,8 @@ func AssignAccountData(a *basics.AccountData, acct AccountData) {
a.AuthAddr = acct.AuthAddr
a.TotalAppSchema = acct.TotalAppSchema
a.TotalExtraAppPages = acct.TotalExtraAppPages
+ a.TotalBoxes = acct.TotalBoxes
+ a.TotalBoxBytes = acct.TotalBoxBytes
}
// WithUpdatedRewards calls basics account data WithUpdatedRewards
@@ -138,6 +144,7 @@ func (u AccountData) MinBalance(proto *config.ConsensusParams) (res basics.Micro
u.TotalAppSchema,
uint64(u.TotalAppParams), uint64(u.TotalAppLocalStates),
uint64(u.TotalExtraAppPages),
+ u.TotalBoxes, u.TotalBoxBytes,
)
}
diff --git a/ledger/ledgercore/error.go b/ledger/ledgercore/error.go
index 78da7526d..5ce0898b7 100644
--- a/ledger/ledgercore/error.go
+++ b/ledger/ledgercore/error.go
@@ -54,8 +54,8 @@ func MakeLeaseInLedgerError(txid transactions.Txid, lease Txlease) *LeaseInLedge
// Error implements the error interface for the LeaseInLedgerError stuct
func (lile *LeaseInLedgerError) Error() string {
// format the lease as address.
- addr := basics.Address(lile.lease.Lease)
- return fmt.Sprintf("transaction %v using an overlapping lease %s", lile.txid, addr.String())
+ leaseValue := basics.Address(lile.lease.Lease)
+ return fmt.Sprintf("transaction %v using an overlapping lease (sender, lease):(%s, %s)", lile.txid, lile.lease.Sender.String(), leaseValue.String())
}
// BlockInLedgerError is returned when a block cannot be added because it has already been done
diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go
index 57bbbb607..6df155a0b 100644
--- a/ledger/ledgercore/statedelta.go
+++ b/ledger/ledgercore/statedelta.go
@@ -73,21 +73,35 @@ type IncludedTransactions struct {
Intra uint64 // the index of the transaction in the block
}
+// A KvValueDelta shows how the Data associated with a key in the kvstore has
+// changed. However, OldData is elided during evaluation, and only filled in at
+// the conclusion of a block during the call to roundCowState.deltas()
+type KvValueDelta struct {
+ // Data stores the most recent value (nil == deleted)
+ Data []byte
+
+ // OldData stores the previous value (nil == didn't exist)
+ OldData []byte
+}
+
// StateDelta describes the delta between a given round to the previous round
type StateDelta struct {
- // modified accounts
- // Accts AccountDeltas
-
// modified new accounts
Accts AccountDeltas
+ // modified kv pairs (nil == delete)
+ // not preallocated; use .AddKvMod to insert instead of direct assignment
+ KvMods map[string]KvValueDelta
+
// new Txids for the txtail and TxnCounter, mapped to txn.LastValid
Txids map[transactions.Txid]IncludedTransactions
// new txleases for the txtail mapped to expiration
+ // not pre-allocated so use .AddTxLease to insert instead of direct assignment
Txleases map[Txlease]basics.Round
// new creatables creator lookup table
+ // not pre-allocated so use .AddCreatable to insert instead of direct assignment
Creatables map[basics.CreatableIndex]ModifiedCreatable
// new block header; read-only
@@ -107,8 +121,8 @@ type StateDelta struct {
Totals AccountTotals
}
-// NewBalanceRecord is similar to basics.BalanceRecord but with decoupled base and voting data
-type NewBalanceRecord struct {
+// BalanceRecord is similar to basics.BalanceRecord but with decoupled base and voting data
+type BalanceRecord struct {
Addr basics.Address
AccountData
}
@@ -160,18 +174,20 @@ type AssetResourceRecord struct {
// The map would point the address/address+creatable id onto the index of the
// element within the slice.
type AccountDeltas struct {
- // Actual data. If an account is deleted, `accts` contains the NewBalanceRecord
+ // Actual data. If an account is deleted, `Accts` contains the BalanceRecord
// with an empty `AccountData` and a populated `Addr`.
- accts []NewBalanceRecord
+ Accts []BalanceRecord
// cache for addr to deltas index resolution
acctsCache map[basics.Address]int
- // AppResources deltas. If app params or local state is deleted, there is a nil value in appResources.Params or appResources.State and Deleted flag set
- appResources []AppResourceRecord
+ // AppResources deltas. If app params or local state is deleted, there is a nil value in AppResources.Params or AppResources.State and Deleted flag set
+ AppResources []AppResourceRecord
// caches for {addr, app id} to app params delta resolution
+ // not preallocated - use UpsertAppResource instead of inserting directly
appResourcesCache map[AccountApp]int
- assetResources []AssetResourceRecord
+ AssetResources []AssetResourceRecord
+ // not preallocated - use UpsertAssetResource instead of inserting directly
assetResourcesCache map[AccountAsset]int
}
@@ -180,11 +196,9 @@ type AccountDeltas struct {
// This does not play well for AssetConfig and ApplicationCall transactions on scale
func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int, stateProofNext basics.Round) StateDelta {
return StateDelta{
- Accts: MakeAccountDeltas(hint),
- Txids: make(map[transactions.Txid]IncludedTransactions, hint),
- Txleases: make(map[Txlease]basics.Round),
+ Accts: MakeAccountDeltas(hint),
+ Txids: make(map[transactions.Txid]IncludedTransactions, hint),
// asset or application creation are considered as rare events so do not pre-allocate space for them
- Creatables: make(map[basics.CreatableIndex]ModifiedCreatable),
Hdr: hdr,
StateProofNext: stateProofNext,
PrevTimestamp: prevTimestamp,
@@ -195,11 +209,8 @@ func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int,
// MakeAccountDeltas creates account delta
func MakeAccountDeltas(hint int) AccountDeltas {
return AccountDeltas{
- accts: make([]NewBalanceRecord, 0, hint*2),
+ Accts: make([]BalanceRecord, 0, hint*2),
acctsCache: make(map[basics.Address]int, hint*2),
-
- appResourcesCache: make(map[AccountApp]int),
- assetResourcesCache: make(map[AccountAsset]int),
}
}
@@ -209,13 +220,13 @@ func (ad AccountDeltas) GetData(addr basics.Address) (AccountData, bool) {
if !ok {
return AccountData{}, false
}
- return ad.accts[idx].AccountData, true
+ return ad.Accts[idx].AccountData, true
}
// GetAppParams returns app params delta value
func (ad AccountDeltas) GetAppParams(addr basics.Address, aidx basics.AppIndex) (AppParamsDelta, bool) {
if idx, ok := ad.appResourcesCache[AccountApp{addr, aidx}]; ok {
- result := ad.appResources[idx].Params
+ result := ad.AppResources[idx].Params
return result, result.Deleted || result.Params != nil
}
return AppParamsDelta{}, false
@@ -224,7 +235,7 @@ func (ad AccountDeltas) GetAppParams(addr basics.Address, aidx basics.AppIndex)
// GetAssetParams returns asset params delta value
func (ad AccountDeltas) GetAssetParams(addr basics.Address, aidx basics.AssetIndex) (AssetParamsDelta, bool) {
if idx, ok := ad.assetResourcesCache[AccountAsset{addr, aidx}]; ok {
- result := ad.assetResources[idx].Params
+ result := ad.AssetResources[idx].Params
return result, result.Deleted || result.Params != nil
}
return AssetParamsDelta{}, false
@@ -233,7 +244,7 @@ func (ad AccountDeltas) GetAssetParams(addr basics.Address, aidx basics.AssetInd
// GetAppLocalState returns app local state delta value
func (ad AccountDeltas) GetAppLocalState(addr basics.Address, aidx basics.AppIndex) (AppLocalStateDelta, bool) {
if idx, ok := ad.appResourcesCache[AccountApp{addr, aidx}]; ok {
- result := ad.appResources[idx].State
+ result := ad.AppResources[idx].State
return result, result.Deleted || result.LocalState != nil
}
return AppLocalStateDelta{}, false
@@ -242,7 +253,7 @@ func (ad AccountDeltas) GetAppLocalState(addr basics.Address, aidx basics.AppInd
// GetAssetHolding returns asset holding delta value
func (ad AccountDeltas) GetAssetHolding(addr basics.Address, aidx basics.AssetIndex) (AssetHoldingDelta, bool) {
if idx, ok := ad.assetResourcesCache[AccountAsset{addr, aidx}]; ok {
- result := ad.assetResources[idx].Holding
+ result := ad.AssetResources[idx].Holding
return result, result.Deleted || result.Holding != nil
}
return AssetHoldingDelta{}, false
@@ -250,32 +261,32 @@ func (ad AccountDeltas) GetAssetHolding(addr basics.Address, aidx basics.AssetIn
// ModifiedAccounts returns list of addresses of modified accounts
func (ad AccountDeltas) ModifiedAccounts() []basics.Address {
- result := make([]basics.Address, len(ad.accts))
- for i := 0; i < len(ad.accts); i++ {
- result[i] = ad.accts[i].Addr
+ result := make([]basics.Address, len(ad.Accts))
+ for i := 0; i < len(ad.Accts); i++ {
+ result[i] = ad.Accts[i].Addr
}
// consistency check: ensure all addresses for deleted params/holdings/states are also in base accounts
// it is nice to check created params/holdings/states but we lack of such info here
for aapp, idx := range ad.appResourcesCache {
- if ad.appResources[idx].Params.Deleted {
+ if ad.AppResources[idx].Params.Deleted {
if _, ok := ad.acctsCache[aapp.Address]; !ok {
panic(fmt.Sprintf("account app param delta: addr %s not in base account", aapp.Address))
}
}
- if ad.appResources[idx].State.Deleted {
+ if ad.AppResources[idx].State.Deleted {
if _, ok := ad.acctsCache[aapp.Address]; !ok {
panic(fmt.Sprintf("account app state delta: addr %s not in base account", aapp.Address))
}
}
}
for aapp, idx := range ad.assetResourcesCache {
- if ad.assetResources[idx].Params.Deleted {
+ if ad.AssetResources[idx].Params.Deleted {
if _, ok := ad.acctsCache[aapp.Address]; !ok {
panic(fmt.Sprintf("account asset param delta: addr %s not in base account", aapp.Address))
}
}
- if ad.assetResources[idx].Holding.Deleted {
+ if ad.AssetResources[idx].Holding.Deleted {
if _, ok := ad.acctsCache[aapp.Address]; !ok {
panic(fmt.Sprintf("account asset holding delta: addr %s not in base account", aapp.Address))
}
@@ -287,20 +298,20 @@ func (ad AccountDeltas) ModifiedAccounts() []basics.Address {
// MergeAccounts applies other accounts into this StateDelta accounts
func (ad *AccountDeltas) MergeAccounts(other AccountDeltas) {
- for new := range other.accts {
- addr := other.accts[new].Addr
- acct := other.accts[new].AccountData
+ for new := range other.Accts {
+ addr := other.Accts[new].Addr
+ acct := other.Accts[new].AccountData
ad.Upsert(addr, acct)
}
for aapp, idx := range other.appResourcesCache {
- params := other.appResources[idx].Params
- state := other.appResources[idx].State
+ params := other.AppResources[idx].Params
+ state := other.AppResources[idx].State
ad.UpsertAppResource(aapp.Address, aapp.App, params, state)
}
for aapp, idx := range other.assetResourcesCache {
- params := other.assetResources[idx].Params
- holding := other.assetResources[idx].Holding
+ params := other.AssetResources[idx].Params
+ holding := other.AssetResources[idx].Holding
ad.UpsertAssetResource(aapp.Address, aapp.Asset, params, holding)
}
}
@@ -312,16 +323,16 @@ func (ad AccountDeltas) GetResource(addr basics.Address, aidx basics.CreatableIn
aa := AccountAsset{addr, basics.AssetIndex(aidx)}
idx, ok := ad.assetResourcesCache[aa]
if ok {
- ret.AssetParams = ad.assetResources[idx].Params.Params
- ret.AssetHolding = ad.assetResources[idx].Holding.Holding
+ ret.AssetParams = ad.AssetResources[idx].Params.Params
+ ret.AssetHolding = ad.AssetResources[idx].Holding.Holding
}
return ret, ok
case basics.AppCreatable:
aa := AccountApp{addr, basics.AppIndex(aidx)}
idx, ok := ad.appResourcesCache[aa]
if ok {
- ret.AppParams = ad.appResources[idx].Params.Params
- ret.AppLocalState = ad.appResources[idx].State.LocalState
+ ret.AppParams = ad.AppResources[idx].Params.Params
+ ret.AppLocalState = ad.AppResources[idx].State.LocalState
}
return ret, ok
}
@@ -330,24 +341,24 @@ func (ad AccountDeltas) GetResource(addr basics.Address, aidx basics.CreatableIn
// Len returns number of stored accounts
func (ad *AccountDeltas) Len() int {
- return len(ad.accts)
+ return len(ad.Accts)
}
// GetByIdx returns address and AccountData
// It does NOT check boundaries.
func (ad *AccountDeltas) GetByIdx(i int) (basics.Address, AccountData) {
- return ad.accts[i].Addr, ad.accts[i].AccountData
+ return ad.Accts[i].Addr, ad.Accts[i].AccountData
}
// Upsert adds ledgercore.AccountData into deltas
func (ad *AccountDeltas) Upsert(addr basics.Address, data AccountData) {
if idx, exist := ad.acctsCache[addr]; exist { // nil map lookup is OK
- ad.accts[idx] = NewBalanceRecord{Addr: addr, AccountData: data}
+ ad.Accts[idx] = BalanceRecord{Addr: addr, AccountData: data}
return
}
- last := len(ad.accts)
- ad.accts = append(ad.accts, NewBalanceRecord{Addr: addr, AccountData: data})
+ last := len(ad.Accts)
+ ad.Accts = append(ad.Accts, BalanceRecord{Addr: addr, AccountData: data})
if ad.acctsCache == nil {
ad.acctsCache = make(map[basics.Address]int)
@@ -360,12 +371,12 @@ func (ad *AccountDeltas) UpsertAppResource(addr basics.Address, aidx basics.AppI
key := AccountApp{addr, aidx}
value := AppResourceRecord{aidx, addr, params, state}
if idx, exist := ad.appResourcesCache[key]; exist {
- ad.appResources[idx] = value
+ ad.AppResources[idx] = value
return
}
- last := len(ad.appResources)
- ad.appResources = append(ad.appResources, value)
+ last := len(ad.AppResources)
+ ad.AppResources = append(ad.AppResources, value)
if ad.appResourcesCache == nil {
ad.appResourcesCache = make(map[AccountApp]int)
@@ -378,12 +389,12 @@ func (ad *AccountDeltas) UpsertAssetResource(addr basics.Address, aidx basics.As
key := AccountAsset{addr, aidx}
value := AssetResourceRecord{aidx, addr, params, holding}
if idx, exist := ad.assetResourcesCache[key]; exist {
- ad.assetResources[idx] = value
+ ad.AssetResources[idx] = value
return
}
- last := len(ad.assetResources)
- ad.assetResources = append(ad.assetResources, value)
+ last := len(ad.AssetResources)
+ ad.AssetResources = append(ad.AssetResources, value)
if ad.assetResourcesCache == nil {
ad.assetResourcesCache = make(map[AccountAsset]int)
@@ -391,15 +402,39 @@ func (ad *AccountDeltas) UpsertAssetResource(addr basics.Address, aidx basics.As
ad.assetResourcesCache[key] = last
}
+// AddTxLease adds a new TxLease to the StateDelta
+func (sd *StateDelta) AddTxLease(txLease Txlease, expired basics.Round) {
+ if sd.Txleases == nil {
+ sd.Txleases = make(map[Txlease]basics.Round)
+ }
+ sd.Txleases[txLease] = expired
+}
+
+// AddCreatable adds a new Creatable to the StateDelta
+func (sd *StateDelta) AddCreatable(idx basics.CreatableIndex, creatable ModifiedCreatable) {
+ if sd.Creatables == nil {
+ sd.Creatables = make(map[basics.CreatableIndex]ModifiedCreatable)
+ }
+ sd.Creatables[idx] = creatable
+}
+
+// AddKvMod adds a new KvMod to the StateDelta
+func (sd *StateDelta) AddKvMod(key string, delta KvValueDelta) {
+ if sd.KvMods == nil {
+ sd.KvMods = make(map[string]KvValueDelta)
+ }
+ sd.KvMods[key] = delta
+}
+
// OptimizeAllocatedMemory by reallocating maps to needed capacity
// For each data structure, reallocate if it would save us at least 50MB aggregate
// If provided maxBalLookback or maxTxnLife are zero, dependent optimizations will not occur.
func (sd *StateDelta) OptimizeAllocatedMemory(maxBalLookback uint64) {
- // accts takes up 232 bytes per entry, and is saved for 320 rounds
- if uint64(cap(sd.Accts.accts)-len(sd.Accts.accts))*accountArrayEntrySize*maxBalLookback > stateDeltaTargetOptimizationThreshold {
- accts := make([]NewBalanceRecord, len(sd.Accts.accts))
- copy(accts, sd.Accts.accts)
- sd.Accts.accts = accts
+ // Accts takes up 232 bytes per entry, and is saved for 320 rounds
+ if uint64(cap(sd.Accts.Accts)-len(sd.Accts.Accts))*accountArrayEntrySize*maxBalLookback > stateDeltaTargetOptimizationThreshold {
+ accts := make([]BalanceRecord, len(sd.Accts.Accts))
+ copy(accts, sd.Accts.Accts)
+ sd.Accts.Accts = accts
}
// acctsCache takes up 64 bytes per entry, and is saved for 320 rounds
@@ -423,14 +458,14 @@ func (ad AccountDeltas) GetBasicsAccountData(addr basics.Address) (basics.Accoun
}
result := basics.AccountData{}
- acct := ad.accts[idx].AccountData
+ acct := ad.Accts[idx].AccountData
AssignAccountData(&result, acct)
if len(ad.appResourcesCache) > 0 {
result.AppParams = make(map[basics.AppIndex]basics.AppParams)
result.AppLocalStates = make(map[basics.AppIndex]basics.AppLocalState)
for aapp, idx := range ad.appResourcesCache {
- rec := ad.appResources[idx]
+ rec := ad.AppResources[idx]
if aapp.Address == addr {
if !rec.Params.Deleted && rec.Params.Params != nil {
result.AppParams[aapp.App] = *rec.Params.Params
@@ -452,7 +487,7 @@ func (ad AccountDeltas) GetBasicsAccountData(addr basics.Address) (basics.Accoun
result.AssetParams = make(map[basics.AssetIndex]basics.AssetParams)
result.Assets = make(map[basics.AssetIndex]basics.AssetHolding)
for aapp, idx := range ad.assetResourcesCache {
- rec := ad.assetResources[idx]
+ rec := ad.AssetResources[idx]
if aapp.Address == addr {
if !rec.Params.Deleted && rec.Params.Params != nil {
result.AssetParams[aapp.Asset] = *rec.Params.Params
@@ -475,9 +510,9 @@ func (ad AccountDeltas) GetBasicsAccountData(addr basics.Address) (basics.Accoun
// ToModifiedCreatables is only used in tests, to create a map of ModifiedCreatable.
func (ad AccountDeltas) ToModifiedCreatables(seen map[basics.CreatableIndex]struct{}) map[basics.CreatableIndex]ModifiedCreatable {
- result := make(map[basics.CreatableIndex]ModifiedCreatable, len(ad.appResources)+len(ad.assetResources))
+ result := make(map[basics.CreatableIndex]ModifiedCreatable, len(ad.AppResources)+len(ad.AssetResources))
for aapp, idx := range ad.appResourcesCache {
- rec := ad.appResources[idx]
+ rec := ad.AppResources[idx]
if rec.Params.Deleted {
result[basics.CreatableIndex(rec.Aidx)] = ModifiedCreatable{
Ctype: basics.AppCreatable,
@@ -496,7 +531,7 @@ func (ad AccountDeltas) ToModifiedCreatables(seen map[basics.CreatableIndex]stru
}
for aapp, idx := range ad.assetResourcesCache {
- rec := ad.assetResources[idx]
+ rec := ad.AssetResources[idx]
if rec.Params.Deleted {
result[basics.CreatableIndex(rec.Aidx)] = ModifiedCreatable{
Ctype: basics.AssetCreatable,
@@ -540,7 +575,7 @@ func AccumulateDeltas(base map[basics.Address]basics.AccountData, deltas Account
if ad.AppLocalStates == nil {
ad.AppLocalStates = make(map[basics.AppIndex]basics.AppLocalState, acct.TotalAppLocalStates)
}
- rec := deltas.appResources[idx]
+ rec := deltas.AppResources[idx]
if rec.Params.Deleted {
delete(ad.AppParams, aapp.App)
} else if rec.Params.Params != nil {
@@ -566,7 +601,7 @@ func AccumulateDeltas(base map[basics.Address]basics.AccountData, deltas Account
if ad.Assets == nil {
ad.Assets = make(map[basics.AssetIndex]basics.AssetHolding, acct.TotalAssets)
}
- rec := deltas.assetResources[idx]
+ rec := deltas.AssetResources[idx]
if rec.Params.Deleted {
delete(ad.AssetParams, aapp.Asset)
} else if rec.Params.Params != nil {
@@ -617,7 +652,7 @@ func (ad AccountDeltas) ApplyToBasicsAccountData(addr basics.Address, prev basic
}
for aapp, idx := range ad.appResourcesCache {
if aapp.Address == addr {
- rec := ad.appResources[idx]
+ rec := ad.AppResources[idx]
if rec.Params.Deleted {
delete(result.AppParams, aapp.App)
} else if rec.Params.Params != nil {
@@ -637,7 +672,7 @@ func (ad AccountDeltas) ApplyToBasicsAccountData(addr basics.Address, prev basic
}
for aapp, idx := range ad.appResourcesCache {
if aapp.Address == addr {
- rec := ad.appResources[idx]
+ rec := ad.AppResources[idx]
if rec.State.Deleted {
delete(result.AppLocalStates, aapp.App)
} else if rec.State.LocalState != nil {
@@ -657,7 +692,7 @@ func (ad AccountDeltas) ApplyToBasicsAccountData(addr basics.Address, prev basic
}
for aapp, idx := range ad.assetResourcesCache {
if aapp.Address == addr {
- rec := ad.assetResources[idx]
+ rec := ad.AssetResources[idx]
if rec.Params.Deleted {
delete(result.AssetParams, aapp.Asset)
} else if rec.Params.Params != nil {
@@ -677,7 +712,7 @@ func (ad AccountDeltas) ApplyToBasicsAccountData(addr basics.Address, prev basic
}
for aapp, idx := range ad.assetResourcesCache {
if aapp.Address == addr {
- rec := ad.assetResources[idx]
+ rec := ad.AssetResources[idx]
if rec.Holding.Deleted {
delete(result.Assets, aapp.Asset)
} else if rec.Holding.Holding != nil {
@@ -695,10 +730,10 @@ func (ad AccountDeltas) ApplyToBasicsAccountData(addr basics.Address, prev basic
// GetAllAppResources returns all AppResourceRecords
func (ad *AccountDeltas) GetAllAppResources() []AppResourceRecord {
- return ad.appResources
+ return ad.AppResources
}
// GetAllAssetResources returns all AssetResourceRecords
func (ad *AccountDeltas) GetAllAssetResources() []AssetResourceRecord {
- return ad.assetResources
+ return ad.AssetResources
}
diff --git a/ledger/ledgercore/statedelta_test.go b/ledger/ledgercore/statedelta_test.go
index c52a833eb..3947c8e91 100644
--- a/ledger/ledgercore/statedelta_test.go
+++ b/ledger/ledgercore/statedelta_test.go
@@ -96,6 +96,35 @@ func TestAccountDeltas(t *testing.T) {
a.Equal(sample1, data)
}
+func TestMakeStateDeltaMaps(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ sd := MakeStateDelta(nil, 0, 23000, basics.Round(2))
+ require.Nil(t, sd.Txleases)
+ require.Nil(t, sd.Creatables)
+ require.Nil(t, sd.KvMods)
+
+ sd.AddTxLease(Txlease{}, basics.Round(10))
+ require.Len(t, sd.Txleases, 1)
+ sd.AddCreatable(basics.CreatableIndex(5), ModifiedCreatable{})
+ require.Len(t, sd.Creatables, 1)
+ sd.AddKvMod("key", KvValueDelta{Data: []byte("value")})
+ require.Len(t, sd.KvMods, 1)
+
+ txLeaseMap := make(map[Txlease]basics.Round)
+ txLeaseMap[Txlease{}] = basics.Round(10)
+ require.Equal(t, sd.Txleases, txLeaseMap)
+
+ creatableMap := make(map[basics.CreatableIndex]ModifiedCreatable)
+ creatableMap[basics.CreatableIndex(5)] = ModifiedCreatable{}
+ require.Equal(t, sd.Creatables, creatableMap)
+
+ kvModMap := make(map[string]KvValueDelta)
+ kvModMap["key"] = KvValueDelta{Data: []byte("value")}
+ require.Equal(t, sd.KvMods, kvModMap)
+
+}
+
func BenchmarkMakeStateDelta(b *testing.B) {
hint := 23000
b.ReportAllocs()
diff --git a/ledger/lruaccts.go b/ledger/lruaccts.go
index 2c8752c4b..f698f9de7 100644
--- a/ledger/lruaccts.go
+++ b/ledger/lruaccts.go
@@ -37,6 +37,9 @@ type lruAccounts struct {
log logging.Logger
// pendingWritesWarnThreshold is the threshold beyond we would write a warning for exceeding the number of pendingAccounts entries
pendingWritesWarnThreshold int
+
+ pendingNotFound chan basics.Address
+ notFound map[basics.Address]struct{}
}
// init initializes the lruAccounts for use.
@@ -45,6 +48,8 @@ func (m *lruAccounts) init(log logging.Logger, pendingWrites int, pendingWritesW
m.accountsList = newPersistedAccountList().allocateFreeNodes(pendingWrites)
m.accounts = make(map[basics.Address]*persistedAccountDataListNode, pendingWrites)
m.pendingAccounts = make(chan persistedAccountData, pendingWrites)
+ m.notFound = make(map[basics.Address]struct{}, pendingWrites)
+ m.pendingNotFound = make(chan basics.Address, pendingWrites)
m.log = log
m.pendingWritesWarnThreshold = pendingWritesWarnThreshold
}
@@ -58,6 +63,13 @@ func (m *lruAccounts) read(addr basics.Address) (data persistedAccountData, has
return persistedAccountData{}, false
}
+// readNotFound returns whether we have attempted to read this address but it did not exist in the db.
+// thread locking semantics : read lock
+func (m *lruAccounts) readNotFound(addr basics.Address) bool {
+ _, ok := m.notFound[addr]
+ return ok
+}
+
// flushPendingWrites flushes the pending writes to the main lruAccounts cache.
// thread locking semantics : write lock
func (m *lruAccounts) flushPendingWrites() {
@@ -65,12 +77,25 @@ func (m *lruAccounts) flushPendingWrites() {
if pendingEntriesCount >= m.pendingWritesWarnThreshold {
m.log.Warnf("lruAccounts: number of entries in pendingAccounts(%d) exceed the warning threshold of %d", pendingEntriesCount, m.pendingWritesWarnThreshold)
}
+
+outer:
for ; pendingEntriesCount > 0; pendingEntriesCount-- {
select {
case pendingAccountData := <-m.pendingAccounts:
m.write(pendingAccountData)
default:
- return
+ break outer
+ }
+ }
+
+ pendingEntriesCount = len(m.pendingNotFound)
+outer2:
+ for ; pendingEntriesCount > 0; pendingEntriesCount-- {
+ select {
+ case addr := <-m.pendingNotFound:
+ m.notFound[addr] = struct{}{}
+ default:
+ break outer2
}
}
}
@@ -85,6 +110,16 @@ func (m *lruAccounts) writePending(acct persistedAccountData) {
}
}
+// writeNotFoundPending tags an address as not existing in the db.
+// the function doesn't block, and in case of a buffer overflow the entry would not be added.
+// thread locking semantics : no lock is required.
+func (m *lruAccounts) writeNotFoundPending(addr basics.Address) {
+ select {
+ case m.pendingNotFound <- addr:
+ default:
+ }
+}
+
// write a single persistedAccountData to the lruAccounts cache.
// when writing the entry, the round number would be used to determine if it's a newer
// version of what's already on the cache or not. In all cases, the entry is going
@@ -117,5 +152,8 @@ func (m *lruAccounts) prune(newSize int) (removed int) {
m.accountsList.remove(back)
removed++
}
+
+ // clear the notFound list
+ m.notFound = make(map[basics.Address]struct{}, len(m.notFound))
return
}
diff --git a/ledger/lrukv.go b/ledger/lrukv.go
new file mode 100644
index 000000000..45f4f5027
--- /dev/null
+++ b/ledger/lrukv.go
@@ -0,0 +1,132 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "github.com/algorand/go-algorand/logging"
+)
+
+//msgp:ignore cachedKVData
+type cachedKVData struct {
+ persistedKVData
+
+ // kv key
+ key string
+}
+
+// lruKV provides a storage class for the most recently used kv data.
+// It doesn't have any synchronization primitive on its own and requires to be
+// synchronized by the caller.
+type lruKV struct {
+ // kvList contain the list of persistedKVData, where the front ones are the most "fresh"
+ // and the ones on the back are the oldest.
+ kvList *persistedKVDataList
+
+ // kvs provides fast access to the various elements in the list by using the key
+ kvs map[string]*persistedKVDataListNode
+
+ // pendingKVs are used as a way to avoid taking a write-lock. When the caller needs to "materialize" these,
+ // it would call flushPendingWrites and these would be merged into the kvs/kvList
+ pendingKVs chan cachedKVData
+
+ // log interface; used for logging the threshold event.
+ log logging.Logger
+
+ // pendingWritesWarnThreshold is the threshold beyond we would write a warning for exceeding the number of pendingKVs entries
+ pendingWritesWarnThreshold int
+}
+
+// init initializes the lruKV for use.
+// thread locking semantics : write lock
+func (m *lruKV) init(log logging.Logger, pendingWrites int, pendingWritesWarnThreshold int) {
+ m.kvList = newPersistedKVList().allocateFreeNodes(pendingWrites)
+ m.kvs = make(map[string]*persistedKVDataListNode, pendingWrites)
+ m.pendingKVs = make(chan cachedKVData, pendingWrites)
+ m.log = log
+ m.pendingWritesWarnThreshold = pendingWritesWarnThreshold
+}
+
+// read the persistedKVData object that the lruKV has for the given key.
+// thread locking semantics : read lock
+func (m *lruKV) read(key string) (data persistedKVData, has bool) {
+ if el := m.kvs[key]; el != nil {
+ return el.Value.persistedKVData, true
+ }
+ return persistedKVData{}, false
+}
+
+// flushPendingWrites flushes the pending writes to the main lruKV cache.
+// thread locking semantics : write lock
+func (m *lruKV) flushPendingWrites() {
+ pendingEntriesCount := len(m.pendingKVs)
+ if pendingEntriesCount >= m.pendingWritesWarnThreshold {
+ m.log.Warnf("lruKV: number of entries in pendingKVs(%d) exceed the warning threshold of %d", pendingEntriesCount, m.pendingWritesWarnThreshold)
+ }
+ for ; pendingEntriesCount > 0; pendingEntriesCount-- {
+ select {
+ case pendingKVData := <-m.pendingKVs:
+ m.write(pendingKVData.persistedKVData, pendingKVData.key)
+ default:
+ return
+ }
+ }
+}
+
+// writePending writes a single persistedKVData entry to the pendingKVs buffer.
+// the function doesn't block, and in case of a buffer overflow the entry would not be added.
+// thread locking semantics : no lock is required.
+func (m *lruKV) writePending(kv persistedKVData, key string) {
+ select {
+ case m.pendingKVs <- cachedKVData{persistedKVData: kv, key: key}:
+ default:
+ }
+}
+
+// write a single persistedKVData to the lruKV cache.
+// when writing the entry, the round number would be used to determine if it's a newer
+// version of what's already on the cache or not. In all cases, the entry is going
+// to be promoted to the front of the list.
+// thread locking semantics : write lock
+func (m *lruKV) write(kvData persistedKVData, key string) {
+ if el := m.kvs[key]; el != nil {
+ // already exists; is it a newer ?
+ if el.Value.before(&kvData) {
+ // we update with a newer version.
+ el.Value = &cachedKVData{persistedKVData: kvData, key: key}
+ }
+ m.kvList.moveToFront(el)
+ } else {
+ // new entry.
+ m.kvs[key] = m.kvList.pushFront(&cachedKVData{persistedKVData: kvData, key: key})
+ }
+}
+
+// prune adjusts the current size of the lruKV cache, by dropping the least
+// recently used entries.
+// thread locking semantics : write lock
+func (m *lruKV) prune(newSize int) (removed int) {
+ for {
+ if len(m.kvs) <= newSize {
+ break
+ }
+ back := m.kvList.back()
+ delete(m.kvs, back.Value.key)
+ m.kvList.remove(back)
+ removed++
+ }
+ return
+}
diff --git a/ledger/lrukv_test.go b/ledger/lrukv_test.go
new file mode 100644
index 000000000..d26616731
--- /dev/null
+++ b/ledger/lrukv_test.go
@@ -0,0 +1,240 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestLRUBasicKV(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var baseKV lruKV
+ baseKV.init(logging.TestingLog(t), 10, 5)
+
+ kvNum := 50
+ // write 50 KVs
+ for i := 0; i < kvNum; i++ {
+ kvValue := fmt.Sprintf("kv %d value", i)
+ kv := persistedKVData{
+ value: []byte(kvValue),
+ round: basics.Round(i),
+ }
+ baseKV.write(kv, fmt.Sprintf("key%d", i))
+ }
+
+ // verify that all these KVs are truly there.
+ for i := 0; i < kvNum; i++ {
+ kv, has := baseKV.read(fmt.Sprintf("key%d", i))
+ require.True(t, has)
+ require.Equal(t, basics.Round(i), kv.round)
+ require.Equal(t, fmt.Sprintf("kv %d value", i), string(kv.value))
+ }
+
+ // verify expected missing entries
+ for i := kvNum; i < kvNum*2; i++ {
+ kv, has := baseKV.read(fmt.Sprintf("key%d", i))
+ require.False(t, has)
+ require.Equal(t, persistedKVData{}, kv)
+ }
+
+ baseKV.prune(kvNum / 2)
+
+ // verify expected (missing/existing) entries
+ for i := 0; i < kvNum*2; i++ {
+ kv, has := baseKV.read(fmt.Sprintf("key%d", i))
+
+ if i >= kvNum/2 && i < kvNum {
+ // expected to have it.
+ require.True(t, has)
+ require.Equal(t, basics.Round(i), kv.round)
+ require.Equal(t, fmt.Sprintf("kv %d value", i), string(kv.value))
+ } else {
+ require.False(t, has)
+ require.Equal(t, persistedKVData{}, kv)
+ }
+ }
+}
+
+func TestLRUKVPendingWrites(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var baseKV lruKV
+ kvNum := 250
+ baseKV.init(logging.TestingLog(t), kvNum*2, kvNum)
+
+ for i := 0; i < kvNum; i++ {
+ go func(i int) {
+ time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond)
+ kvValue := fmt.Sprintf("kv %d value", i)
+ kv := persistedKVData{
+ value: []byte(kvValue),
+ round: basics.Round(i),
+ }
+ baseKV.writePending(kv, fmt.Sprintf("key%d", i))
+ }(i)
+ }
+ testStarted := time.Now()
+ for {
+ baseKV.flushPendingWrites()
+
+ // check if all kvs were loaded into "main" cache.
+ allKVsLoaded := true
+ for i := 0; i < kvNum; i++ {
+ _, has := baseKV.read(fmt.Sprintf("key%d", i))
+ if !has {
+ allKVsLoaded = false
+ break
+ }
+ }
+ if allKVsLoaded {
+ break
+ }
+ if time.Since(testStarted).Seconds() > 20 {
+ require.Fail(t, "failed after waiting for 20 second")
+ }
+ // not yet, keep looping.
+ }
+}
+
+type lruKVTestLogger struct {
+ logging.Logger
+ WarnfCallback func(string, ...interface{})
+ warnMsgCount int
+}
+
+func (cl *lruKVTestLogger) Warnf(s string, args ...interface{}) {
+ cl.warnMsgCount++
+}
+
+func TestLRUKVPendingWritesWarning(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var baseKV lruKV
+ pendingWritesBuffer := 50
+ pendingWritesThreshold := 40
+ log := &lruKVTestLogger{Logger: logging.TestingLog(t)}
+ baseKV.init(log, pendingWritesBuffer, pendingWritesThreshold)
+ for j := 0; j < 50; j++ {
+ for i := 0; i < j; i++ {
+ kvValue := fmt.Sprintf("kv %d value", i)
+ kv := persistedKVData{
+ value: []byte(kvValue),
+ round: basics.Round(i),
+ }
+ baseKV.writePending(kv, fmt.Sprintf("key%d", i))
+ }
+ baseKV.flushPendingWrites()
+ if j >= pendingWritesThreshold {
+ // expect a warning in the log
+ require.Equal(t, 1+j-pendingWritesThreshold, log.warnMsgCount)
+ }
+ }
+}
+
+func TestLRUKVOmittedPendingWrites(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var baseKV lruKV
+ pendingWritesBuffer := 50
+ pendingWritesThreshold := 40
+ log := &lruKVTestLogger{Logger: logging.TestingLog(t)}
+ baseKV.init(log, pendingWritesBuffer, pendingWritesThreshold)
+
+ for i := 0; i < pendingWritesBuffer*2; i++ {
+ kvValue := fmt.Sprintf("kv %d value", i)
+ kv := persistedKVData{
+ value: []byte(kvValue),
+ round: basics.Round(i),
+ }
+ baseKV.writePending(kv, fmt.Sprintf("key%d", i))
+ }
+
+ baseKV.flushPendingWrites()
+
+ // verify that all these kvs are truly there.
+ for i := 0; i < pendingWritesBuffer; i++ {
+ kv, has := baseKV.read(fmt.Sprintf("key%d", i))
+ require.True(t, has)
+ require.Equal(t, basics.Round(i), kv.round)
+ require.Equal(t, fmt.Sprintf("kv %d value", i), string(kv.value))
+ }
+
+ // verify expected missing entries
+ for i := pendingWritesBuffer; i < pendingWritesBuffer*2; i++ {
+ kv, has := baseKV.read(fmt.Sprintf("key%d", i))
+ require.False(t, has)
+ require.Equal(t, persistedKVData{}, kv)
+ }
+}
+
+func BenchmarkLRUKVWrite(b *testing.B) {
+ numTestKV := 5000
+ // there are 2500 kvs that overlap
+ fillerKVs := generatePersistedKVData(0, 97500)
+ kvs := generatePersistedKVData(97500-numTestKV/2, 97500+numTestKV/2)
+
+ benchLruWriteKVs(b, fillerKVs, kvs)
+}
+
+func benchLruWriteKVs(b *testing.B, fillerKVs []cachedKVData, kvs []cachedKVData) {
+ b.ResetTimer()
+ b.StopTimer()
+ var baseKV lruKV
+ // setting up the baseKV with a predefined cache size
+ baseKV.init(logging.TestingLog(b), baseKVPendingBufferSize, baseKVPendingWarnThreshold)
+ for i := 0; i < b.N; i++ {
+ baseKV = fillLRUKV(baseKV, fillerKVs)
+
+ b.StartTimer()
+ fillLRUKV(baseKV, kvs)
+ b.StopTimer()
+ baseKV.prune(0)
+ }
+}
+
+func fillLRUKV(baseKV lruKV, fillerKVs []cachedKVData) lruKV {
+ for _, entry := range fillerKVs {
+ baseKV.write(entry.persistedKVData, entry.key)
+ }
+ return baseKV
+}
+
+func generatePersistedKVData(startRound, endRound int) []cachedKVData {
+ kvs := make([]cachedKVData, endRound-startRound)
+ for i := startRound; i < endRound; i++ {
+ kvValue := fmt.Sprintf("kv %d value", i)
+
+ kvs[i-startRound] = cachedKVData{
+ persistedKVData: persistedKVData{
+ value: []byte(kvValue),
+ round: basics.Round(i + startRound),
+ },
+ key: fmt.Sprintf("key%d", i),
+ }
+ }
+ return kvs
+}
diff --git a/ledger/lruresources.go b/ledger/lruresources.go
index 8ab62f0ff..70a2a4c14 100644
--- a/ledger/lruresources.go
+++ b/ledger/lruresources.go
@@ -48,6 +48,9 @@ type lruResources struct {
// pendingWritesWarnThreshold is the threshold beyond we would write a warning for exceeding the number of pendingResources entries
pendingWritesWarnThreshold int
+
+ pendingNotFound chan accountCreatable
+ notFound map[accountCreatable]struct{}
}
// init initializes the lruResources for use.
@@ -56,6 +59,8 @@ func (m *lruResources) init(log logging.Logger, pendingWrites int, pendingWrites
m.resourcesList = newPersistedResourcesList().allocateFreeNodes(pendingWrites)
m.resources = make(map[accountCreatable]*persistedResourcesDataListNode, pendingWrites)
m.pendingResources = make(chan cachedResourceData, pendingWrites)
+ m.notFound = make(map[accountCreatable]struct{}, pendingWrites)
+ m.pendingNotFound = make(chan accountCreatable, pendingWrites)
m.log = log
m.pendingWritesWarnThreshold = pendingWritesWarnThreshold
}
@@ -69,6 +74,13 @@ func (m *lruResources) read(addr basics.Address, aidx basics.CreatableIndex) (da
return persistedResourcesData{}, false
}
+// readNotFound returns whether we have attempted to read this address but it did not exist in the db.
+// thread locking semantics : read lock
+func (m *lruResources) readNotFound(addr basics.Address, idx basics.CreatableIndex) bool {
+ _, ok := m.notFound[accountCreatable{address: addr, index: idx}]
+ return ok
+}
+
// read the persistedResourcesData object that the lruResources has for the given address.
// thread locking semantics : read lock
func (m *lruResources) readAll(addr basics.Address) (ret []persistedResourcesData) {
@@ -87,12 +99,25 @@ func (m *lruResources) flushPendingWrites() {
if pendingEntriesCount >= m.pendingWritesWarnThreshold {
m.log.Warnf("lruResources: number of entries in pendingResources(%d) exceed the warning threshold of %d", pendingEntriesCount, m.pendingWritesWarnThreshold)
}
+
+outer:
for ; pendingEntriesCount > 0; pendingEntriesCount-- {
select {
case pendingResourceData := <-m.pendingResources:
m.write(pendingResourceData.persistedResourcesData, pendingResourceData.address)
default:
- return
+ break outer
+ }
+ }
+
+ pendingEntriesCount = len(m.pendingNotFound)
+outer2:
+ for ; pendingEntriesCount > 0; pendingEntriesCount-- {
+ select {
+ case key := <-m.pendingNotFound:
+ m.notFound[key] = struct{}{}
+ default:
+ break outer2
}
}
}
@@ -107,6 +132,16 @@ func (m *lruResources) writePending(acct persistedResourcesData, addr basics.Add
}
}
+// writeNotFoundPending tags an address as not existing in the db.
+// the function doesn't block, and in case of a buffer overflow the entry would not be added.
+// thread locking semantics : no lock is required.
+func (m *lruResources) writeNotFoundPending(addr basics.Address, idx basics.CreatableIndex) {
+ select {
+ case m.pendingNotFound <- accountCreatable{address: addr, index: idx}:
+ default:
+ }
+}
+
// write a single persistedAccountData to the lruResources cache.
// when writing the entry, the round number would be used to determine if it's a newer
// version of what's already on the cache or not. In all cases, the entry is going
@@ -139,5 +174,8 @@ func (m *lruResources) prune(newSize int) (removed int) {
m.resourcesList.remove(back)
removed++
}
+
+ // clear the notFound list
+ m.notFound = make(map[accountCreatable]struct{}, len(m.notFound))
return
}
diff --git a/ledger/msgp_gen.go b/ledger/msgp_gen.go
index d76a3a0db..9a1be6eb6 100644
--- a/ledger/msgp_gen.go
+++ b/ledger/msgp_gen.go
@@ -61,13 +61,13 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
-// catchpointFileBalancesChunkV6
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
+// catchpointFileChunkV6
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
//
// catchpointFirstStageInfo
// |-----> (*) MarshalMsg
@@ -101,6 +101,22 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// encodedKVRecordV6
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// hashKind
+// |-----> MarshalMsg
+// |-----> CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> Msgsize
+// |-----> MsgIsZero
+//
// resourceFlags
// |-----> MarshalMsg
// |-----> CanMarshalMsg
@@ -184,8 +200,8 @@ func (z CatchpointCatchupState) MsgIsZero() bool {
func (z *CatchpointFileHeader) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0001Len := uint32(8)
- var zb0001Mask uint16 /* 9 bits */
+ zb0001Len := uint32(9)
+ var zb0001Mask uint16 /* 10 bits */
if (*z).Totals.MsgIsZero() {
zb0001Len--
zb0001Mask |= 0x2
@@ -214,10 +230,14 @@ func (z *CatchpointFileHeader) MarshalMsg(b []byte) (o []byte) {
zb0001Len--
zb0001Mask |= 0x80
}
- if (*z).Version == 0 {
+ if (*z).TotalKVs == 0 {
zb0001Len--
zb0001Mask |= 0x100
}
+ if (*z).Version == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x200
+ }
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
if zb0001Len != 0 {
@@ -257,6 +277,11 @@ func (z *CatchpointFileHeader) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendUint64(o, (*z).TotalChunks)
}
if (zb0001Mask & 0x100) == 0 { // if not empty
+ // string "kvsCount"
+ o = append(o, 0xa8, 0x6b, 0x76, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
+ o = msgp.AppendUint64(o, (*z).TotalKVs)
+ }
+ if (zb0001Mask & 0x200) == 0 { // if not empty
// string "version"
o = append(o, 0xa7, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
o = msgp.AppendUint64(o, (*z).Version)
@@ -333,6 +358,14 @@ func (z *CatchpointFileHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0001 > 0 {
zb0001--
+ (*z).TotalKVs, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TotalKVs")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
(*z).Catchpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Catchpoint")
@@ -406,6 +439,12 @@ func (z *CatchpointFileHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "TotalChunks")
return
}
+ case "kvsCount":
+ (*z).TotalKVs, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TotalKVs")
+ return
+ }
case "catchpoint":
(*z).Catchpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
@@ -438,21 +477,21 @@ func (_ *CatchpointFileHeader) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *CatchpointFileHeader) Msgsize() (s int) {
- s = 1 + 8 + msgp.Uint64Size + 14 + (*z).BalancesRound.Msgsize() + 12 + (*z).BlocksRound.Msgsize() + 14 + (*z).Totals.Msgsize() + 14 + msgp.Uint64Size + 12 + msgp.Uint64Size + 11 + msgp.StringPrefixSize + len((*z).Catchpoint) + 18 + (*z).BlockHeaderDigest.Msgsize()
+ s = 1 + 8 + msgp.Uint64Size + 14 + (*z).BalancesRound.Msgsize() + 12 + (*z).BlocksRound.Msgsize() + 14 + (*z).Totals.Msgsize() + 14 + msgp.Uint64Size + 12 + msgp.Uint64Size + 9 + msgp.Uint64Size + 11 + msgp.StringPrefixSize + len((*z).Catchpoint) + 18 + (*z).BlockHeaderDigest.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
func (z *CatchpointFileHeader) MsgIsZero() bool {
- return ((*z).Version == 0) && ((*z).BalancesRound.MsgIsZero()) && ((*z).BlocksRound.MsgIsZero()) && ((*z).Totals.MsgIsZero()) && ((*z).TotalAccounts == 0) && ((*z).TotalChunks == 0) && ((*z).Catchpoint == "") && ((*z).BlockHeaderDigest.MsgIsZero())
+ return ((*z).Version == 0) && ((*z).BalancesRound.MsgIsZero()) && ((*z).BlocksRound.MsgIsZero()) && ((*z).Totals.MsgIsZero()) && ((*z).TotalAccounts == 0) && ((*z).TotalChunks == 0) && ((*z).TotalKVs == 0) && ((*z).Catchpoint == "") && ((*z).BlockHeaderDigest.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
func (z *baseAccountData) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0001Len := uint32(19)
- var zb0001Mask uint32 /* 21 bits */
+ zb0001Len := uint32(21)
+ var zb0001Mask uint32 /* 23 bits */
if (*z).baseVotingData.VoteID.MsgIsZero() {
zb0001Len--
zb0001Mask |= 0x1
@@ -525,10 +564,18 @@ func (z *baseAccountData) MarshalMsg(b []byte) (o []byte) {
zb0001Len--
zb0001Mask |= 0x80000
}
- if (*z).UpdateRound == 0 {
+ if (*z).TotalBoxes == 0 {
zb0001Len--
zb0001Mask |= 0x100000
}
+ if (*z).TotalBoxBytes == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x200000
+ }
+ if (*z).UpdateRound == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x400000
+ }
// variable map header, size zb0001Len
o = msgp.AppendMapHeader(o, zb0001Len)
if zb0001Len != 0 {
@@ -623,6 +670,16 @@ func (z *baseAccountData) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendUint64(o, (*z).TotalAppLocalStates)
}
if (zb0001Mask & 0x100000) == 0 { // if not empty
+ // string "m"
+ o = append(o, 0xa1, 0x6d)
+ o = msgp.AppendUint64(o, (*z).TotalBoxes)
+ }
+ if (zb0001Mask & 0x200000) == 0 { // if not empty
+ // string "n"
+ o = append(o, 0xa1, 0x6e)
+ o = msgp.AppendUint64(o, (*z).TotalBoxBytes)
+ }
+ if (zb0001Mask & 0x400000) == 0 { // if not empty
// string "z"
o = append(o, 0xa1, 0x7a)
o = msgp.AppendUint64(o, (*z).UpdateRound)
@@ -747,6 +804,22 @@ func (z *baseAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0001 > 0 {
zb0001--
+ (*z).TotalBoxes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TotalBoxes")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).TotalBoxBytes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TotalBoxBytes")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
bts, err = (*z).baseVotingData.VoteID.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "VoteID")
@@ -896,6 +969,18 @@ func (z *baseAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "TotalAppLocalStates")
return
}
+ case "m":
+ (*z).TotalBoxes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TotalBoxes")
+ return
+ }
+ case "n":
+ (*z).TotalBoxBytes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TotalBoxBytes")
+ return
+ }
case "A":
bts, err = (*z).baseVotingData.VoteID.UnmarshalMsg(bts)
if err != nil {
@@ -958,13 +1043,13 @@ func (_ *baseAccountData) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *baseAccountData) Msgsize() (s int) {
- s = 3 + 2 + (*z).Status.Msgsize() + 2 + (*z).MicroAlgos.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).RewardedMicroAlgos.Msgsize() + 2 + (*z).AuthAddr.Msgsize() + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint32Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + (*z).baseVotingData.VoteID.Msgsize() + 2 + (*z).baseVotingData.SelectionID.Msgsize() + 2 + (*z).baseVotingData.VoteFirstValid.Msgsize() + 2 + (*z).baseVotingData.VoteLastValid.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).baseVotingData.StateProofID.Msgsize() + 2 + msgp.Uint64Size
+ s = 3 + 2 + (*z).Status.Msgsize() + 2 + (*z).MicroAlgos.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).RewardedMicroAlgos.Msgsize() + 2 + (*z).AuthAddr.Msgsize() + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint32Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + (*z).baseVotingData.VoteID.Msgsize() + 2 + (*z).baseVotingData.SelectionID.Msgsize() + 2 + (*z).baseVotingData.VoteFirstValid.Msgsize() + 2 + (*z).baseVotingData.VoteLastValid.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).baseVotingData.StateProofID.Msgsize() + 2 + msgp.Uint64Size
return
}
// MsgIsZero returns whether this is a zero value
func (z *baseAccountData) MsgIsZero() bool {
- return ((*z).Status.MsgIsZero()) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0) && ((*z).RewardedMicroAlgos.MsgIsZero()) && ((*z).AuthAddr.MsgIsZero()) && ((*z).TotalAppSchemaNumUint == 0) && ((*z).TotalAppSchemaNumByteSlice == 0) && ((*z).TotalExtraAppPages == 0) && ((*z).TotalAssetParams == 0) && ((*z).TotalAssets == 0) && ((*z).TotalAppParams == 0) && ((*z).TotalAppLocalStates == 0) && ((*z).baseVotingData.VoteID.MsgIsZero()) && ((*z).baseVotingData.SelectionID.MsgIsZero()) && ((*z).baseVotingData.VoteFirstValid.MsgIsZero()) && ((*z).baseVotingData.VoteLastValid.MsgIsZero()) && ((*z).baseVotingData.VoteKeyDilution == 0) && ((*z).baseVotingData.StateProofID.MsgIsZero()) && ((*z).UpdateRound == 0)
+ return ((*z).Status.MsgIsZero()) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0) && ((*z).RewardedMicroAlgos.MsgIsZero()) && ((*z).AuthAddr.MsgIsZero()) && ((*z).TotalAppSchemaNumUint == 0) && ((*z).TotalAppSchemaNumByteSlice == 0) && ((*z).TotalExtraAppPages == 0) && ((*z).TotalAssetParams == 0) && ((*z).TotalAssets == 0) && ((*z).TotalAppParams == 0) && ((*z).TotalAppLocalStates == 0) && ((*z).TotalBoxes == 0) && ((*z).TotalBoxBytes == 0) && ((*z).baseVotingData.VoteID.MsgIsZero()) && ((*z).baseVotingData.SelectionID.MsgIsZero()) && ((*z).baseVotingData.VoteFirstValid.MsgIsZero()) && ((*z).baseVotingData.VoteLastValid.MsgIsZero()) && ((*z).baseVotingData.VoteKeyDilution == 0) && ((*z).baseVotingData.StateProofID.MsgIsZero()) && ((*z).UpdateRound == 0)
}
// MarshalMsg implements msgp.Marshaler
@@ -1764,19 +1849,23 @@ func (z *catchpointFileBalancesChunkV5) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
-func (z *catchpointFileBalancesChunkV6) MarshalMsg(b []byte) (o []byte) {
+func (z *catchpointFileChunkV6) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0002Len := uint32(1)
- var zb0002Mask uint8 /* 3 bits */
+ zb0003Len := uint32(2)
+ var zb0003Mask uint8 /* 4 bits */
if len((*z).Balances) == 0 {
- zb0002Len--
- zb0002Mask |= 0x2
+ zb0003Len--
+ zb0003Mask |= 0x2
}
- // variable map header, size zb0002Len
- o = append(o, 0x80|uint8(zb0002Len))
- if zb0002Len != 0 {
- if (zb0002Mask & 0x2) == 0 { // if not empty
+ if len((*z).KVs) == 0 {
+ zb0003Len--
+ zb0003Mask |= 0x4
+ }
+ // variable map header, size zb0003Len
+ o = append(o, 0x80|uint8(zb0003Len))
+ if zb0003Len != 0 {
+ if (zb0003Mask & 0x2) == 0 { // if not empty
// string "bl"
o = append(o, 0xa2, 0x62, 0x6c)
if (*z).Balances == nil {
@@ -1788,48 +1877,82 @@ func (z *catchpointFileBalancesChunkV6) MarshalMsg(b []byte) (o []byte) {
o = (*z).Balances[zb0001].MarshalMsg(o)
}
}
+ if (zb0003Mask & 0x4) == 0 { // if not empty
+ // string "kv"
+ o = append(o, 0xa2, 0x6b, 0x76)
+ if (*z).KVs == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).KVs)))
+ }
+ for zb0002 := range (*z).KVs {
+ // omitempty: check for empty values
+ zb0004Len := uint32(2)
+ var zb0004Mask uint8 /* 3 bits */
+ if len((*z).KVs[zb0002].Key) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x2
+ }
+ if len((*z).KVs[zb0002].Value) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x4
+ }
+ // variable map header, size zb0004Len
+ o = append(o, 0x80|uint8(zb0004Len))
+ if (zb0004Mask & 0x2) == 0 { // if not empty
+ // string "k"
+ o = append(o, 0xa1, 0x6b)
+ o = msgp.AppendBytes(o, (*z).KVs[zb0002].Key)
+ }
+ if (zb0004Mask & 0x4) == 0 { // if not empty
+ // string "v"
+ o = append(o, 0xa1, 0x76)
+ o = msgp.AppendBytes(o, (*z).KVs[zb0002].Value)
+ }
+ }
+ }
}
return
}
-func (_ *catchpointFileBalancesChunkV6) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*catchpointFileBalancesChunkV6)
+func (_ *catchpointFileChunkV6) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*catchpointFileChunkV6)
return ok
}
// UnmarshalMsg implements msgp.Unmarshaler
-func (z *catchpointFileBalancesChunkV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
+func (z *catchpointFileChunkV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0002 int
- var zb0003 bool
- zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0002 > 0 {
- zb0002--
- var zb0004 int
- var zb0005 bool
- zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Balances")
return
}
- if zb0004 > BalancesPerCatchpointFileChunk {
- err = msgp.ErrOverflow(uint64(zb0004), uint64(BalancesPerCatchpointFileChunk))
+ if zb0005 > BalancesPerCatchpointFileChunk {
+ err = msgp.ErrOverflow(uint64(zb0005), uint64(BalancesPerCatchpointFileChunk))
err = msgp.WrapError(err, "struct-from-array", "Balances")
return
}
- if zb0005 {
+ if zb0006 {
(*z).Balances = nil
- } else if (*z).Balances != nil && cap((*z).Balances) >= zb0004 {
- (*z).Balances = ((*z).Balances)[:zb0004]
+ } else if (*z).Balances != nil && cap((*z).Balances) >= zb0005 {
+ (*z).Balances = ((*z).Balances)[:zb0005]
} else {
- (*z).Balances = make([]encodedBalanceRecordV6, zb0004)
+ (*z).Balances = make([]encodedBalanceRecordV6, zb0005)
}
for zb0001 := range (*z).Balances {
bts, err = (*z).Balances[zb0001].UnmarshalMsg(bts)
@@ -1839,8 +1962,141 @@ func (z *catchpointFileBalancesChunkV6) UnmarshalMsg(bts []byte) (o []byte, err
}
}
}
- if zb0002 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0002)
+ if zb0003 > 0 {
+ zb0003--
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs")
+ return
+ }
+ if zb0007 > BalancesPerCatchpointFileChunk {
+ err = msgp.ErrOverflow(uint64(zb0007), uint64(BalancesPerCatchpointFileChunk))
+ err = msgp.WrapError(err, "struct-from-array", "KVs")
+ return
+ }
+ if zb0008 {
+ (*z).KVs = nil
+ } else if (*z).KVs != nil && cap((*z).KVs) >= zb0007 {
+ (*z).KVs = ((*z).KVs)[:zb0007]
+ } else {
+ (*z).KVs = make([]encodedKVRecordV6, zb0007)
+ }
+ for zb0002 := range (*z).KVs {
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002)
+ return
+ }
+ if zb0009 > 0 {
+ zb0009--
+ var zb0011 int
+ zb0011, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "struct-from-array", "Key")
+ return
+ }
+ if zb0011 > encodedKVRecordV6MaxKeyLength {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(encodedKVRecordV6MaxKeyLength))
+ return
+ }
+ (*z).KVs[zb0002].Key, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Key)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "struct-from-array", "Key")
+ return
+ }
+ }
+ if zb0009 > 0 {
+ zb0009--
+ var zb0012 int
+ zb0012, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "struct-from-array", "Value")
+ return
+ }
+ if zb0012 > encodedKVRecordV6MaxValueLength {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(encodedKVRecordV6MaxValueLength))
+ return
+ }
+ (*z).KVs[zb0002].Value, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Value)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "struct-from-array", "Value")
+ return
+ }
+ }
+ if zb0009 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0009)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002)
+ return
+ }
+ if zb0010 {
+ (*z).KVs[zb0002] = encodedKVRecordV6{}
+ }
+ for zb0009 > 0 {
+ zb0009--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002)
+ return
+ }
+ switch string(field) {
+ case "k":
+ var zb0013 int
+ zb0013, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "Key")
+ return
+ }
+ if zb0013 > encodedKVRecordV6MaxKeyLength {
+ err = msgp.ErrOverflow(uint64(zb0013), uint64(encodedKVRecordV6MaxKeyLength))
+ return
+ }
+ (*z).KVs[zb0002].Key, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Key)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "Key")
+ return
+ }
+ case "v":
+ var zb0014 int
+ zb0014, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "Value")
+ return
+ }
+ if zb0014 > encodedKVRecordV6MaxValueLength {
+ err = msgp.ErrOverflow(uint64(zb0014), uint64(encodedKVRecordV6MaxValueLength))
+ return
+ }
+ (*z).KVs[zb0002].Value, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Value)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "Value")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002)
+ return
+ }
+ }
+ }
+ }
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -1851,11 +2107,11 @@ func (z *catchpointFileBalancesChunkV6) UnmarshalMsg(bts []byte) (o []byte, err
err = msgp.WrapError(err)
return
}
- if zb0003 {
- (*z) = catchpointFileBalancesChunkV6{}
+ if zb0004 {
+ (*z) = catchpointFileChunkV6{}
}
- for zb0002 > 0 {
- zb0002--
+ for zb0003 > 0 {
+ zb0003--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -1863,24 +2119,24 @@ func (z *catchpointFileBalancesChunkV6) UnmarshalMsg(bts []byte) (o []byte, err
}
switch string(field) {
case "bl":
- var zb0006 int
- var zb0007 bool
- zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0015 int
+ var zb0016 bool
+ zb0015, zb0016, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Balances")
return
}
- if zb0006 > BalancesPerCatchpointFileChunk {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(BalancesPerCatchpointFileChunk))
+ if zb0015 > BalancesPerCatchpointFileChunk {
+ err = msgp.ErrOverflow(uint64(zb0015), uint64(BalancesPerCatchpointFileChunk))
err = msgp.WrapError(err, "Balances")
return
}
- if zb0007 {
+ if zb0016 {
(*z).Balances = nil
- } else if (*z).Balances != nil && cap((*z).Balances) >= zb0006 {
- (*z).Balances = ((*z).Balances)[:zb0006]
+ } else if (*z).Balances != nil && cap((*z).Balances) >= zb0015 {
+ (*z).Balances = ((*z).Balances)[:zb0015]
} else {
- (*z).Balances = make([]encodedBalanceRecordV6, zb0006)
+ (*z).Balances = make([]encodedBalanceRecordV6, zb0015)
}
for zb0001 := range (*z).Balances {
bts, err = (*z).Balances[zb0001].UnmarshalMsg(bts)
@@ -1889,6 +2145,137 @@ func (z *catchpointFileBalancesChunkV6) UnmarshalMsg(bts []byte) (o []byte, err
return
}
}
+ case "kv":
+ var zb0017 int
+ var zb0018 bool
+ zb0017, zb0018, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs")
+ return
+ }
+ if zb0017 > BalancesPerCatchpointFileChunk {
+ err = msgp.ErrOverflow(uint64(zb0017), uint64(BalancesPerCatchpointFileChunk))
+ err = msgp.WrapError(err, "KVs")
+ return
+ }
+ if zb0018 {
+ (*z).KVs = nil
+ } else if (*z).KVs != nil && cap((*z).KVs) >= zb0017 {
+ (*z).KVs = ((*z).KVs)[:zb0017]
+ } else {
+ (*z).KVs = make([]encodedKVRecordV6, zb0017)
+ }
+ for zb0002 := range (*z).KVs {
+ var zb0019 int
+ var zb0020 bool
+ zb0019, zb0020, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0019, zb0020, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002)
+ return
+ }
+ if zb0019 > 0 {
+ zb0019--
+ var zb0021 int
+ zb0021, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "struct-from-array", "Key")
+ return
+ }
+ if zb0021 > encodedKVRecordV6MaxKeyLength {
+ err = msgp.ErrOverflow(uint64(zb0021), uint64(encodedKVRecordV6MaxKeyLength))
+ return
+ }
+ (*z).KVs[zb0002].Key, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Key)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "struct-from-array", "Key")
+ return
+ }
+ }
+ if zb0019 > 0 {
+ zb0019--
+ var zb0022 int
+ zb0022, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "struct-from-array", "Value")
+ return
+ }
+ if zb0022 > encodedKVRecordV6MaxValueLength {
+ err = msgp.ErrOverflow(uint64(zb0022), uint64(encodedKVRecordV6MaxValueLength))
+ return
+ }
+ (*z).KVs[zb0002].Value, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Value)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "struct-from-array", "Value")
+ return
+ }
+ }
+ if zb0019 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0019)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002)
+ return
+ }
+ if zb0020 {
+ (*z).KVs[zb0002] = encodedKVRecordV6{}
+ }
+ for zb0019 > 0 {
+ zb0019--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002)
+ return
+ }
+ switch string(field) {
+ case "k":
+ var zb0023 int
+ zb0023, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "Key")
+ return
+ }
+ if zb0023 > encodedKVRecordV6MaxKeyLength {
+ err = msgp.ErrOverflow(uint64(zb0023), uint64(encodedKVRecordV6MaxKeyLength))
+ return
+ }
+ (*z).KVs[zb0002].Key, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Key)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "Key")
+ return
+ }
+ case "v":
+ var zb0024 int
+ zb0024, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "Value")
+ return
+ }
+ if zb0024 > encodedKVRecordV6MaxValueLength {
+ err = msgp.ErrOverflow(uint64(zb0024), uint64(encodedKVRecordV6MaxValueLength))
+ return
+ }
+ (*z).KVs[zb0002].Value, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Value)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "Value")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002)
+ return
+ }
+ }
+ }
+ }
+ }
default:
err = msgp.ErrNoField(string(field))
if err != nil {
@@ -1902,31 +2289,35 @@ func (z *catchpointFileBalancesChunkV6) UnmarshalMsg(bts []byte) (o []byte, err
return
}
-func (_ *catchpointFileBalancesChunkV6) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*catchpointFileBalancesChunkV6)
+func (_ *catchpointFileChunkV6) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*catchpointFileChunkV6)
return ok
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *catchpointFileBalancesChunkV6) Msgsize() (s int) {
+func (z *catchpointFileChunkV6) Msgsize() (s int) {
s = 1 + 3 + msgp.ArrayHeaderSize
for zb0001 := range (*z).Balances {
s += (*z).Balances[zb0001].Msgsize()
}
+ s += 3 + msgp.ArrayHeaderSize
+ for zb0002 := range (*z).KVs {
+ s += 1 + 2 + msgp.BytesPrefixSize + len((*z).KVs[zb0002].Key) + 2 + msgp.BytesPrefixSize + len((*z).KVs[zb0002].Value)
+ }
return
}
// MsgIsZero returns whether this is a zero value
-func (z *catchpointFileBalancesChunkV6) MsgIsZero() bool {
- return (len((*z).Balances) == 0)
+func (z *catchpointFileChunkV6) MsgIsZero() bool {
+ return (len((*z).Balances) == 0) && (len((*z).KVs) == 0)
}
// MarshalMsg implements msgp.Marshaler
func (z *catchpointFirstStageInfo) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0001Len := uint32(5)
- var zb0001Mask uint8 /* 6 bits */
+ zb0001Len := uint32(6)
+ var zb0001Mask uint8 /* 7 bits */
if (*z).Totals.MsgIsZero() {
zb0001Len--
zb0001Mask |= 0x2
@@ -1943,10 +2334,14 @@ func (z *catchpointFirstStageInfo) MarshalMsg(b []byte) (o []byte) {
zb0001Len--
zb0001Mask |= 0x10
}
- if (*z).TrieBalancesHash.MsgIsZero() {
+ if (*z).TotalKVs == 0 {
zb0001Len--
zb0001Mask |= 0x20
}
+ if (*z).TrieBalancesHash.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x40
+ }
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
if zb0001Len != 0 {
@@ -1971,6 +2366,11 @@ func (z *catchpointFirstStageInfo) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendUint64(o, (*z).TotalChunks)
}
if (zb0001Mask & 0x20) == 0 { // if not empty
+ // string "kvsCount"
+ o = append(o, 0xa8, 0x6b, 0x76, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
+ o = msgp.AppendUint64(o, (*z).TotalKVs)
+ }
+ if (zb0001Mask & 0x40) == 0 { // if not empty
// string "trieBalancesHash"
o = append(o, 0xb0, 0x74, 0x72, 0x69, 0x65, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x48, 0x61, 0x73, 0x68)
o = (*z).TrieBalancesHash.MarshalMsg(o)
@@ -2023,6 +2423,14 @@ func (z *catchpointFirstStageInfo) UnmarshalMsg(bts []byte) (o []byte, err error
}
if zb0001 > 0 {
zb0001--
+ (*z).TotalKVs, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TotalKVs")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
(*z).TotalChunks, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TotalChunks")
@@ -2078,6 +2486,12 @@ func (z *catchpointFirstStageInfo) UnmarshalMsg(bts []byte) (o []byte, err error
err = msgp.WrapError(err, "TotalAccounts")
return
}
+ case "kvsCount":
+ (*z).TotalKVs, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TotalKVs")
+ return
+ }
case "chunksCount":
(*z).TotalChunks, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
@@ -2110,13 +2524,13 @@ func (_ *catchpointFirstStageInfo) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *catchpointFirstStageInfo) Msgsize() (s int) {
- s = 1 + 14 + (*z).Totals.Msgsize() + 17 + (*z).TrieBalancesHash.Msgsize() + 14 + msgp.Uint64Size + 12 + msgp.Uint64Size + 13 + msgp.Uint64Size
+ s = 1 + 14 + (*z).Totals.Msgsize() + 17 + (*z).TrieBalancesHash.Msgsize() + 14 + msgp.Uint64Size + 9 + msgp.Uint64Size + 12 + msgp.Uint64Size + 13 + msgp.Uint64Size
return
}
// MsgIsZero returns whether this is a zero value
func (z *catchpointFirstStageInfo) MsgIsZero() bool {
- return ((*z).Totals.MsgIsZero()) && ((*z).TrieBalancesHash.MsgIsZero()) && ((*z).TotalAccounts == 0) && ((*z).TotalChunks == 0) && ((*z).BiggestChunkLen == 0)
+ return ((*z).Totals.MsgIsZero()) && ((*z).TrieBalancesHash.MsgIsZero()) && ((*z).TotalAccounts == 0) && ((*z).TotalKVs == 0) && ((*z).TotalChunks == 0) && ((*z).BiggestChunkLen == 0)
}
// MarshalMsg implements msgp.Marshaler
@@ -2401,8 +2815,8 @@ func (z *encodedBalanceRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error)
err = msgp.WrapError(err, "struct-from-array", "Resources")
return
}
- if zb0005 > basics.MaxEncodedAccountDataSize {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(basics.MaxEncodedAccountDataSize))
+ if zb0005 > resourcesPerCatchpointFileChunkBackwardCompatible {
+ err = msgp.ErrOverflow(uint64(zb0005), uint64(resourcesPerCatchpointFileChunkBackwardCompatible))
err = msgp.WrapError(err, "struct-from-array", "Resources")
return
}
@@ -2479,8 +2893,8 @@ func (z *encodedBalanceRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error)
err = msgp.WrapError(err, "Resources")
return
}
- if zb0007 > basics.MaxEncodedAccountDataSize {
- err = msgp.ErrOverflow(uint64(zb0007), uint64(basics.MaxEncodedAccountDataSize))
+ if zb0007 > resourcesPerCatchpointFileChunkBackwardCompatible {
+ err = msgp.ErrOverflow(uint64(zb0007), uint64(resourcesPerCatchpointFileChunkBackwardCompatible))
err = msgp.WrapError(err, "Resources")
return
}
@@ -2549,6 +2963,221 @@ func (z *encodedBalanceRecordV6) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *encodedKVRecordV6) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(2)
+ var zb0001Mask uint8 /* 3 bits */
+ if len((*z).Key) == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if len((*z).Value) == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "k"
+ o = append(o, 0xa1, 0x6b)
+ o = msgp.AppendBytes(o, (*z).Key)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "v"
+ o = append(o, 0xa1, 0x76)
+ o = msgp.AppendBytes(o, (*z).Value)
+ }
+ }
+ return
+}
+
+func (_ *encodedKVRecordV6) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*encodedKVRecordV6)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *encodedKVRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ var zb0003 int
+ zb0003, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Key")
+ return
+ }
+ if zb0003 > encodedKVRecordV6MaxKeyLength {
+ err = msgp.ErrOverflow(uint64(zb0003), uint64(encodedKVRecordV6MaxKeyLength))
+ return
+ }
+ (*z).Key, bts, err = msgp.ReadBytesBytes(bts, (*z).Key)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Key")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ var zb0004 int
+ zb0004, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Value")
+ return
+ }
+ if zb0004 > encodedKVRecordV6MaxValueLength {
+ err = msgp.ErrOverflow(uint64(zb0004), uint64(encodedKVRecordV6MaxValueLength))
+ return
+ }
+ (*z).Value, bts, err = msgp.ReadBytesBytes(bts, (*z).Value)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Value")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = encodedKVRecordV6{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "k":
+ var zb0005 int
+ zb0005, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Key")
+ return
+ }
+ if zb0005 > encodedKVRecordV6MaxKeyLength {
+ err = msgp.ErrOverflow(uint64(zb0005), uint64(encodedKVRecordV6MaxKeyLength))
+ return
+ }
+ (*z).Key, bts, err = msgp.ReadBytesBytes(bts, (*z).Key)
+ if err != nil {
+ err = msgp.WrapError(err, "Key")
+ return
+ }
+ case "v":
+ var zb0006 int
+ zb0006, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Value")
+ return
+ }
+ if zb0006 > encodedKVRecordV6MaxValueLength {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(encodedKVRecordV6MaxValueLength))
+ return
+ }
+ (*z).Value, bts, err = msgp.ReadBytesBytes(bts, (*z).Value)
+ if err != nil {
+ err = msgp.WrapError(err, "Value")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *encodedKVRecordV6) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*encodedKVRecordV6)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *encodedKVRecordV6) Msgsize() (s int) {
+ s = 1 + 2 + msgp.BytesPrefixSize + len((*z).Key) + 2 + msgp.BytesPrefixSize + len((*z).Value)
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *encodedKVRecordV6) MsgIsZero() bool {
+ return (len((*z).Key) == 0) && (len((*z).Value) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z hashKind) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ o = msgp.AppendByte(o, byte(z))
+ return
+}
+
+func (_ hashKind) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(hashKind)
+ if !ok {
+ _, ok = (z).(*hashKind)
+ }
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *hashKind) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ {
+ var zb0001 byte
+ zb0001, bts, err = msgp.ReadByteBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ (*z) = hashKind(zb0001)
+ }
+ o = bts
+ return
+}
+
+func (_ *hashKind) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*hashKind)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z hashKind) Msgsize() (s int) {
+ s = msgp.ByteSize
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z hashKind) MsgIsZero() bool {
+ return z == 0
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z resourceFlags) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendUint8(o, uint8(z))
diff --git a/ledger/msgp_gen_test.go b/ledger/msgp_gen_test.go
index 165ecec8d..248102398 100644
--- a/ledger/msgp_gen_test.go
+++ b/ledger/msgp_gen_test.go
@@ -314,9 +314,9 @@ func BenchmarkUnmarshalcatchpointFileBalancesChunkV5(b *testing.B) {
}
}
-func TestMarshalUnmarshalcatchpointFileBalancesChunkV6(t *testing.T) {
+func TestMarshalUnmarshalcatchpointFileChunkV6(t *testing.T) {
partitiontest.PartitionTest(t)
- v := catchpointFileBalancesChunkV6{}
+ v := catchpointFileChunkV6{}
bts := v.MarshalMsg(nil)
left, err := v.UnmarshalMsg(bts)
if err != nil {
@@ -335,12 +335,12 @@ func TestMarshalUnmarshalcatchpointFileBalancesChunkV6(t *testing.T) {
}
}
-func TestRandomizedEncodingcatchpointFileBalancesChunkV6(t *testing.T) {
- protocol.RunEncodingTest(t, &catchpointFileBalancesChunkV6{})
+func TestRandomizedEncodingcatchpointFileChunkV6(t *testing.T) {
+ protocol.RunEncodingTest(t, &catchpointFileChunkV6{})
}
-func BenchmarkMarshalMsgcatchpointFileBalancesChunkV6(b *testing.B) {
- v := catchpointFileBalancesChunkV6{}
+func BenchmarkMarshalMsgcatchpointFileChunkV6(b *testing.B) {
+ v := catchpointFileChunkV6{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -348,8 +348,8 @@ func BenchmarkMarshalMsgcatchpointFileBalancesChunkV6(b *testing.B) {
}
}
-func BenchmarkAppendMsgcatchpointFileBalancesChunkV6(b *testing.B) {
- v := catchpointFileBalancesChunkV6{}
+func BenchmarkAppendMsgcatchpointFileChunkV6(b *testing.B) {
+ v := catchpointFileChunkV6{}
bts := make([]byte, 0, v.Msgsize())
bts = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
@@ -360,8 +360,8 @@ func BenchmarkAppendMsgcatchpointFileBalancesChunkV6(b *testing.B) {
}
}
-func BenchmarkUnmarshalcatchpointFileBalancesChunkV6(b *testing.B) {
- v := catchpointFileBalancesChunkV6{}
+func BenchmarkUnmarshalcatchpointFileChunkV6(b *testing.B) {
+ v := catchpointFileChunkV6{}
bts := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
@@ -554,6 +554,66 @@ func BenchmarkUnmarshalencodedBalanceRecordV6(b *testing.B) {
}
}
+// TestMarshalUnmarshalencodedKVRecordV6 round-trips a zero-value record and
+// verifies that both UnmarshalMsg and msgp.Skip consume the encoding exactly.
+func TestMarshalUnmarshalencodedKVRecordV6(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := encodedKVRecordV6{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+// TestRandomizedEncodingencodedKVRecordV6 fuzzes the codec with randomized values.
+func TestRandomizedEncodingencodedKVRecordV6(t *testing.T) {
+ protocol.RunEncodingTest(t, &encodedKVRecordV6{})
+}
+
+// BenchmarkMarshalMsgencodedKVRecordV6 measures marshaling into a fresh buffer each iteration.
+func BenchmarkMarshalMsgencodedKVRecordV6(b *testing.B) {
+ v := encodedKVRecordV6{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+// BenchmarkAppendMsgencodedKVRecordV6 measures marshaling into a reused, pre-sized buffer.
+func BenchmarkAppendMsgencodedKVRecordV6(b *testing.B) {
+ v := encodedKVRecordV6{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+// BenchmarkUnmarshalencodedKVRecordV6 measures decoding of a pre-encoded record.
+func BenchmarkUnmarshalencodedKVRecordV6(b *testing.B) {
+ v := encodedKVRecordV6{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalresourcesData(t *testing.T) {
partitiontest.PartitionTest(t)
v := resourcesData{}
diff --git a/ledger/persistedkvs.go b/ledger/persistedkvs.go
new file mode 100644
index 000000000..34f3c36ec
--- /dev/null
+++ b/ledger/persistedkvs.go
@@ -0,0 +1,143 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+// persistedKVDataList represents a doubly linked list.
+// It must be initialized with newPersistedKVList.
+type persistedKVDataList struct {
+ root persistedKVDataListNode // sentinel list element, only &root, root.prev, and root.next are used
+ freeList *persistedKVDataListNode // preallocated nodes location
+}
+
+// persistedKVDataListNode is a single element of a persistedKVDataList.
+type persistedKVDataListNode struct {
+ // Next and previous pointers in the doubly-linked list of elements.
+ // To simplify the implementation, internally a list l is implemented
+ // as a ring, such that &l.root is both the next element of the last
+ // list element (l.back()) and the previous element of the first list
+ // element.
+ next, prev *persistedKVDataListNode
+
+ Value *cachedKVData
+}
+
+// newPersistedKVList constructs an empty list whose sentinel root points at
+// itself, plus a sentinel free-list head used for node recycling.
+func newPersistedKVList() *persistedKVDataList {
+ l := new(persistedKVDataList)
+ l.root.next = &l.root
+ l.root.prev = &l.root
+ // used as a helper but does not store value
+ l.freeList = new(persistedKVDataListNode)
+
+ return l
+}
+
+// insertNodeToFreeList pushes otherNode onto the head of the free list,
+// clearing its links and Value so the cached entry can be garbage collected.
+func (l *persistedKVDataList) insertNodeToFreeList(otherNode *persistedKVDataListNode) {
+ otherNode.next = l.freeList.next
+ otherNode.prev = nil
+ otherNode.Value = nil
+
+ l.freeList.next = otherNode
+}
+
+// getNewNode pops a node off the free list, or allocates a fresh node when the
+// free list is empty.
+func (l *persistedKVDataList) getNewNode() *persistedKVDataListNode {
+ if l.freeList.next == nil {
+ return new(persistedKVDataListNode)
+ }
+ newNode := l.freeList.next
+ l.freeList.next = newNode.next
+
+ return newNode
+}
+
+// allocateFreeNodes preallocates numAllocs nodes on the free list, and returns
+// l so the call can be chained after newPersistedKVList.
+func (l *persistedKVDataList) allocateFreeNodes(numAllocs int) *persistedKVDataList {
+ if l.freeList == nil {
+ return l
+ }
+ for i := 0; i < numAllocs; i++ {
+ l.insertNodeToFreeList(new(persistedKVDataListNode))
+ }
+
+ return l
+}
+
+// back returns the last element of list l or nil if the list is empty.
+func (l *persistedKVDataList) back() *persistedKVDataListNode {
+ isEmpty := func(list *persistedKVDataList) bool {
+ // assumes we are inserting correctly to the list - using pushFront.
+ return list.root.next == &list.root
+ }
+ if isEmpty(l) {
+ return nil
+ }
+ return l.root.prev
+}
+
+// remove removes e from l if e is an element of list l, and recycles the node
+// onto the free list. The element must not be nil.
+func (l *persistedKVDataList) remove(e *persistedKVDataListNode) {
+ e.prev.next = e.next
+ e.next.prev = e.prev
+ e.next = nil // avoid memory leaks
+ e.prev = nil // avoid memory leaks
+
+ l.insertNodeToFreeList(e)
+}
+
+// pushFront inserts a new element e with value v at the front of list l and
+// returns e. The node itself comes from the free list when one is available.
+func (l *persistedKVDataList) pushFront(v *cachedKVData) *persistedKVDataListNode {
+ newNode := l.getNewNode()
+ newNode.Value = v
+ return l.insertValue(newNode, &l.root)
+}
+
+// insertValue links newNode into the ring immediately after at, and returns
+// newNode.
+func (l *persistedKVDataList) insertValue(newNode *persistedKVDataListNode, at *persistedKVDataListNode) *persistedKVDataListNode {
+ n := at.next
+ at.next = newNode
+ newNode.prev = at
+ newNode.next = n
+ n.prev = newNode
+
+ return newNode
+}
+
+// moveToFront moves element e to the front of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *persistedKVDataList) moveToFront(e *persistedKVDataListNode) {
+ if l.root.next == e {
+ return
+ }
+ l.move(e, &l.root)
+}
+
+// move relinks e so that it immediately follows at, and returns e.
+// Moving an element next to itself is a no-op.
+func (l *persistedKVDataList) move(e, at *persistedKVDataListNode) *persistedKVDataListNode {
+ if e == at {
+ return e
+ }
+ e.prev.next = e.next
+ e.next.prev = e.prev
+
+ n := at.next
+ at.next = e
+ e.prev = at
+ e.next = n
+ n.prev = e
+
+ return e
+}
diff --git a/ledger/persistedkvs_test.go b/ledger/persistedkvs_test.go
new file mode 100644
index 000000000..eb5ed9dff
--- /dev/null
+++ b/ledger/persistedkvs_test.go
@@ -0,0 +1,175 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+// getRoot exposes the sentinel root so generic list-checking test helpers can
+// walk this list through the dataListNode interface.
+func (l *persistedKVDataList) getRoot() dataListNode {
+ return &l.root
+}
+
+// getNext returns the next node as a dataListNode interface value.
+func (l *persistedKVDataListNode) getNext() dataListNode {
+ // get rid of returning nil wrapped into an interface to let i = x.getNext(); i != nil work.
+ if l.next == nil {
+ return nil
+ }
+ return l.next
+}
+
+// getPrev returns the previous node as a dataListNode interface value,
+// avoiding the typed-nil-in-interface trap the same way as getNext.
+func (l *persistedKVDataListNode) getPrev() dataListNode {
+ if l.prev == nil {
+ return nil
+ }
+ return l.prev
+}
+
+// checkListPointersBD verifies that the list's linkage matches the expected
+// order given in es, by adapting the typed nodes to the shared
+// checkListPointers helper.
+func checkListPointersBD(t *testing.T, l *persistedKVDataList, es []*persistedKVDataListNode) {
+ es2 := make([]dataListNode, len(es))
+ for i, el := range es {
+ es2[i] = el
+ }
+
+ checkListPointers(t, l, es2)
+}
+
+// TestRemoveFromListBD removes middle and front elements and checks the
+// remaining linkage after each removal.
+func TestRemoveFromListBD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedKVList()
+ e1 := l.pushFront(&cachedKVData{key: "key1"})
+ e2 := l.pushFront(&cachedKVData{key: "key2"})
+ e3 := l.pushFront(&cachedKVData{key: "key3"})
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e3, e2, e1})
+
+ l.remove(e2)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e3, e1})
+ l.remove(e3)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e1})
+}
+
+// TestAddingNewNodeWithAllocatedFreeListBD checks that pushFront consumes
+// nodes from a preallocated free list rather than allocating new ones.
+func TestAddingNewNodeWithAllocatedFreeListBD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedKVList().allocateFreeNodes(10)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{})
+ if countListSize(l.freeList) != 10 {
+ t.Errorf("free list did not allocate nodes")
+ return
+ }
+ // test elements
+ e1 := l.pushFront(&cachedKVData{key: "key1"})
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e1})
+
+ if countListSize(l.freeList) != 9 {
+ t.Errorf("free list did not provide a node on new list entry")
+ return
+ }
+}
+
+// TestMultielementListPositioningBD exercises move, moveToFront, remove, and
+// pushFront across a five-element list, checking linkage after every step.
+func TestMultielementListPositioningBD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedKVList()
+ checkListPointersBD(t, l, []*persistedKVDataListNode{})
+ // test elements
+ e2 := l.pushFront(&cachedKVData{key: "key1"})
+ e1 := l.pushFront(&cachedKVData{key: "key2"})
+ e3 := l.pushFront(&cachedKVData{key: "key3"})
+ e4 := l.pushFront(&cachedKVData{key: "key4"})
+ e5 := l.pushFront(&cachedKVData{key: "key5"})
+
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e5, e4, e3, e1, e2})
+
+ l.move(e4, e1)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e5, e3, e1, e4, e2})
+
+ l.remove(e5)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e3, e1, e4, e2})
+
+ l.move(e1, e4) // swap in middle
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e3, e4, e1, e2})
+
+ l.moveToFront(e4)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e4, e3, e1, e2})
+
+ l.remove(e2)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e4, e3, e1})
+
+ l.moveToFront(e3) // move from middle
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e3, e4, e1})
+
+ l.moveToFront(e1) // move from end
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e1, e3, e4})
+
+ l.moveToFront(e1) // no movement
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e1, e3, e4})
+
+ e2 = l.pushFront(&cachedKVData{key: "key2"})
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2, e1, e3, e4})
+
+ l.remove(e3) // removing from middle
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2, e1, e4})
+
+ l.remove(e4) // removing from end
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2, e1})
+
+ l.move(e2, e1) // swapping between two elements
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e1, e2})
+
+ l.remove(e1) // removing front
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2})
+
+ l.move(e2, l.back()) // swapping element with itself.
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2})
+
+ l.remove(e2) // remove last one
+ checkListPointersBD(t, l, []*persistedKVDataListNode{})
+}
+
+// TestSingleElementListPositioningBD covers the degenerate single-element
+// cases: push, self moveToFront, and removal back to empty.
+func TestSingleElementListPositioningBD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedKVList()
+ checkListPointersBD(t, l, []*persistedKVDataListNode{})
+ e := l.pushFront(&cachedKVData{key: "key1"})
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e})
+ l.moveToFront(e)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e})
+ l.remove(e)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{})
+}
+
+// TestRemovedNodeShouldBeMovedToFreeListBD verifies that remove recycles the
+// removed node onto the list's free list.
+func TestRemovedNodeShouldBeMovedToFreeListBD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedKVList()
+ e1 := l.pushFront(&cachedKVData{key: "key1"})
+ e2 := l.pushFront(&cachedKVData{key: "key2"})
+
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2, e1})
+
+ e := l.back()
+ l.remove(e)
+
+ for i := l.freeList.next; i != nil; i = i.next {
+ if i == e {
+ // stopping the test with good results:
+ return
+ }
+ }
+ t.Error("expected the removed node to appear at the freelist")
+}
diff --git a/ledger/persistedresources_list.go b/ledger/persistedresources_list.go
index 57b0cdc44..baa7ac351 100644
--- a/ledger/persistedresources_list.go
+++ b/ledger/persistedresources_list.go
@@ -17,7 +17,7 @@
package ledger
// persistedResourcesDataList represents a doubly linked list.
-// must initiate with newPersistedAccountList.
+// must initiate with newPersistedResourcesList.
type persistedResourcesDataList struct {
root persistedResourcesDataListNode // sentinel list element, only &root, root.prev, and root.next are used
freeList *persistedResourcesDataListNode // preallocated nodes location
diff --git a/ledger/simple_test.go b/ledger/simple_test.go
new file mode 100644
index 000000000..22781c70f
--- /dev/null
+++ b/ledger/simple_test.go
@@ -0,0 +1,187 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/stretchr/testify/require"
+)
+
+// newSimpleLedger opens a test ledger using the vFuture consensus version.
+func newSimpleLedger(t testing.TB, balances bookkeeping.GenesisBalances) *Ledger {
+ return newSimpleLedgerWithConsensusVersion(t, balances, protocol.ConsensusFuture)
+}
+
+// newSimpleLedgerWithConsensusVersion opens a test ledger under the given
+// consensus version with a randomized genesis hash.
+func newSimpleLedgerWithConsensusVersion(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion) *Ledger {
+ var genHash crypto.Digest
+ crypto.RandBytes(genHash[:])
+ return newSimpleLedgerFull(t, balances, cv, genHash)
+}
+
+// newSimpleLedgerFull creates the genesis block and opens an archival ledger
+// on a per-test database name (test name plus a random suffix, so parallel
+// tests do not collide).
+func newSimpleLedgerFull(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion, genHash crypto.Digest) *Ledger {
+ genBlock, err := bookkeeping.MakeGenesisBlock(cv, balances, "test", genHash)
+ require.NoError(t, err)
+ require.False(t, genBlock.FeeSink.IsZero())
+ require.False(t, genBlock.RewardsPool.IsZero())
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ l, err := OpenLedger(logging.Base(), dbName, true, ledgercore.InitState{
+ Block: genBlock,
+ Accounts: balances.Balances,
+ GenesisHash: genHash,
+ }, cfg)
+ require.NoError(t, err)
+ return l
+}
+
+// nextBlock begins evaluation of a new block, after ledger creation or endBlock().
+// The returned evaluator both generates and validates the block being built.
+func nextBlock(t testing.TB, ledger *Ledger) *internal.BlockEvaluator {
+ rnd := ledger.Latest()
+ hdr, err := ledger.BlockHdr(rnd)
+ require.NoError(t, err)
+
+ nextHdr := bookkeeping.MakeBlock(hdr).BlockHeader
+ nextHdr.TimeStamp = hdr.TimeStamp + 1 // ensure deterministic tests
+ eval, err := internal.StartEvaluator(ledger, nextHdr, internal.EvaluatorOptions{
+ Generate: true,
+ Validate: true, // Do the complete checks that a new txn would be subject to
+ })
+ require.NoError(t, err)
+ return eval
+}
+
+// fillDefaults populates GenesisHash and FirstValid from the ledger and
+// evaluator when the test left them unset, then fills the remaining txn
+// defaults for the genesis protocol.
+func fillDefaults(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator, txn *txntest.Txn) {
+ if txn.GenesisHash.IsZero() && ledger.GenesisProto().SupportGenesisHash {
+ txn.GenesisHash = ledger.GenesisHash()
+ }
+ if txn.FirstValid == 0 {
+ txn.FirstValid = eval.Round()
+ }
+
+ txn.FillDefaults(ledger.GenesisProto())
+}
+
+// txns applies each given transaction to eval individually (not as a group),
+// requiring every one of them to succeed.
+func txns(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator, txns ...*txntest.Txn) {
+ t.Helper()
+ for _, txn1 := range txns {
+ txn(t, ledger, eval, txn1)
+ }
+}
+
+// txn applies a single transaction to eval. When a non-empty problem string is
+// supplied, the evaluation must fail with an error containing it; otherwise
+// the transaction must succeed.
+func txn(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator, txn *txntest.Txn, problem ...string) {
+ t.Helper()
+ fillDefaults(t, ledger, eval, txn)
+ err := eval.Transaction(txn.SignedTxn(), transactions.ApplyData{})
+ if err != nil {
+ if len(problem) == 1 && problem[0] != "" {
+ require.Contains(t, err.Error(), problem[0])
+ } else {
+ require.NoError(t, err) // Will obviously fail
+ }
+ return
+ }
+ // no error: the caller must not have expected a problem
+ require.True(t, len(problem) == 0 || problem[0] == "")
+}
+
+// txgroup signs the given transactions as one group and applies it to eval,
+// returning the evaluator's error (if any) to the caller for inspection.
+func txgroup(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator, txns ...*txntest.Txn) error {
+ t.Helper()
+ for _, txn := range txns {
+ fillDefaults(t, ledger, eval, txn)
+ }
+ txgroup := txntest.SignedTxns(txns...)
+
+ return eval.TransactionGroup(transactions.WrapSignedTxnsWithAD(txgroup))
+}
+
+// endBlock completes the block being created, returns the ValidatedBlock for inspection
+func endBlock(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator) *ledgercore.ValidatedBlock {
+ // generate the block and add it with an empty certificate (agreement is not exercised here)
+ validatedBlock, err := eval.GenerateBlock()
+ require.NoError(t, err)
+ err = ledger.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+ require.NoError(t, err)
+ // `rndBQ` gives the latest known block round added to the ledger
+ // we should wait until `rndBQ` block to be committed to blockQueue,
+ // in case there is a data race, noted in
+ // https://github.com/algorand/go-algorand/issues/4349
+ // where writing to `callTxnGroup` after `dl.fullBlock` caused data race,
+ // because the underlying async goroutine `go bq.syncer()` is reading `callTxnGroup`.
+ // A solution here would be wait until all new added blocks are committed,
+ // then we return the result and continue the execution.
+ rndBQ := ledger.Latest()
+ ledger.WaitForCommit(rndBQ)
+ return validatedBlock
+}
+
+// main wraps up some TEAL source in a header and footer so that it is
+// an app that does nothing at create time, but otherwise runs source,
+// then approves, if the source avoids panicking and leaves the stack
+// empty. Semicolons in source are expanded to newlines.
+func main(source string) string {
+ return strings.Replace(fmt.Sprintf(`txn ApplicationID
+ bz end
+ %s
+end: int 1`, source), ";", "\n", -1)
+}
+
+// lookup gets the current accountdata for an address, failing the test on error.
+func lookup(t testing.TB, ledger *Ledger, addr basics.Address) basics.AccountData {
+ ad, _, _, err := ledger.LookupLatest(addr)
+ require.NoError(t, err)
+ return ad
+}
+
+// micros gets the current microAlgo balance for an address
+func micros(t testing.TB, ledger *Ledger, addr basics.Address) uint64 {
+ return lookup(t, ledger, addr).MicroAlgos.Raw
+}
+
+// holding gets the current balance and optin status for some asa for an address;
+// the bool result is false when the address is not opted in.
+func holding(t testing.TB, ledger *Ledger, addr basics.Address, asset basics.AssetIndex) (uint64, bool) {
+ if holding, ok := lookup(t, ledger, addr).Assets[asset]; ok {
+ return holding.Amount, true
+ }
+ return 0, false
+}
+
+// asaParams gets the asset params for a given asa index by resolving the
+// creator address and reading the params from the creator's account.
+func asaParams(t testing.TB, ledger *Ledger, asset basics.AssetIndex) (basics.AssetParams, error) {
+ creator, ok, err := ledger.GetCreator(basics.CreatableIndex(asset), basics.AssetCreatable)
+ if err != nil {
+ return basics.AssetParams{}, err
+ }
+ if !ok {
+ return basics.AssetParams{}, fmt.Errorf("no asset (%d)", asset)
+ }
+ if params, ok := lookup(t, ledger, creator).AssetParams[asset]; ok {
+ return params, nil
+ }
+ return basics.AssetParams{}, fmt.Errorf("bad lookup (%d)", asset)
+}
diff --git a/ledger/testing/consensusRange.go b/ledger/testing/consensusRange.go
new file mode 100644
index 000000000..877e03fae
--- /dev/null
+++ b/ledger/testing/consensusRange.go
@@ -0,0 +1,106 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package testing
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/stretchr/testify/require"
+)
+
+// consensusByNumber maps small integer indexes to consensus versions; the
+// empty strings pad out early, unused slots so that index i denotes version i.
+var consensusByNumber = []protocol.ConsensusVersion{
+ "", "", "", "", "", "", "",
+ protocol.ConsensusV7,
+ protocol.ConsensusV8,
+ protocol.ConsensusV9,
+ protocol.ConsensusV10,
+ protocol.ConsensusV11, // first with viable payset commit type
+ protocol.ConsensusV12,
+ protocol.ConsensusV13,
+ protocol.ConsensusV14,
+ protocol.ConsensusV15, // rewards in AD
+ protocol.ConsensusV16,
+ protocol.ConsensusV17,
+ protocol.ConsensusV18,
+ protocol.ConsensusV19,
+ protocol.ConsensusV20,
+ protocol.ConsensusV21,
+ protocol.ConsensusV22,
+ protocol.ConsensusV23,
+ protocol.ConsensusV24, // AVM v2 (apps)
+ protocol.ConsensusV25,
+ protocol.ConsensusV26,
+ protocol.ConsensusV27,
+ protocol.ConsensusV28,
+ protocol.ConsensusV29,
+ protocol.ConsensusV30, // AVM v5 (inner txs)
+ protocol.ConsensusV31, // AVM v6 (inner txs with appls)
+ protocol.ConsensusV32, // unlimited assets and apps
+ protocol.ConsensusV33, // 320 rounds
+ protocol.ConsensusV34, // AVM v7, stateproofs
+ protocol.ConsensusV35, // minor, double upgrade with v34
+ protocol.ConsensusV36, // box storage
+ protocol.ConsensusFuture,
+}
+
+// TestConsensusRange allows for running tests against a range of consensus
+// versions. Generally `start` will be the version that introduced the feature,
+// and `stop` will be 0 to indicate it should work right on up through vFuture.
+// `stop` will be an actual version number if we're confirming that something
+// STOPS working as of a particular version. When writing the test for a new
+// feature that is currently in vFuture, use the expected version number as
+// `start`. That will correspond to vFuture until a new consensus version is
+// created and inserted in consensusByNumber. At that point, your feature is
+// probably active in that version. (If it's being held in vFuture, just
+// increment your `start`.)
+func TestConsensusRange(t *testing.T, start, stop int, test func(t *testing.T, ver int, cv protocol.ConsensusVersion)) {
+ if stop == 0 { // Treat 0 as "future"
+ stop = len(consensusByNumber) - 1
+ }
+ require.LessOrEqual(t, start, stop)
+ for i := start; i <= stop; i++ {
+ var version string
+ // the last entry of consensusByNumber is ConsensusFuture, labeled vFuture
+ if i == len(consensusByNumber)-1 {
+ version = "vFuture"
+ } else {
+ version = fmt.Sprintf("v%d", i)
+ }
+ t.Run(fmt.Sprintf("cv=%s", version), func(t *testing.T) {
+ test(t, i, consensusByNumber[i])
+ })
+ }
+}
+
+// BenchConsensusRange is for getting benchmarks across consensus versions.
+// It mirrors TestConsensusRange: stop == 0 means "run through vFuture".
+func BenchConsensusRange(b *testing.B, start, stop int, bench func(t *testing.B, ver int, cv protocol.ConsensusVersion)) {
+ if stop == 0 { // Treat 0 as "future"
+ stop = len(consensusByNumber) - 1
+ }
+ for i := start; i <= stop; i++ {
+ var version string
+ if i == len(consensusByNumber)-1 {
+ version = "vFuture"
+ } else {
+ version = fmt.Sprintf("v%d", i)
+ }
+ b.Run(fmt.Sprintf("cv=%s", version), func(b *testing.B) {
+ bench(b, i, consensusByNumber[i])
+ })
+ }
+}
diff --git a/ledger/testing/consensusRange_test.go b/ledger/testing/consensusRange_test.go
new file mode 100644
index 000000000..df51ec720
--- /dev/null
+++ b/ledger/testing/consensusRange_test.go
@@ -0,0 +1,58 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package testing
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+// TestReleasedVersion ensures that the necessary tidying is done when a new
+// protocol release happens. The new version must be added to
+// consensusByNumber, and a new LogicSigVersion must be added to vFuture.
+func TestReleasedVersion(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // This confirms that the proto before future has no ApprovedUpgrades. Once
+ // it does, that new version should be added to consensusByNumber.
+ // (len-2 is the last released version; len-1 is vFuture.)
+ require.Len(t, config.Consensus[consensusByNumber[len(consensusByNumber)-2]].ApprovedUpgrades, 0)
+ // And no funny business with vFuture
+ require.Equal(t, protocol.ConsensusFuture, consensusByNumber[len(consensusByNumber)-1])
+
+ // Ensure that vFuture gets a new LogicSigVersion when we promote the
+ // existing one. That allows TestExperimental in the logic package to
+ // prevent unintended releases of experimental opcodes.
+ relV := config.Consensus[consensusByNumber[len(consensusByNumber)-2]].LogicSigVersion
+ futureV := config.Consensus[protocol.ConsensusFuture].LogicSigVersion
+ require.Less(t, int(relV), int(futureV))
+
+ // Require that all are present
+ for _, cv := range consensusByNumber {
+ if cv == "" {
+ continue
+ }
+ params, ok := config.Consensus[cv]
+ require.True(t, ok, string(cv))
+ require.NotZero(t, params) // just making sure an empty one didn't get put in
+ }
+
+}
diff --git a/ledger/testing/randomAccounts.go b/ledger/testing/randomAccounts.go
index 947ddc1b4..c3c559911 100644
--- a/ledger/testing/randomAccounts.go
+++ b/ledger/testing/randomAccounts.go
@@ -339,6 +339,11 @@ func RandomFullAccountData(rewardsLevel uint64, lastCreatableID *basics.Creatabl
data.TotalExtraAppPages = uint32(crypto.RandUint64() % 50)
}
+ if (crypto.RandUint64() % 3) == 1 {
+ data.TotalBoxes = crypto.RandUint64() % 100
+ data.TotalBoxBytes = crypto.RandUint64() % 10000
+ }
+
return data
}
diff --git a/ledger/tracker.go b/ledger/tracker.go
index ae5077719..1945018ef 100644
--- a/ledger/tracker.go
+++ b/ledger/tracker.go
@@ -227,7 +227,7 @@ type deferredCommitRange struct {
catchpointSecondStage bool
}
-// deferredCommitContext is used in order to syncornize the persistence of a given deferredCommitRange.
+// deferredCommitContext is used in order to synchronize the persistence of a given deferredCommitRange.
// prepareCommit, commitRound and postCommit are all using it to exchange data.
type deferredCommitContext struct {
deferredCommitRange
@@ -243,10 +243,12 @@ type deferredCommitContext struct {
compactAccountDeltas compactAccountDeltas
compactResourcesDeltas compactResourcesDeltas
+ compactKvDeltas map[string]modifiedKvValue
compactCreatableDeltas map[basics.CreatableIndex]ledgercore.ModifiedCreatable
updatedPersistedAccounts []persistedAccountData
updatedPersistedResources map[basics.Address][]persistedResourcesData
+ updatedPersistedKVs map[string]persistedKVData
compactOnlineAccountDeltas compactOnlineAccountDeltas
updatedPersistedOnlineAccounts []persistedOnlineAccountData
@@ -439,7 +441,7 @@ func (tr *trackerRegistry) commitSyncer(deferredCommits chan *deferredCommitCont
}
err := tr.commitRound(commit)
if err != nil {
- tr.log.Warnf("Could not commit round: %w", err)
+ tr.log.Warnf("Could not commit round: %v", err)
}
case <-tr.ctx.Done():
// drain the pending commits queue:
diff --git a/ledger/trackerdb.go b/ledger/trackerdb.go
index 3e8773225..3b1f6ca01 100644
--- a/ledger/trackerdb.go
+++ b/ledger/trackerdb.go
@@ -27,6 +27,7 @@ import (
"time"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merkletrie"
"github.com/algorand/go-algorand/data/basics"
@@ -38,6 +39,8 @@ import (
type trackerDBParams struct {
initAccounts map[basics.Address]basics.AccountData
initProto protocol.ConsensusVersion
+ genesisHash crypto.Digest
+ fromCatchpoint bool
catchpointEnabled bool
dbPathPrefix string
blockDb db.Pair
@@ -80,6 +83,8 @@ func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefi
tp := trackerDBParams{
initAccounts: l.GenesisAccounts(),
initProto: l.GenesisProtoVersion(),
+ genesisHash: l.GenesisHash(),
+ fromCatchpoint: false,
catchpointEnabled: catchpointEnabled,
dbPathPrefix: dbPathPrefix,
blockDb: bdbs,
@@ -183,6 +188,18 @@ func runMigrations(ctx context.Context, tx *sql.Tx, params trackerDBParams, log
tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 6 : %v", err)
return
}
+ case 7:
+ err = tu.upgradeDatabaseSchema7(ctx, tx)
+ if err != nil {
+ tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 7 : %v", err)
+ return
+ }
+ case 8:
+ err = tu.upgradeDatabaseSchema8(ctx, tx)
+ if err != nil {
+ tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 8 : %v", err)
+ return
+ }
default:
return trackerDBInitParams{}, fmt.Errorf("trackerDBInitialize unable to upgrade database from schema version %d", tu.schemaVersion)
}
@@ -503,6 +520,30 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema6(ctx context.Context
return tu.setVersion(ctx, tx, 7)
}
+// upgradeDatabaseSchema7 upgrades the database schema from version 7 to
+// version 8, adding the kvstore table for box feature support.
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema7(ctx context.Context, tx *sql.Tx) (err error) {
+ err = accountsCreateBoxTable(ctx, tx)
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema7 unable to create kvstore through createTables : %v", err)
+ }
+ return tu.setVersion(ctx, tx, 8)
+}
+
+// upgradeDatabaseSchema8 upgrades the database schema from version 8 to version 9,
+// forcing a rebuild of the accounthashes table on betanet nodes. Otherwise it has no effect.
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema8(ctx context.Context, tx *sql.Tx) (err error) {
+ // error ignored: the input is a fixed, compile-time betanet digest string
+ betanetGenesisHash, _ := crypto.DigestFromString("TBMBVTC7W24RJNNUZCF7LWZD2NMESGZEQSMPG5XQD7JY4O7JKVWQ")
+ if tu.genesisHash == betanetGenesisHash && !tu.fromCatchpoint {
+ // reset hash round to 0, forcing catchpointTracker.initializeHashes to rebuild accounthashes
+ err = updateAccountsHashRound(ctx, tx, 0)
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema8 unable to reset acctrounds table 'hashbase' round : %v", err)
+ }
+ }
+ return tu.setVersion(ctx, tx, 9)
+}
+
// isDirEmpty returns if a given directory is empty or not.
func isDirEmpty(path string) (bool, error) {
dir, err := os.Open(path)
diff --git a/ledger/internal/txnbench_test.go b/ledger/txnbench_test.go
index 9c92c896c..788ecbe1f 100644
--- a/ledger/internal/txnbench_test.go
+++ b/ledger/txnbench_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal_test
+package ledger
import (
"errors"
@@ -27,14 +27,15 @@ import (
"github.com/algorand/go-algorand/data/txntest"
"github.com/algorand/go-algorand/ledger/ledgercore"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/protocol"
"github.com/stretchr/testify/require"
)
// BenchmarkTxnTypes compares the execution time of various txn types
func BenchmarkTxnTypes(b *testing.B) {
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- benchConsensusRange(b, 30, 0, func(b *testing.B, ver int) {
- l := newTestLedgerWithConsensusVersion(b, genBalances, consensusByNumber[ver])
+ ledgertesting.BenchConsensusRange(b, 30, 0, func(b *testing.B, ver int, cv protocol.ConsensusVersion) {
+ l := newSimpleLedgerWithConsensusVersion(b, genBalances, cv)
defer l.Close()
createasa := txntest.Txn{
diff --git a/ledger/txtail_test.go b/ledger/txtail_test.go
index 409dac092..d32ee7568 100644
--- a/ledger/txtail_test.go
+++ b/ledger/txtail_test.go
@@ -279,7 +279,7 @@ func TestTxTailDeltaTracking(t *testing.T) {
LastValid: basics.Round(i + 50),
Intra: 0,
}
- deltas.Txleases[ledgercore.Txlease{Sender: blk.Payset[0].Txn.Sender, Lease: blk.Payset[0].Txn.Lease}] = basics.Round(i + 50)
+ deltas.AddTxLease(ledgercore.Txlease{Sender: blk.Payset[0].Txn.Sender, Lease: blk.Payset[0].Txn.Lease}, basics.Round(i+50))
txtail.newBlock(blk, deltas)
txtail.committedUpTo(basics.Round(i))
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index a7cdde4ea..962db20e7 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -26,15 +26,13 @@ import (
algodclient "github.com/algorand/go-algorand/daemon/algod/api/client"
v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
kmdclient "github.com/algorand/go-algorand/daemon/kmd/client"
"github.com/algorand/go-algorand/rpcs"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/daemon/algod/api/spec/common"
- v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
modelV2 "github.com/algorand/go-algorand/daemon/algod/api/spec/v2"
"github.com/algorand/go-algorand/daemon/kmd/lib/kmdapi"
"github.com/algorand/go-algorand/data/basics"
@@ -61,7 +59,7 @@ type Client struct {
cacheDir string
consensus config.ConsensusProtocols
- suggestedParamsCache v1.TransactionParams
+ suggestedParamsCache model.TransactionParametersResponse
suggestedParamsExpire time.Time
suggestedParamsMaxAge time.Duration
}
@@ -617,7 +615,7 @@ func (c *Client) ConstructPayment(from, to string, fee, amount uint64, note []by
tx.PaymentTxnFields.CloseRemainderTo = closeToAddr
}
- tx.Header.GenesisID = params.GenesisID
+ tx.Header.GenesisID = params.GenesisId
// Check if the protocol supports genesis hash
if cp.SupportGenesisHash {
@@ -640,7 +638,7 @@ func (c *Client) ConstructPayment(from, to string, fee, amount uint64, note []by
/* Algod Wrappers */
// Status returns the node status
-func (c *Client) Status() (resp generatedV2.NodeStatusResponse, err error) {
+func (c *Client) Status() (resp model.NodeStatusResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
resp, err = algod.Status()
@@ -649,25 +647,16 @@ func (c *Client) Status() (resp generatedV2.NodeStatusResponse, err error) {
}
// AccountInformation takes an address and returns its information
-func (c *Client) AccountInformation(account string) (resp v1.Account, err error) {
+func (c *Client) AccountInformation(account string, includeCreatables bool) (resp model.Account, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
- resp, err = algod.AccountInformation(account)
- }
- return
-}
-
-// AccountInformationV2 takes an address and returns its information
-func (c *Client) AccountInformationV2(account string, includeCreatables bool) (resp generatedV2.Account, err error) {
- algod, err := c.ensureAlgodClient()
- if err == nil {
- resp, err = algod.AccountInformationV2(account, includeCreatables)
+ resp, err = algod.AccountInformation(account, includeCreatables)
}
return
}
// AccountApplicationInformation gets account information about a given app.
-func (c *Client) AccountApplicationInformation(accountAddress string, applicationID uint64) (resp generatedV2.AccountApplicationResponse, err error) {
+func (c *Client) AccountApplicationInformation(accountAddress string, applicationID uint64) (resp model.AccountApplicationResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
resp, err = algod.AccountApplicationInformation(accountAddress, applicationID)
@@ -689,7 +678,7 @@ func (c *Client) RawAccountApplicationInformation(accountAddress string, applica
}
// AccountAssetInformation gets account information about a given asset.
-func (c *Client) AccountAssetInformation(accountAddress string, assetID uint64) (resp generatedV2.AccountAssetResponse, err error) {
+func (c *Client) AccountAssetInformation(accountAddress string, assetID uint64) (resp model.AccountAssetResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
resp, err = algod.AccountAssetInformation(accountAddress, assetID)
@@ -715,7 +704,7 @@ func (c *Client) AccountData(account string) (accountData basics.AccountData, er
algod, err := c.ensureAlgodClient()
if err == nil {
var resp []byte
- resp, err = algod.RawAccountInformationV2(account)
+ resp, err = algod.RawAccountInformation(account)
if err == nil {
err = protocol.Decode(resp, &accountData)
}
@@ -724,23 +713,14 @@ func (c *Client) AccountData(account string) (accountData basics.AccountData, er
}
// AssetInformation takes an asset's index and returns its information
-func (c *Client) AssetInformation(index uint64) (resp v1.AssetParams, err error) {
- algod, err := c.ensureAlgodClient()
- if err == nil {
- resp, err = algod.AssetInformation(index)
- }
- return
-}
-
-// AssetInformationV2 takes an asset's index and returns its information
-func (c *Client) AssetInformationV2(index uint64) (resp generatedV2.Asset, err error) {
+func (c *Client) AssetInformation(index uint64) (resp model.Asset, err error) {
algod, err := c.ensureAlgodClient()
if err != nil {
return
}
- resp, err = algod.AssetInformationV2(index)
+ resp, err = algod.AssetInformation(index)
if err != nil {
- return generatedV2.Asset{}, err
+ return model.Asset{}, err
}
byteLen := func(p *[]byte) int {
@@ -767,7 +747,7 @@ func (c *Client) AssetInformationV2(index uint64) (resp generatedV2.Asset, err e
}
// ApplicationInformation takes an app's index and returns its information
-func (c *Client) ApplicationInformation(index uint64) (resp generatedV2.Application, err error) {
+func (c *Client) ApplicationInformation(index uint64) (resp model.Application, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
resp, err = algod.ApplicationInformation(index)
@@ -775,18 +755,28 @@ func (c *Client) ApplicationInformation(index uint64) (resp generatedV2.Applicat
return
}
-// TransactionInformation takes an address and associated txid and return its information
-func (c *Client) TransactionInformation(addr, txid string) (resp v1.Transaction, err error) {
+// ApplicationBoxes takes an app's index and returns the names of boxes under it
+func (c *Client) ApplicationBoxes(appID uint64, maxBoxNum uint64) (resp model.BoxesResponse, err error) {
+ algod, err := c.ensureAlgodClient()
+ if err == nil {
+ resp, err = algod.ApplicationBoxes(appID, maxBoxNum)
+ }
+ return
+}
+
+// GetApplicationBoxByName takes an app's index and box name and returns its value.
+// The box name should be of the form `encoding:value`. See logic.AppCallBytes for more information.
+func (c *Client) GetApplicationBoxByName(index uint64, name string) (resp model.BoxResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
- resp, err = algod.TransactionInformation(addr, txid)
+ resp, err = algod.GetApplicationBoxByName(index, name)
}
return
}
// PendingTransactionInformation returns information about a recently issued
// transaction based on its txid.
-func (c *Client) PendingTransactionInformation(txid string) (resp v1.Transaction, err error) {
+func (c *Client) PendingTransactionInformation(txid string) (resp model.PendingTransactionResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
resp, err = algod.PendingTransactionInformation(txid)
@@ -794,18 +784,24 @@ func (c *Client) PendingTransactionInformation(txid string) (resp v1.Transaction
return
}
-// PendingTransactionInformationV2 returns information about a recently issued
-// transaction based on its txid.
-func (c *Client) PendingTransactionInformationV2(txid string) (resp generatedV2.PendingTransactionResponse, err error) {
+// ParsedPendingTransaction takes a txid and returns the parsed PendingTransaction response.
+func (c *Client) ParsedPendingTransaction(txid string) (txn v2.PreEncodedTxInfo, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
- resp, err = algod.PendingTransactionInformationV2(txid)
+ var resp []byte
+ resp, err = algod.RawPendingTransactionInformation(txid)
+ if err == nil {
+ err = protocol.DecodeReflect(resp, &txn)
+ if err != nil {
+ return
+ }
+ }
}
return
}
// Block takes a round and returns its block
-func (c *Client) Block(round uint64) (resp v1.Block, err error) {
+func (c *Client) Block(round uint64) (resp model.BlockResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
resp, err = algod.Block(round)
@@ -814,7 +810,7 @@ func (c *Client) Block(round uint64) (resp v1.Block, err error) {
}
// RawBlock takes a round and returns its block
-func (c *Client) RawBlock(round uint64) (resp v1.RawBlock, err error) {
+func (c *Client) RawBlock(round uint64) (resp []byte, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
resp, err = algod.RawBlock(round)
@@ -822,24 +818,31 @@ func (c *Client) RawBlock(round uint64) (resp v1.RawBlock, err error) {
return
}
-// BookkeepingBlock takes a round and returns its block
-func (c *Client) BookkeepingBlock(round uint64) (block bookkeeping.Block, err error) {
+// EncodedBlockCert takes a round and returns its parsed block and certificate
+func (c *Client) EncodedBlockCert(round uint64) (blockCert rpcs.EncodedBlockCert, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
var resp []byte
resp, err = algod.RawBlock(round)
if err == nil {
- var b rpcs.EncodedBlockCert
- err = protocol.DecodeReflect(resp, &b)
+ err = protocol.Decode(resp, &blockCert)
if err != nil {
return
}
- block = b.Block
}
}
return
}
+// BookkeepingBlock takes a round and returns its block
+func (c *Client) BookkeepingBlock(round uint64) (block bookkeeping.Block, err error) {
+ blockCert, err := c.EncodedBlockCert(round)
+ if err == nil {
+ return blockCert.Block, nil
+ }
+ return
+}
+
// HealthCheck returns an error if something is wrong
func (c *Client) HealthCheck() error {
algod, err := c.ensureAlgodClient()
@@ -850,7 +853,7 @@ func (c *Client) HealthCheck() error {
}
// WaitForRound takes a round, waits until it appears and returns its status. This function blocks.
-func (c *Client) WaitForRound(round uint64) (resp generatedV2.NodeStatusResponse, err error) {
+func (c *Client) WaitForRound(round uint64) (resp model.NodeStatusResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
resp, err = algod.StatusAfterBlock(round)
@@ -860,7 +863,7 @@ func (c *Client) WaitForRound(round uint64) (resp generatedV2.NodeStatusResponse
// GetBalance takes an address and returns its total balance; if the address doesn't exist, it returns 0.
func (c *Client) GetBalance(address string) (uint64, error) {
- resp, err := c.AccountInformation(address)
+ resp, err := c.AccountInformation(address, false)
if err != nil {
return 0, err
}
@@ -877,7 +880,7 @@ func (c Client) AlgodVersions() (resp common.Version, err error) {
}
// LedgerSupply returns the total number of algos in the system
-func (c Client) LedgerSupply() (resp v1.Supply, err error) {
+func (c Client) LedgerSupply() (resp model.SupplyResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
resp, err = algod.LedgerSupply()
@@ -902,16 +905,16 @@ func (c Client) CurrentRound() (lastRound uint64, err error) {
func (c *Client) SuggestedFee() (fee uint64, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
- resp, err := algod.SuggestedFee()
+ params, err := algod.SuggestedParams()
if err == nil {
- fee = resp.Fee
+ fee = params.Fee
}
}
return
}
// SuggestedParams returns the suggested parameters for a new transaction
-func (c *Client) SuggestedParams() (params v1.TransactionParams, err error) {
+func (c *Client) SuggestedParams() (params model.TransactionParametersResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
params, err = algod.SuggestedParams()
@@ -924,7 +927,7 @@ func (c *Client) SetSuggestedParamsCacheAge(maxAge time.Duration) {
c.suggestedParamsMaxAge = maxAge
}
-func (c *Client) cachedSuggestedParams() (params v1.TransactionParams, err error) {
+func (c *Client) cachedSuggestedParams() (params model.TransactionParametersResponse, err error) {
if c.suggestedParamsMaxAge == 0 || time.Now().After(c.suggestedParamsExpire) {
params, err = c.SuggestedParams()
if err == nil && c.suggestedParamsMaxAge != 0 {
@@ -938,7 +941,7 @@ func (c *Client) cachedSuggestedParams() (params v1.TransactionParams, err error
// GetPendingTransactions gets a snapshot of current pending transactions on the node.
// If maxTxns = 0, fetches as many transactions as possible.
-func (c *Client) GetPendingTransactions(maxTxns uint64) (resp v1.PendingTransactions, err error) {
+func (c *Client) GetPendingTransactions(maxTxns uint64) (resp model.PendingTransactionsResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
resp, err = algod.GetPendingTransactions(maxTxns)
@@ -948,7 +951,7 @@ func (c *Client) GetPendingTransactions(maxTxns uint64) (resp v1.PendingTransact
// GetPendingTransactionsByAddress gets a snapshot of current pending transactions on the node for the given address.
// If maxTxns = 0, fetches as many transactions as possible.
-func (c *Client) GetPendingTransactionsByAddress(addr string, maxTxns uint64) (resp v1.PendingTransactions, err error) {
+func (c *Client) GetPendingTransactionsByAddress(addr string, maxTxns uint64) (resp model.PendingTransactionsResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
resp, err = algod.PendingTransactionsByAddr(addr, maxTxns)
@@ -956,6 +959,44 @@ func (c *Client) GetPendingTransactionsByAddress(addr string, maxTxns uint64) (r
return
}
+// PendingTransactions represents a parsed PendingTransactionsResponse struct.
+type PendingTransactions struct {
+ TopTransactions []transactions.SignedTxn `json:"top-transactions"`
+ TotalTransactions uint64 `json:"total-transactions"`
+}
+
+// GetParsedPendingTransactions returns the parsed response with pending transactions.
+func (c *Client) GetParsedPendingTransactions(maxTxns uint64) (txns PendingTransactions, err error) {
+ algod, err := c.ensureAlgodClient()
+ if err == nil {
+ var resp []byte
+ resp, err = algod.GetRawPendingTransactions(maxTxns)
+ if err == nil {
+ err = protocol.DecodeReflect(resp, &txns)
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// GetParsedPendingTransactionsByAddress returns the parsed response with pending transactions by address.
+func (c *Client) GetParsedPendingTransactionsByAddress(addr string, maxTxns uint64) (txns PendingTransactions, err error) {
+ algod, err := c.ensureAlgodClient()
+ if err == nil {
+ var resp []byte
+ resp, err = algod.RawPendingTransactionsByAddr(addr, maxTxns)
+ if err == nil {
+ err = protocol.DecodeReflect(resp, &txns)
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
// VerifyParticipationKey checks if a given participationID is installed in a loop until timeout has elapsed.
func (c *Client) VerifyParticipationKey(timeout time.Duration, participationID string) error {
start := time.Now()
@@ -982,7 +1023,7 @@ func (c *Client) VerifyParticipationKey(timeout time.Duration, participationID s
// AddParticipationKey takes a participation key file and sends it to the node.
// The key will be loaded into the system when the function returns successfully.
-func (c *Client) AddParticipationKey(keyfile string) (resp generated.PostParticipationResponse, err error) {
+func (c *Client) AddParticipationKey(keyfile string) (resp model.PostParticipationResponse, err error) {
data, err := os.ReadFile(keyfile)
if err != nil {
return
@@ -997,7 +1038,7 @@ func (c *Client) AddParticipationKey(keyfile string) (resp generated.PostPartici
}
// GetParticipationKeys gets the currently installed participation keys.
-func (c *Client) GetParticipationKeys() (resp generated.ParticipationKeysResponse, err error) {
+func (c *Client) GetParticipationKeys() (resp model.ParticipationKeysResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
return algod.GetParticipationKeys()
@@ -1006,7 +1047,7 @@ func (c *Client) GetParticipationKeys() (resp generated.ParticipationKeysRespons
}
// GetParticipationKeyByID looks up a specific participation key by its participationID.
-func (c *Client) GetParticipationKeyByID(id string) (resp generated.ParticipationKeyResponse, err error) {
+func (c *Client) GetParticipationKeyByID(id string) (resp model.ParticipationKeyResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
return algod.GetParticipationKeyByID(id)
@@ -1034,7 +1075,7 @@ func (c *Client) ExportKey(walletHandle []byte, password, account string) (resp
// ConsensusParams returns the consensus parameters for the protocol active at the specified round
func (c *Client) ConsensusParams(round uint64) (consensus config.ConsensusParams, err error) {
- block, err := c.Block(round)
+ block, err := c.BookkeepingBlock(round)
if err != nil {
return
}
@@ -1089,7 +1130,7 @@ const defaultAppIdx = 1380011588
func MakeDryrunStateBytes(client Client, txnOrStxn interface{}, otherTxns []transactions.SignedTxn, otherAccts []basics.Address, proto string, format string) (result []byte, err error) {
switch format {
case "json":
- var gdr generatedV2.DryrunRequest
+ var gdr model.DryrunRequest
gdr, err = MakeDryrunStateGenerated(client, txnOrStxn, otherTxns, otherAccts, proto)
if err == nil {
result = protocol.EncodeJSON(&gdr)
@@ -1116,8 +1157,8 @@ func MakeDryrunState(client Client, txnOrStxn interface{}, otherTxns []transacti
return v2.DryrunRequestFromGenerated(&gdr)
}
-// MakeDryrunStateGenerated function creates generatedV2.DryrunRequest data structure
-func MakeDryrunStateGenerated(client Client, txnOrStxnOrSlice interface{}, otherTxns []transactions.SignedTxn, otherAccts []basics.Address, proto string) (dr generatedV2.DryrunRequest, err error) {
+// MakeDryrunStateGenerated function creates model.DryrunRequest data structure
+func MakeDryrunStateGenerated(client Client, txnOrStxnOrSlice interface{}, otherTxns []transactions.SignedTxn, otherAccts []basics.Address, proto string) (dr model.DryrunRequest, err error) {
var txns []transactions.SignedTxn
if txnOrStxnOrSlice != nil {
switch txnType := txnOrStxnOrSlice.(type) {
@@ -1152,16 +1193,16 @@ func MakeDryrunStateGenerated(client Client, txnOrStxnOrSlice interface{}, other
apps := []basics.AppIndex{tx.ApplicationID}
apps = append(apps, tx.ForeignApps...)
for _, appIdx := range apps {
- var appParams generatedV2.ApplicationParams
+ var appParams model.ApplicationParams
if appIdx == 0 {
// if it is an app create txn then use params from the txn
appParams.ApprovalProgram = tx.ApprovalProgram
appParams.ClearStateProgram = tx.ClearStateProgram
- appParams.GlobalStateSchema = &generatedV2.ApplicationStateSchema{
+ appParams.GlobalStateSchema = &model.ApplicationStateSchema{
NumUint: tx.GlobalStateSchema.NumUint,
NumByteSlice: tx.GlobalStateSchema.NumByteSlice,
}
- appParams.LocalStateSchema = &generatedV2.ApplicationStateSchema{
+ appParams.LocalStateSchema = &model.ApplicationStateSchema{
NumUint: tx.LocalStateSchema.NumUint,
NumByteSlice: tx.LocalStateSchema.NumByteSlice,
}
@@ -1170,22 +1211,22 @@ func MakeDryrunStateGenerated(client Client, txnOrStxnOrSlice interface{}, other
appIdx = defaultAppIdx
} else {
// otherwise need to fetch app state
- var app generatedV2.Application
+ var app model.Application
if app, err = client.ApplicationInformation(uint64(appIdx)); err != nil {
return
}
appParams = app.Params
accounts = append(accounts, appIdx.Address())
}
- dr.Apps = append(dr.Apps, generatedV2.Application{
+ dr.Apps = append(dr.Apps, model.Application{
Id: uint64(appIdx),
Params: appParams,
})
}
for _, acc := range accounts {
- var info generatedV2.Account
- if info, err = client.AccountInformationV2(acc.String(), true); err != nil {
+ var info model.Account
+ if info, err = client.AccountInformation(acc.String(), true); err != nil {
// ignore error - accounts might have app addresses that were not funded
continue
}
@@ -1196,18 +1237,18 @@ func MakeDryrunStateGenerated(client Client, txnOrStxnOrSlice interface{}, other
if dr.Round, err = client.CurrentRound(); err != nil {
return
}
- var b v1.Block
- if b, err = client.Block(dr.Round); err != nil {
+ var b bookkeeping.Block
+ if b, err = client.BookkeepingBlock(dr.Round); err != nil {
return
}
- dr.LatestTimestamp = uint64(b.Timestamp)
+ dr.LatestTimestamp = uint64(b.BlockHeader.TimeStamp)
}
}
return
}
// Dryrun takes an app's index and returns its information
-func (c *Client) Dryrun(data []byte) (resp generatedV2.DryrunResponse, err error) {
+func (c *Client) Dryrun(data []byte) (resp model.DryrunResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
data, err = algod.RawDryrun(data)
@@ -1220,7 +1261,7 @@ func (c *Client) Dryrun(data []byte) (resp generatedV2.DryrunResponse, err error
}
// TransactionProof returns a Merkle proof for a transaction in a block.
-func (c *Client) TransactionProof(txid string, round uint64, hashType crypto.HashType) (resp generatedV2.TransactionProofResponse, err error) {
+func (c *Client) TransactionProof(txid string, round uint64, hashType crypto.HashType) (resp model.TransactionProofResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
return algod.TransactionProof(txid, round, hashType)
@@ -1229,7 +1270,7 @@ func (c *Client) TransactionProof(txid string, round uint64, hashType crypto.Has
}
// LightBlockHeaderProof returns a Merkle proof for a block.
-func (c *Client) LightBlockHeaderProof(round uint64) (resp generatedV2.LightBlockHeaderProofResponse, err error) {
+func (c *Client) LightBlockHeaderProof(round uint64) (resp model.LightBlockHeaderProofResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
return algod.LightBlockHeaderProof(round)
diff --git a/libgoal/participation.go b/libgoal/participation.go
index 88a1151a7..7bdb8981b 100644
--- a/libgoal/participation.go
+++ b/libgoal/participation.go
@@ -23,7 +23,7 @@ import (
"path/filepath"
"github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/util/db"
@@ -31,7 +31,7 @@ import (
// chooseParticipation chooses which participation keys to use for going online
// based on the address, round number, and available participation databases
-func (c *Client) chooseParticipation(address basics.Address, round basics.Round) (part generated.ParticipationKey, err error) {
+func (c *Client) chooseParticipation(address basics.Address, round basics.Round) (part model.ParticipationKey, err error) {
parts, err := c.ListParticipationKeys()
if err != nil {
return
@@ -140,7 +140,7 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k
// ListParticipationKeys returns the available participation keys,
// as a response object.
-func (c *Client) ListParticipationKeys() (partKeyFiles generated.ParticipationKeysResponse, err error) {
+func (c *Client) ListParticipationKeys() (partKeyFiles model.ParticipationKeysResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
partKeyFiles, err = algod.GetParticipationKeys()
diff --git a/libgoal/transactions.go b/libgoal/transactions.go
index fb788f024..ac2eaf26a 100644
--- a/libgoal/transactions.go
+++ b/libgoal/transactions.go
@@ -23,8 +23,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklesignature"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
- v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
@@ -170,7 +169,7 @@ func (c *Client) BroadcastTransaction(stx transactions.SignedTxn) (txid string,
if err != nil {
return
}
- return resp.TxID, nil
+ return resp.TxId, nil
}
// BroadcastTransactionGroup broadcasts a signed transaction group to the network using algod
@@ -196,7 +195,7 @@ func (c *Client) SignAndBroadcastTransaction(walletHandle, pw []byte, utx transa
// generateRegistrationTransaction returns a transaction object for registering a Participation with its parent this is
// similar to account.Participation.GenerateRegistrationTransaction.
-func generateRegistrationTransaction(part generated.ParticipationKey, fee basics.MicroAlgos, txnFirstValid, txnLastValid basics.Round, leaseBytes [32]byte) (transactions.Transaction, error) {
+func generateRegistrationTransaction(part model.ParticipationKey, fee basics.MicroAlgos, txnFirstValid, txnLastValid basics.Round, leaseBytes [32]byte) (transactions.Transaction, error) {
addr, err := basics.UnmarshalChecksumAddress(part.Address)
if err != nil {
return transactions.Transaction{}, err
@@ -274,7 +273,7 @@ func (c *Client) MakeRegistrationTransactionWithGenesisID(part account.Participa
basics.Round(txnLastValid),
leaseBytes, includeStateProofKeys)
- goOnlineTx.Header.GenesisID = params.GenesisID
+ goOnlineTx.Header.GenesisID = params.GenesisId
// Check if the protocol supports genesis hash
if config.Consensus[protocol.ConsensusFuture].SupportGenesisHash {
@@ -503,50 +502,50 @@ func (c *Client) FillUnsignedTxTemplate(sender string, firstValid, lastValid, fe
}
// MakeUnsignedAppCreateTx makes a transaction for creating an application
-func (c *Client) MakeUnsignedAppCreateTx(onComplete transactions.OnCompletion, approvalProg []byte, clearProg []byte, globalSchema basics.StateSchema, localSchema basics.StateSchema, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, extrapages uint32) (tx transactions.Transaction, err error) {
- return c.MakeUnsignedApplicationCallTx(0, appArgs, accounts, foreignApps, foreignAssets, onComplete, approvalProg, clearProg, globalSchema, localSchema, extrapages)
+func (c *Client) MakeUnsignedAppCreateTx(onComplete transactions.OnCompletion, approvalProg []byte, clearProg []byte, globalSchema basics.StateSchema, localSchema basics.StateSchema, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, extrapages uint32) (tx transactions.Transaction, err error) {
+ return c.MakeUnsignedApplicationCallTx(0, appArgs, accounts, foreignApps, foreignAssets, boxes, onComplete, approvalProg, clearProg, globalSchema, localSchema, extrapages)
}
// MakeUnsignedAppUpdateTx makes a transaction for updating an application's programs
-func (c *Client) MakeUnsignedAppUpdateTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, approvalProg []byte, clearProg []byte) (tx transactions.Transaction, err error) {
- return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, transactions.UpdateApplicationOC, approvalProg, clearProg, emptySchema, emptySchema, 0)
+func (c *Client) MakeUnsignedAppUpdateTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, approvalProg []byte, clearProg []byte) (tx transactions.Transaction, err error) {
+ return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.UpdateApplicationOC, approvalProg, clearProg, emptySchema, emptySchema, 0)
}
// MakeUnsignedAppDeleteTx makes a transaction for deleting an application
-func (c *Client) MakeUnsignedAppDeleteTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) (tx transactions.Transaction, err error) {
- return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, transactions.DeleteApplicationOC, nil, nil, emptySchema, emptySchema, 0)
+func (c *Client) MakeUnsignedAppDeleteTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef) (tx transactions.Transaction, err error) {
+ return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.DeleteApplicationOC, nil, nil, emptySchema, emptySchema, 0)
}
// MakeUnsignedAppOptInTx makes a transaction for opting in to (allocating
// some account-specific state for) an application
-func (c *Client) MakeUnsignedAppOptInTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) (tx transactions.Transaction, err error) {
- return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, transactions.OptInOC, nil, nil, emptySchema, emptySchema, 0)
+func (c *Client) MakeUnsignedAppOptInTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef) (tx transactions.Transaction, err error) {
+ return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.OptInOC, nil, nil, emptySchema, emptySchema, 0)
}
// MakeUnsignedAppCloseOutTx makes a transaction for closing out of
// (deallocating all account-specific state for) an application
-func (c *Client) MakeUnsignedAppCloseOutTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) (tx transactions.Transaction, err error) {
- return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, transactions.CloseOutOC, nil, nil, emptySchema, emptySchema, 0)
+func (c *Client) MakeUnsignedAppCloseOutTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef) (tx transactions.Transaction, err error) {
+ return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.CloseOutOC, nil, nil, emptySchema, emptySchema, 0)
}
// MakeUnsignedAppClearStateTx makes a transaction for clearing out all
// account-specific state for an application. It may not be rejected by the
// application's logic.
-func (c *Client) MakeUnsignedAppClearStateTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) (tx transactions.Transaction, err error) {
- return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, transactions.ClearStateOC, nil, nil, emptySchema, emptySchema, 0)
+func (c *Client) MakeUnsignedAppClearStateTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef) (tx transactions.Transaction, err error) {
+ return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.ClearStateOC, nil, nil, emptySchema, emptySchema, 0)
}
// MakeUnsignedAppNoOpTx makes a transaction for interacting with an existing
// application, potentially updating any account-specific local state and
// global state associated with it.
-func (c *Client) MakeUnsignedAppNoOpTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) (tx transactions.Transaction, err error) {
- return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, transactions.NoOpOC, nil, nil, emptySchema, emptySchema, 0)
+func (c *Client) MakeUnsignedAppNoOpTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef) (tx transactions.Transaction, err error) {
+ return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.NoOpOC, nil, nil, emptySchema, emptySchema, 0)
}
// MakeUnsignedApplicationCallTx is a helper for the above ApplicationCall
// transaction constructors. A fully custom ApplicationCall transaction may
// be constructed using this method.
-func (c *Client) MakeUnsignedApplicationCallTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, onCompletion transactions.OnCompletion, approvalProg []byte, clearProg []byte, globalSchema basics.StateSchema, localSchema basics.StateSchema, extrapages uint32) (tx transactions.Transaction, err error) {
+func (c *Client) MakeUnsignedApplicationCallTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, onCompletion transactions.OnCompletion, approvalProg []byte, clearProg []byte, globalSchema basics.StateSchema, localSchema basics.StateSchema, extrapages uint32) (tx transactions.Transaction, err error) {
tx.Type = protocol.ApplicationCallTx
tx.ApplicationID = basics.AppIndex(appIdx)
tx.OnCompletion = onCompletion
@@ -559,6 +558,7 @@ func (c *Client) MakeUnsignedApplicationCallTx(appIdx uint64, appArgs [][]byte,
tx.ForeignApps = parseTxnForeignApps(foreignApps)
tx.ForeignAssets = parseTxnForeignAssets(foreignAssets)
+ tx.Boxes = boxes
tx.ApprovalProgram = approvalProg
tx.ClearStateProgram = clearProg
tx.LocalStateSchema = localSchema
@@ -706,42 +706,32 @@ func (c *Client) MakeUnsignedAssetDestroyTx(index uint64) (transactions.Transact
func (c *Client) MakeUnsignedAssetConfigTx(creator string, index uint64, newManager *string, newReserve *string, newFreeze *string, newClawback *string) (transactions.Transaction, error) {
var tx transactions.Transaction
var err error
- var ok bool
- // If the creator was passed in blank, look up asset info by index
- var params v1.AssetParams
- if creator == "" {
- params, err = c.AssetInformation(index)
- if err != nil {
- return tx, err
- }
- } else {
- // Fetch the current state, to fill in as a template
- current, err := c.AccountInformation(creator)
- if err != nil {
- return tx, err
- }
+ asset, err := c.AssetInformation(index)
+ if err != nil {
+ return tx, err
+ }
+ params := asset.Params
- params, ok = current.AssetParams[index]
- if !ok {
- return tx, fmt.Errorf("asset ID %d not found in account %s", index, creator)
- }
+ // If creator was passed in, check that the asset params match.
+ if creator != "" && creator != params.Creator {
+ return tx, fmt.Errorf("creator %s does not match asset ID %d", creator, index)
}
if newManager == nil {
- newManager = &params.ManagerAddr
+ newManager = params.Manager
}
if newReserve == nil {
- newReserve = &params.ReserveAddr
+ newReserve = params.Reserve
}
if newFreeze == nil {
- newFreeze = &params.FreezeAddr
+ newFreeze = params.Freeze
}
if newClawback == nil {
- newClawback = &params.ClawbackAddr
+ newClawback = params.Clawback
}
tx.Type = protocol.AssetConfigTx
diff --git a/logging/telemetryspec/event.go b/logging/telemetryspec/event.go
index 15a046164..0cc633429 100644
--- a/logging/telemetryspec/event.go
+++ b/logging/telemetryspec/event.go
@@ -18,6 +18,8 @@ package telemetryspec
import (
"time"
+
+ "github.com/algorand/go-algorand/util"
)
// Telemetry Events
@@ -228,6 +230,8 @@ const DisconnectPeerEvent Event = "DisconnectPeer"
type DisconnectPeerEventDetails struct {
PeerEventDetails
Reason string
+ // Received message counters for this peer while it was connected
+ TXCount, MICount, AVCount, PPCount uint64
}
// ErrorOutputEvent event
@@ -301,7 +305,11 @@ type PeerConnectionDetails struct {
// MessageDelay is the avarage relative message delay. Not being used for incoming connection.
MessageDelay int64 `json:",omitempty"`
// DuplicateFilterCount is the number of times this peer has sent us a message hash to filter that it had already sent before.
- DuplicateFilterCount int64
+ DuplicateFilterCount uint64
+ // These message counters count received messages from this peer.
+ TXCount, MICount, AVCount, PPCount uint64
+ // TCPInfo provides connection measurements from TCP.
+ TCP util.TCPInfo `json:",omitempty"`
}
// CatchpointGenerationEvent event
@@ -321,12 +329,33 @@ type CatchpointGenerationEventDetails struct {
BalancesWriteTime uint64
// AccountsCount is the number of accounts that were written into the generated catchpoint file
AccountsCount uint64
+ // KVsCount is the number of key-value (box) entries that were written into the generated catchpoint file
+ KVsCount uint64
// FileSize is the size of the catchpoint file, in bytes.
FileSize uint64
// CatchpointLabel is the catchpoint label for which the catchpoint file was generated.
CatchpointLabel string
}
+// CatchpointRootUpdateEvent event
+const CatchpointRootUpdateEvent Event = "CatchpointRoot"
+
+// CatchpointRootUpdateEventDetails is generated when the catchpoint merkle trie root is updated, when
+// account updates for rounds are flushed to disk.
+type CatchpointRootUpdateEventDetails struct {
+ Root string
+ OldBase uint64
+ NewBase uint64
+ NewPageCount int `json:"npc"`
+ NewNodeCount int `json:"nnc"`
+ UpdatedPageCount int `json:"upc"`
+ UpdatedNodeCount int `json:"unc"`
+ DeletedPageCount int `json:"dpc"`
+ FanoutReallocatedNodeCount int `json:"frnc"`
+ PackingReallocatedNodeCount int `json:"prnc"`
+ LoadedPages int `json:"lp"`
+}
+
// BalancesAccountVacuumEvent event
const BalancesAccountVacuumEvent Event = "VacuumBalances"
diff --git a/logging/telemetryspec/metric.go b/logging/telemetryspec/metric.go
index 5ce2a311d..ec538769c 100644
--- a/logging/telemetryspec/metric.go
+++ b/logging/telemetryspec/metric.go
@@ -44,6 +44,10 @@ type AssembleBlockStats struct {
StartCount int
IncludedCount int // number of transactions that are included in a block
InvalidCount int // number of transaction groups that are included in a block
+ MinFeeErrorCount int // number of transactions excluded because the fee is too low
+ ExpiredCount int // number of transactions removed because of expiration
+ ExpiredLongLivedCount int // number of expired transactions with non-super short LastValid values
+ LeaseErrorCount int // number of transactions removed because it has an already used lease
MinFee uint64
MaxFee uint64
AverageFee uint64
@@ -100,6 +104,10 @@ func (m AssembleBlockStats) String() string {
b.WriteString(fmt.Sprintf("StartCount:%d, ", m.StartCount))
b.WriteString(fmt.Sprintf("IncludedCount:%d, ", m.IncludedCount))
b.WriteString(fmt.Sprintf("InvalidCount:%d, ", m.InvalidCount))
+ b.WriteString(fmt.Sprintf("MinFeeErrorCount:%d, ", m.MinFeeErrorCount))
+ b.WriteString(fmt.Sprintf("ExpiredCount:%d, ", m.ExpiredCount))
+ b.WriteString(fmt.Sprintf("ExpiredLongLivedCount:%d, ", m.ExpiredLongLivedCount))
+ b.WriteString(fmt.Sprintf("LeaseErrorCount:%d, ", m.LeaseErrorCount))
b.WriteString(fmt.Sprintf("MinFee:%d, ", m.MinFee))
b.WriteString(fmt.Sprintf("MaxFee:%d, ", m.MaxFee))
b.WriteString(fmt.Sprintf("AverageFee:%d, ", m.AverageFee))
diff --git a/logging/usage.go b/logging/usage.go
index 6646dfbae..da668a72a 100644
--- a/logging/usage.go
+++ b/logging/usage.go
@@ -18,12 +18,16 @@ package logging
import (
"context"
+ "runtime"
"sync"
"time"
"github.com/algorand/go-algorand/util"
+ "github.com/algorand/go-algorand/util/metrics"
)
+var ramUsageGauge = metrics.MakeGauge(metrics.MetricName{Name: "algod_ram_usage", Description: "number of bytes runtime.ReadMemStats().HeapInuse"})
+
// UsageLogThread utility logging method
func UsageLogThread(ctx context.Context, log Logger, period time.Duration, wg *sync.WaitGroup) {
if wg != nil {
@@ -34,6 +38,7 @@ func UsageLogThread(ctx context.Context, log Logger, period time.Duration, wg *s
var prevUtime, prevStime int64
var Utime, Stime int64
var prevTime time.Time
+ var mst runtime.MemStats
ticker := time.NewTicker(period)
hasPrev := false
@@ -48,13 +53,16 @@ func UsageLogThread(ctx context.Context, log Logger, period time.Duration, wg *s
now = time.Now()
Utime, Stime, _ = util.GetCurrentProcessTimes()
+ runtime.ReadMemStats(&mst)
+ ramUsageGauge.Set(float64(mst.HeapInuse))
+
if hasPrev {
userNanos := Utime - prevUtime
sysNanos := Stime - prevStime
wallNanos := now.Sub(prevTime).Nanoseconds()
userf := float64(userNanos) / float64(wallNanos)
sysf := float64(sysNanos) / float64(wallNanos)
- log.Infof("usage nanos wall=%d user=%d sys=%d pu=%0.4f%% ps=%0.4f%%", wallNanos, userNanos, sysNanos, userf*100.0, sysf*100.0)
+ log.Infof("usage nanos wall=%d user=%d sys=%d pu=%0.4f%% ps=%0.4f%% inuse=%d", wallNanos, userNanos, sysNanos, userf*100.0, sysf*100.0, mst.HeapInuse)
} else {
hasPrev = true
}
diff --git a/netdeploy/network.go b/netdeploy/network.go
index 78a665b78..6d14819c4 100644
--- a/netdeploy/network.go
+++ b/netdeploy/network.go
@@ -25,9 +25,8 @@ import (
"strings"
"time"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
-
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/gen"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/nodecontrol"
@@ -402,7 +401,7 @@ func (n Network) Stop(binDir string) {
// NetworkNodeStatus represents the result from checking the status of a particular node instance
type NetworkNodeStatus struct {
- Status generatedV2.NodeStatusResponse
+ Status model.NodeStatusResponse
Error error
}
@@ -430,7 +429,7 @@ func (n Network) NodesStatus(binDir string) map[string]NetworkNodeStatus {
statuses := make(map[string]NetworkNodeStatus)
for _, relayDir := range n.cfg.RelayDirs {
- var status generatedV2.NodeStatusResponse
+ var status model.NodeStatusResponse
nc := nodecontrol.MakeNodeController(binDir, n.getNodeFullPath(relayDir))
algodClient, err := nc.AlgodClient()
if err == nil {
@@ -443,7 +442,7 @@ func (n Network) NodesStatus(binDir string) map[string]NetworkNodeStatus {
}
for _, nodeDir := range n.nodeDirs {
- var status generatedV2.NodeStatusResponse
+ var status model.NodeStatusResponse
nc := nodecontrol.MakeNodeController(binDir, n.getNodeFullPath(nodeDir))
algodClient, err := nc.AlgodClient()
if err == nil {
diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go
index 23828d469..353389c99 100644
--- a/netdeploy/remote/deployedNetwork.go
+++ b/netdeploy/remote/deployedNetwork.go
@@ -17,6 +17,7 @@
package remote
import (
+ "encoding/binary"
"encoding/json"
"fmt"
"io/fs"
@@ -103,6 +104,8 @@ type netState struct {
accounts []basics.Address
txnCount uint64
fundPerAccount basics.MicroAlgos
+
+ log logging.Logger
}
const program = `#pragma version 2
@@ -382,10 +385,10 @@ func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, g
}
accounts[addr] = alloc.State
-
}
//initial state
+ log := logging.NewLogger()
bootstrappedNet := netState{
nAssets: fileCfgs.GeneratedAssetsCount,
@@ -397,6 +400,7 @@ func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, g
genesisHash: genesis.Hash(),
poolAddr: poolAddr,
sinkAddr: sinkAddr,
+ log: log,
}
var params config.ConsensusParams
@@ -420,8 +424,9 @@ func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, g
max := fileCfgs.BalanceRange[1]
bal := rand.Int63n(max-min) + min
bootstrappedNet.fundPerAccount = basics.MicroAlgos{Raw: uint64(bal)}
- totalFunds := accounts[src].MicroAlgos.Raw + bootstrappedNet.fundPerAccount.Raw*bootstrappedNet.nAccounts + bootstrappedNet.roundTxnCnt*fileCfgs.NumRounds
- accounts[src] = basics.MakeAccountData(basics.Online, basics.MicroAlgos{Raw: totalFunds})
+ srcAcct := accounts[src]
+ srcAcct.MicroAlgos.Raw += bootstrappedNet.fundPerAccount.Raw*bootstrappedNet.nAccounts + bootstrappedNet.roundTxnCnt*fileCfgs.NumRounds
+ accounts[src] = srcAcct
//init block
initState, err := generateInitState(accounts, &bootstrappedNet)
@@ -432,7 +437,6 @@ func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, g
localCfg.Archival = true
localCfg.CatchpointTracking = -1
localCfg.LedgerSynchronousMode = 0
- log := logging.NewLogger()
l, err := ledger.OpenLedger(log, filepath.Join(genesisFolder, "bootstrapped"), false, initState, localCfg)
if err != nil {
return err
@@ -440,16 +444,17 @@ func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, g
//create accounts, apps and assets
prev, _ := l.Block(l.Latest())
- err = generateAccounts(src, fileCfgs.RoundTransactionsCount, prev, l, &bootstrappedNet, params)
+ err = generateAccounts(src, fileCfgs.RoundTransactionsCount, prev, l, &bootstrappedNet, params, log)
if err != nil {
return err
}
+ log.Info("setup done, more txns")
//create more transactions
prev, _ = l.Block(l.Latest())
for i := uint64(bootstrappedNet.round); i < fileCfgs.NumRounds; i++ {
bootstrappedNet.round++
- blk, _ := createBlock(src, prev, fileCfgs.RoundTransactionsCount, &bootstrappedNet, params)
+ blk, _ := createBlock(src, prev, fileCfgs.RoundTransactionsCount, &bootstrappedNet, params, log)
err = l.AddBlock(blk, agreement.Certificate{Round: bootstrappedNet.round})
if err != nil {
fmt.Printf("Error %v\n", err)
@@ -516,7 +521,7 @@ func generateInitState(accounts map[basics.Address]basics.AccountData, bootstrap
return initState, nil
}
-func createBlock(src basics.Address, prev bookkeeping.Block, roundTxnCnt uint64, bootstrappedNet *netState, csParams config.ConsensusParams) (bookkeeping.Block, error) {
+func createBlock(src basics.Address, prev bookkeeping.Block, roundTxnCnt uint64, bootstrappedNet *netState, csParams config.ConsensusParams, log logging.Logger) (bookkeeping.Block, error) {
payset := make([]transactions.SignedTxnInBlock, 0, roundTxnCnt)
txibs := make([]transactions.SignedTxnInBlock, 0, roundTxnCnt)
@@ -560,15 +565,17 @@ func createBlock(src basics.Address, prev bookkeeping.Block, roundTxnCnt uint64,
return bookkeeping.Block{}, err
}
+ log.Infof("created block[%d] %d txns", block.BlockHeader.Round, len(payset))
+
return block, nil
}
-func generateAccounts(src basics.Address, roundTxnCnt uint64, prev bookkeeping.Block, l *ledger.Ledger, bootstrappedNet *netState, csParams config.ConsensusParams) error {
+func generateAccounts(src basics.Address, roundTxnCnt uint64, prev bookkeeping.Block, l *ledger.Ledger, bootstrappedNet *netState, csParams config.ConsensusParams, log logging.Logger) error {
for !bootstrappedNet.accountsCreated {
//create accounts
bootstrappedNet.round++
- blk, _ := createBlock(src, prev, roundTxnCnt, bootstrappedNet, csParams)
+ blk, _ := createBlock(src, prev, roundTxnCnt, bootstrappedNet, csParams, log)
err := l.AddBlock(blk, agreement.Certificate{Round: bootstrappedNet.round})
if err != nil {
fmt.Printf("Error %v\n", err)
@@ -627,12 +634,15 @@ func accountsNeeded(appsCount uint64, assetCount uint64, params config.Consensus
func createSignedTx(src basics.Address, round basics.Round, params config.ConsensusParams, bootstrappedNet *netState) ([]transactions.SignedTxn, error) {
if bootstrappedNet.nApplications == 0 && bootstrappedNet.nAccounts == 0 && bootstrappedNet.nAssets == 0 {
+ if !bootstrappedNet.accountsCreated {
+ bootstrappedNet.log.Infof("done creating accounts, have %d", len(bootstrappedNet.accounts))
+ }
bootstrappedNet.accountsCreated = true
}
var sgtxns []transactions.SignedTxn
header := transactions.Header{
- Fee: basics.MicroAlgos{Raw: 1},
+ Fee: basics.MicroAlgos{Raw: params.MinTxnFee},
FirstValid: round,
LastValid: round,
GenesisID: bootstrappedNet.genesisID,
@@ -640,7 +650,6 @@ func createSignedTx(src basics.Address, round basics.Round, params config.Consen
}
if bootstrappedNet.txnState == protocol.PaymentTx {
- var accounts []basics.Address
bootstrappedNet.appsPerAcct = 0
bootstrappedNet.assetPerAcct = 0
n := bootstrappedNet.nAccounts
@@ -652,7 +661,7 @@ func createSignedTx(src basics.Address, round basics.Round, params config.Consen
for i := uint64(0); i < n; i++ {
secretDst := keypair()
dst := basics.Address(secretDst.SignatureVerifier)
- accounts = append(accounts, dst)
+ bootstrappedNet.accounts = append(bootstrappedNet.accounts, dst)
header.Sender = src
@@ -668,16 +677,17 @@ func createSignedTx(src basics.Address, round basics.Round, params config.Consen
sgtxns = append(sgtxns, t)
}
bootstrappedNet.nAccounts -= uint64(len(sgtxns))
- bootstrappedNet.accounts = accounts
if bootstrappedNet.nAssets > 0 {
+ bootstrappedNet.log.Info("switch to acfg mode")
bootstrappedNet.txnState = protocol.AssetConfigTx
} else if bootstrappedNet.nApplications > 0 {
+ bootstrappedNet.log.Info("switch to app cfg mode")
bootstrappedNet.txnState = protocol.ApplicationCallTx
}
} else {
//send payments to created accounts randomly
- accti := rand.Intn(len(bootstrappedNet.accounts))
for i := uint64(0); i < n; i++ {
+ accti := rand.Intn(len(bootstrappedNet.accounts))
header.Sender = src
tx := transactions.Transaction{
Type: protocol.PaymentTx,
@@ -687,6 +697,8 @@ func createSignedTx(src basics.Address, round basics.Round, params config.Consen
Amount: basics.MicroAlgos{Raw: 0},
},
}
+ tx.Header.Note = make([]byte, 8)
+ binary.LittleEndian.PutUint64(tx.Header.Note, bootstrappedNet.roundTxnCnt+i)
t := transactions.SignedTxn{Txn: tx}
sgtxns = append(sgtxns, t)
}
@@ -731,8 +743,10 @@ func createSignedTx(src basics.Address, round basics.Round, params config.Consen
}
if bootstrappedNet.nAssets == 0 || bootstrappedNet.assetPerAcct == maxAssets {
if bootstrappedNet.nApplications > 0 {
+ bootstrappedNet.log.Info("switch to app cfg mode")
bootstrappedNet.txnState = protocol.ApplicationCallTx
} else {
+ bootstrappedNet.log.Info("switch to pay mode")
bootstrappedNet.txnState = protocol.PaymentTx
}
@@ -785,6 +799,7 @@ func createSignedTx(src basics.Address, round basics.Round, params config.Consen
maxApps = config.Consensus[protocol.ConsensusV30].MaxAppsCreated
}
if bootstrappedNet.nApplications == 0 || bootstrappedNet.appsPerAcct == maxApps {
+ bootstrappedNet.log.Info("switch to pay mode")
bootstrappedNet.txnState = protocol.PaymentTx
}
}
@@ -1047,7 +1062,7 @@ func extractPublicPort(address string) (port int, err error) {
func computeRootStorage(nodeCount, relayCount int) int {
// For now, we'll just use root storage -- assume short-lived instances
// 10 per node should be good for a week (add relayCount * 0 so param is used)
- minGB := 10 + nodeCount*10 + (relayCount * 50)
+ minGB := 20 + (nodeCount * 10) + (relayCount * 50)
return minGB
}
diff --git a/netdeploy/remote/deployedNetwork_test.go b/netdeploy/remote/deployedNetwork_test.go
index 077649ea8..f33e7cea2 100644
--- a/netdeploy/remote/deployedNetwork_test.go
+++ b/netdeploy/remote/deployedNetwork_test.go
@@ -23,11 +23,12 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
-func TestCreateSignedTx(t *testing.T) {
+func TestCreateSignedTxBasic(t *testing.T) {
partitiontest.PartitionTest(t)
var networkState netState
@@ -36,6 +37,7 @@ func TestCreateSignedTx(t *testing.T) {
networkState.nAccounts = 10
networkState.roundTxnCnt = 4
networkState.txnState = protocol.PaymentTx
+ networkState.log = logging.TestingLog(t)
params := config.Consensus[protocol.ConsensusCurrentVersion]
@@ -50,7 +52,8 @@ func TestCreateSignedTx(t *testing.T) {
require.Equal(t, protocol.PaymentTx, sntx.Txn.Type)
}
- initialAccounts := networkState.accounts
+ initialAccounts := make([]basics.Address, len(networkState.accounts))
+ copy(initialAccounts, networkState.accounts)
// should be creating assets next
sgtxns, _ = createSignedTx(src, basics.Round(1), params, &networkState)
@@ -59,7 +62,7 @@ func TestCreateSignedTx(t *testing.T) {
require.Equal(t, protocol.ApplicationCallTx, networkState.txnState)
require.Equal(t, uint64(0), networkState.nAssets)
// same accounts should be used
- require.Equal(t, initialAccounts[0], accounts[0])
+ require.Equal(t, initialAccounts, accounts)
for _, sntx := range sgtxns {
require.Equal(t, protocol.AssetConfigTx, sntx.Txn.Type)
}
@@ -69,7 +72,7 @@ func TestCreateSignedTx(t *testing.T) {
require.Equal(t, 2, len(sgtxns))
require.Equal(t, protocol.PaymentTx, networkState.txnState)
require.Equal(t, uint64(0), networkState.nApplications)
- require.Equal(t, initialAccounts[0], accounts[0])
+ require.Equal(t, initialAccounts, accounts)
for _, sntx := range sgtxns {
require.Equal(t, protocol.ApplicationCallTx, sntx.Txn.Type)
}
@@ -78,14 +81,22 @@ func TestCreateSignedTx(t *testing.T) {
sgtxns, _ = createSignedTx(src, basics.Round(1), params, &networkState)
require.Equal(t, 4, len(sgtxns))
require.Equal(t, protocol.PaymentTx, networkState.txnState)
- //new accounts should be created
- accounts = networkState.accounts
- require.NotEqual(t, initialAccounts[0], accounts[0])
+ require.Equal(t, initialAccounts, accounts)
for _, sntx := range sgtxns {
require.Equal(t, protocol.PaymentTx, sntx.Txn.Type)
}
+}
+func TestCreateSignedTxAssets(t *testing.T) {
// assets per account should not exceed limit
+ partitiontest.PartitionTest(t)
+
+ params := config.Consensus[protocol.ConsensusCurrentVersion]
+ secretDst := keypair()
+ src := basics.Address(secretDst.SignatureVerifier)
+
+ var networkState netState
+ networkState.log = logging.TestingLog(t)
networkState.txnState = protocol.PaymentTx
networkState.nAssets = 10
networkState.nApplications = 10
diff --git a/network/limitlistener/rejectingLimitListener.go b/network/limitlistener/rejectingLimitListener.go
index 60d89199c..9d6a5914b 100644
--- a/network/limitlistener/rejectingLimitListener.go
+++ b/network/limitlistener/rejectingLimitListener.go
@@ -83,3 +83,7 @@ func (l *rejectingLimitListenerConn) Close() error {
l.releaseOnce.Do(l.release)
return err
}
+
+func (l *rejectingLimitListenerConn) UnderlyingConn() net.Conn {
+ return l.Conn
+}
diff --git a/network/requestTracker.go b/network/requestTracker.go
index fd78dadca..2ae34c81f 100644
--- a/network/requestTracker.go
+++ b/network/requestTracker.go
@@ -254,6 +254,10 @@ type requestTrackedConnection struct {
tracker *RequestTracker
}
+func (c *requestTrackedConnection) UnderlyingConn() net.Conn {
+ return c.Conn
+}
+
// Close removes the connection from the tracker's connections map and call the underlaying Close function.
func (c *requestTrackedConnection) Close() error {
c.tracker.hostRequestsMu.Lock()
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index 48884f177..d6778a132 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -1058,7 +1058,7 @@ func (wn *WebsocketNetwork) checkIncomingConnectionVariables(response http.Respo
response.WriteHeader(http.StatusPreconditionFailed)
n, err := response.Write([]byte("mismatching genesis ID"))
if err != nil {
- wn.log.Warnf("ws failed to write mismatching genesis ID response '%s' : n = %d err = %v", n, err)
+ wn.log.Warnf("ws failed to write mismatching genesis ID response '%s' : n = %d err = %v", otherGenesisID, n, err)
}
return http.StatusPreconditionFailed
}
@@ -1440,9 +1440,10 @@ func (wn *WebsocketNetwork) peerSnapshot(dest []*wsPeer) ([]*wsPeer, int32) {
// preparePeerData prepares batches of data for sending.
// It performs optional zstd compression for proposal massages
-func (wn *WebsocketNetwork) preparePeerData(request broadcastRequest, prio bool, peers []*wsPeer) ([][]byte, [][]byte, []crypto.Digest) {
+func (wn *WebsocketNetwork) preparePeerData(request broadcastRequest, prio bool, peers []*wsPeer) ([][]byte, [][]byte, []crypto.Digest, bool) {
// determine if there is a payload proposal and peers supporting compressed payloads
wantCompression := false
+ containsPrioPPTag := false
if prio {
wantCompression = checkCanCompress(request, peers)
}
@@ -1463,8 +1464,11 @@ func (wn *WebsocketNetwork) preparePeerData(request broadcastRequest, prio bool,
digests[i] = crypto.Hash(mbytes)
}
- if prio && request.tags[i] == protocol.ProposalPayloadTag {
- networkPrioPPNonCompressedSize.AddUint64(uint64(len(d)), nil)
+ if prio {
+ if request.tags[i] == protocol.ProposalPayloadTag {
+ networkPrioPPNonCompressedSize.AddUint64(uint64(len(d)), nil)
+ containsPrioPPTag = true
+ }
}
if wantCompression {
@@ -1482,7 +1486,7 @@ func (wn *WebsocketNetwork) preparePeerData(request broadcastRequest, prio bool,
}
}
}
- return data, dataCompressed, digests
+ return data, dataCompressed, digests, containsPrioPPTag
}
// prio is set if the broadcast is a high-priority broadcast.
@@ -1499,7 +1503,7 @@ func (wn *WebsocketNetwork) innerBroadcast(request broadcastRequest, prio bool,
}
start := time.Now()
- data, dataWithCompression, digests := wn.preparePeerData(request, prio, peers)
+ data, dataWithCompression, digests, containsPrioPPTag := wn.preparePeerData(request, prio, peers)
// first send to all the easy outbound peers who don't block, get them started.
sentMessageCount := 0
@@ -1515,12 +1519,16 @@ func (wn *WebsocketNetwork) innerBroadcast(request broadcastRequest, prio bool,
// if this peer supports compressed proposals and compressed data batch is filled out, use it
ok = peer.writeNonBlockMsgs(request.ctx, dataWithCompression, prio, digests, request.enqueueTime)
if prio {
- networkPrioBatchesPPWithCompression.Inc(nil)
+ if containsPrioPPTag {
+ networkPrioBatchesPPWithCompression.Inc(nil)
+ }
}
} else {
ok = peer.writeNonBlockMsgs(request.ctx, data, prio, digests, request.enqueueTime)
if prio {
- networkPrioBatchesPPWithoutCompression.Inc(nil)
+ if containsPrioPPTag {
+ networkPrioBatchesPPWithoutCompression.Inc(nil)
+ }
}
}
if ok {
@@ -1805,21 +1813,46 @@ func (wn *WebsocketNetwork) OnNetworkAdvance() {
// to the telemetry server. Internally, it's using a timer to ensure that it would only
// send the information once every hour ( configurable via PeerConnectionsUpdateInterval )
func (wn *WebsocketNetwork) sendPeerConnectionsTelemetryStatus() {
+ if !wn.log.GetTelemetryEnabled() {
+ return
+ }
now := time.Now()
if wn.lastPeerConnectionsSent.Add(time.Duration(wn.config.PeerConnectionsUpdateInterval)*time.Second).After(now) || wn.config.PeerConnectionsUpdateInterval <= 0 {
// it's not yet time to send the update.
return
}
wn.lastPeerConnectionsSent = now
+
var peers []*wsPeer
peers, _ = wn.peerSnapshot(peers)
+ connectionDetails := wn.getPeerConnectionTelemetryDetails(now, peers)
+ wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.PeerConnectionsEvent, connectionDetails)
+}
+
+func (wn *WebsocketNetwork) getPeerConnectionTelemetryDetails(now time.Time, peers []*wsPeer) telemetryspec.PeersConnectionDetails {
var connectionDetails telemetryspec.PeersConnectionDetails
for _, peer := range peers {
connDetail := telemetryspec.PeerConnectionDetails{
ConnectionDuration: uint(now.Sub(peer.createTime).Seconds()),
TelemetryGUID: peer.TelemetryGUID,
InstanceName: peer.InstanceName,
- DuplicateFilterCount: peer.duplicateFilterCount,
+ DuplicateFilterCount: atomic.LoadUint64(&peer.duplicateFilterCount),
+ TXCount: atomic.LoadUint64(&peer.txMessageCount),
+ MICount: atomic.LoadUint64(&peer.miMessageCount),
+ AVCount: atomic.LoadUint64(&peer.avMessageCount),
+ PPCount: atomic.LoadUint64(&peer.ppMessageCount),
+ }
+ // unwrap websocket.Conn, requestTrackedConnection, rejectingLimitListenerConn
+ var uconn net.Conn = peer.conn.UnderlyingConn()
+ for i := 0; i < 10; i++ {
+ wconn, ok := uconn.(wrappedConn)
+ if !ok {
+ break
+ }
+ uconn = wconn.UnderlyingConn()
+ }
+ if tcpInfo, err := util.GetConnTCPInfo(uconn); err == nil && tcpInfo != nil {
+ connDetail.TCP = *tcpInfo
}
if peer.outgoing {
connDetail.Address = justHost(peer.conn.RemoteAddr().String())
@@ -1831,8 +1864,7 @@ func (wn *WebsocketNetwork) sendPeerConnectionsTelemetryStatus() {
connectionDetails.IncomingPeers = append(connectionDetails.IncomingPeers, connDetail)
}
}
-
- wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.PeerConnectionsEvent, connectionDetails)
+ return connectionDetails
}
// prioWeightRefreshTime controls how often we refresh the weights
@@ -2294,6 +2326,10 @@ func (wn *WebsocketNetwork) removePeer(peer *wsPeer, reason disconnectReason) {
telemetryspec.DisconnectPeerEventDetails{
PeerEventDetails: eventDetails,
Reason: string(reason),
+ TXCount: atomic.LoadUint64(&peer.txMessageCount),
+ MICount: atomic.LoadUint64(&peer.miMessageCount),
+ AVCount: atomic.LoadUint64(&peer.avMessageCount),
+ PPCount: atomic.LoadUint64(&peer.ppMessageCount),
})
peers.Set(float64(wn.NumPeers()))
@@ -2425,6 +2461,7 @@ func (wn *WebsocketNetwork) updateMessagesOfInterestEnc() {
atomic.AddUint32(&wn.messagesOfInterestGeneration, 1)
var peers []*wsPeer
peers, _ = wn.peerSnapshot(peers)
+ wn.log.Infof("updateMessagesOfInterestEnc maybe sending messagesOfInterest %v", wn.messagesOfInterest)
for _, peer := range peers {
wn.maybeSendMessagesOfInterest(peer, wn.messagesOfInterestEnc)
}
@@ -2436,9 +2473,11 @@ func (wn *WebsocketNetwork) postMessagesOfInterestThread() {
// if we're not a relay, and not participating, we don't need txn pool
wantTXGossip := wn.nodeInfo.IsParticipating()
if wantTXGossip && (wn.wantTXGossip != wantTXGossipYes) {
+ wn.log.Infof("postMessagesOfInterestThread: enabling TX gossip")
wn.RegisterMessageInterest(protocol.TxnTag)
atomic.StoreUint32(&wn.wantTXGossip, wantTXGossipYes)
} else if !wantTXGossip && (wn.wantTXGossip != wantTXGossipNo) {
+ wn.log.Infof("postMessagesOfInterestThread: disabling TX gossip")
wn.DeregisterMessageInterest(protocol.TxnTag)
atomic.StoreUint32(&wn.wantTXGossip, wantTXGossipNo)
}
diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go
index 37dd646aa..61e067828 100644
--- a/network/wsNetwork_test.go
+++ b/network/wsNetwork_test.go
@@ -20,6 +20,7 @@ import (
"bytes"
"context"
"encoding/binary"
+ "encoding/json"
"fmt"
"io"
"math/rand"
@@ -268,14 +269,17 @@ func netStop(t testing.TB, wn *WebsocketNetwork, name string) {
t.Logf("%s done", name)
}
-// Set up two nodes, test that a.Broadcast is received by B
-func TestWebsocketNetworkBasic(t *testing.T) {
- partitiontest.PartitionTest(t)
+func setupWebsocketNetworkAB(t *testing.T, countTarget int) (*WebsocketNetwork, *WebsocketNetwork, *messageCounterHandler, func()) {
+ success := false
netA := makeTestWebsocketNode(t)
netA.config.GossipFanout = 1
netA.Start()
- defer netStop(t, netA, "A")
+ defer func() {
+ if !success {
+ netStop(t, netA, "A")
+ }
+ }()
netB := makeTestWebsocketNode(t)
netB.config.GossipFanout = 1
addrA, postListen := netA.Address()
@@ -283,9 +287,12 @@ func TestWebsocketNetworkBasic(t *testing.T) {
t.Log(addrA)
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
- defer netStop(t, netB, "B")
- counter := newMessageCounter(t, 2)
- counterDone := counter.done
+ defer func() {
+ if !success {
+ netStop(t, netB, "B")
+ }
+ }()
+ counter := newMessageCounter(t, countTarget)
netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
readyTimeout := time.NewTimer(2 * time.Second)
@@ -294,6 +301,21 @@ func TestWebsocketNetworkBasic(t *testing.T) {
waitReady(t, netB, readyTimeout.C)
t.Log("b ready")
+ success = true
+ closeFunc := func() {
+ netStop(t, netB, "B")
+ netStop(t, netB, "A")
+ }
+ return netA, netB, counter, closeFunc
+}
+
+// Set up two nodes, test that a.Broadcast is received by B
+func TestWebsocketNetworkBasic(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ netA, _, counter, closeFunc := setupWebsocketNetworkAB(t, 2)
+ defer closeFunc()
+ counterDone := counter.done
netA.Broadcast(context.Background(), protocol.TxnTag, []byte("foo"), false, nil)
netA.Broadcast(context.Background(), protocol.TxnTag, []byte("bar"), false, nil)
@@ -384,27 +406,9 @@ func TestWebsocketProposalPayloadCompression(t *testing.T) {
func TestWebsocketNetworkUnicast(t *testing.T) {
partitiontest.PartitionTest(t)
- netA := makeTestWebsocketNode(t)
- netA.config.GossipFanout = 1
- netA.Start()
- defer netStop(t, netA, "A")
- netB := makeTestWebsocketNode(t)
- netB.config.GossipFanout = 1
- addrA, postListen := netA.Address()
- require.True(t, postListen)
- t.Log(addrA)
- netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
- netB.Start()
- defer netStop(t, netB, "B")
- counter := newMessageCounter(t, 2)
+ netA, _, counter, closeFunc := setupWebsocketNetworkAB(t, 2)
+ defer closeFunc()
counterDone := counter.done
- netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
-
- readyTimeout := time.NewTimer(2 * time.Second)
- waitReady(t, netA, readyTimeout.C)
- t.Log("a ready")
- waitReady(t, netB, readyTimeout.C)
- t.Log("b ready")
require.Equal(t, 1, len(netA.peers))
require.Equal(t, 1, len(netA.GetPeers(PeersConnectedIn)))
@@ -425,26 +429,8 @@ func TestWebsocketNetworkUnicast(t *testing.T) {
func TestWebsocketPeerData(t *testing.T) {
partitiontest.PartitionTest(t)
- netA := makeTestWebsocketNode(t)
- netA.config.GossipFanout = 1
- netA.Start()
- defer netStop(t, netA, "A")
- netB := makeTestWebsocketNode(t)
- netB.config.GossipFanout = 1
- addrA, postListen := netA.Address()
- require.True(t, postListen)
- t.Log(addrA)
- netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
- netB.Start()
- defer netStop(t, netB, "B")
- counter := newMessageCounter(t, 2)
- netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
-
- readyTimeout := time.NewTimer(2 * time.Second)
- waitReady(t, netA, readyTimeout.C)
- t.Log("a ready")
- waitReady(t, netB, readyTimeout.C)
- t.Log("b ready")
+ netA, _, _, closeFunc := setupWebsocketNetworkAB(t, 2)
+ defer closeFunc()
require.Equal(t, 1, len(netA.peers))
require.Equal(t, 1, len(netA.GetPeers(PeersConnectedIn)))
@@ -463,27 +449,9 @@ func TestWebsocketPeerData(t *testing.T) {
func TestWebsocketNetworkArray(t *testing.T) {
partitiontest.PartitionTest(t)
- netA := makeTestWebsocketNode(t)
- netA.config.GossipFanout = 1
- netA.Start()
- defer netStop(t, netA, "A")
- netB := makeTestWebsocketNode(t)
- netB.config.GossipFanout = 1
- addrA, postListen := netA.Address()
- require.True(t, postListen)
- t.Log(addrA)
- netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
- netB.Start()
- defer netStop(t, netB, "B")
- counter := newMessageCounter(t, 3)
+ netA, _, counter, closeFunc := setupWebsocketNetworkAB(t, 3)
+ defer closeFunc()
counterDone := counter.done
- netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
-
- readyTimeout := time.NewTimer(2 * time.Second)
- waitReady(t, netA, readyTimeout.C)
- t.Log("a ready")
- waitReady(t, netB, readyTimeout.C)
- t.Log("b ready")
tags := []protocol.Tag{protocol.TxnTag, protocol.TxnTag, protocol.TxnTag}
data := [][]byte{[]byte("foo"), []byte("bar"), []byte("algo")}
@@ -500,27 +468,9 @@ func TestWebsocketNetworkArray(t *testing.T) {
func TestWebsocketNetworkCancel(t *testing.T) {
partitiontest.PartitionTest(t)
- netA := makeTestWebsocketNode(t)
- netA.config.GossipFanout = 1
- netA.Start()
- defer netStop(t, netA, "A")
- netB := makeTestWebsocketNode(t)
- netB.config.GossipFanout = 1
- addrA, postListen := netA.Address()
- require.True(t, postListen)
- t.Log(addrA)
- netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
- netB.Start()
- defer netStop(t, netB, "B")
- counter := newMessageCounter(t, 100)
+ netA, _, counter, closeFunc := setupWebsocketNetworkAB(t, 100)
+ defer closeFunc()
counterDone := counter.done
- netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
-
- readyTimeout := time.NewTimer(2 * time.Second)
- waitReady(t, netA, readyTimeout.C)
- t.Log("a ready")
- waitReady(t, netB, readyTimeout.C)
- t.Log("b ready")
tags := make([]protocol.Tag, 100)
data := make([][]byte, 100)
@@ -721,29 +671,15 @@ func TestAddrToGossipAddr(t *testing.T) {
type nopConn struct{}
-func (nc *nopConn) RemoteAddr() net.Addr {
- return nil
-}
-func (nc *nopConn) NextReader() (int, io.Reader, error) {
- return 0, nil, nil
-}
-func (nc *nopConn) WriteMessage(int, []byte) error {
- return nil
-}
-func (nc *nopConn) WriteControl(int, []byte, time.Time) error {
- return nil
-}
-func (nc *nopConn) SetReadLimit(limit int64) {
-}
-func (nc *nopConn) CloseWithoutFlush() error {
- return nil
-}
-func (nc *nopConn) SetPingHandler(h func(appData string) error) {
-
-}
-func (nc *nopConn) SetPongHandler(h func(appData string) error) {
-
-}
+func (nc *nopConn) RemoteAddr() net.Addr { return nil }
+func (nc *nopConn) NextReader() (int, io.Reader, error) { return 0, nil, nil }
+func (nc *nopConn) WriteMessage(int, []byte) error { return nil }
+func (nc *nopConn) WriteControl(int, []byte, time.Time) error { return nil }
+func (nc *nopConn) SetReadLimit(limit int64) {}
+func (nc *nopConn) CloseWithoutFlush() error { return nil }
+func (nc *nopConn) SetPingHandler(h func(appData string) error) {}
+func (nc *nopConn) SetPongHandler(h func(appData string) error) {}
+func (nc *nopConn) UnderlyingConn() net.Conn { return nil }
var nopConnSingleton = nopConn{}
@@ -2690,7 +2626,7 @@ func TestParseHostOrURL(t *testing.T) {
func TestPreparePeerData(t *testing.T) {
partitiontest.PartitionTest(t)
- // no comression
+ // no compression
req := broadcastRequest{
tags: []protocol.Tag{protocol.AgreementVoteTag, protocol.ProposalPayloadTag},
data: [][]byte{[]byte("test"), []byte("data")},
@@ -2698,12 +2634,13 @@ func TestPreparePeerData(t *testing.T) {
peers := []*wsPeer{}
wn := WebsocketNetwork{}
- data, comp, digests := wn.preparePeerData(req, false, peers)
+ data, comp, digests, seenPrioPPTag := wn.preparePeerData(req, false, peers)
require.NotEmpty(t, data)
require.Empty(t, comp)
require.NotEmpty(t, digests)
require.Equal(t, len(req.data), len(digests))
require.Equal(t, len(data), len(digests))
+ require.False(t, seenPrioPPTag)
for i := range data {
require.Equal(t, append([]byte(req.tags[i]), req.data[i]...), data[i])
@@ -2717,13 +2654,14 @@ func TestPreparePeerData(t *testing.T) {
features: pfCompressedProposal,
}
peers = []*wsPeer{&peer1, &peer2}
- data, comp, digests = wn.preparePeerData(req, true, peers)
+ data, comp, digests, seenPrioPPTag = wn.preparePeerData(req, true, peers)
require.NotEmpty(t, data)
require.NotEmpty(t, comp)
require.NotEmpty(t, digests)
require.Equal(t, len(req.data), len(digests))
require.Equal(t, len(data), len(digests))
require.Equal(t, len(comp), len(digests))
+ require.True(t, seenPrioPPTag)
for i := range data {
require.Equal(t, append([]byte(req.tags[i]), req.data[i]...), data[i])
@@ -2739,3 +2677,69 @@ func TestPreparePeerData(t *testing.T) {
}
}
}
+
+func TestWebsocketNetworkTelemetryTCP(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ if strings.ToUpper(os.Getenv("CIRCLECI")) == "TRUE" {
+ t.Skip("Flaky on CIRCLECI")
+ }
+
+ // start two networks and send 2 messages from A to B
+ closed := false
+ netA, netB, counter, closeFunc := setupWebsocketNetworkAB(t, 2)
+ defer func() {
+ if !closed {
+ closeFunc()
+ }
+ }()
+ counterDone := counter.done
+ netA.Broadcast(context.Background(), protocol.TxnTag, []byte("foo"), false, nil)
+ netA.Broadcast(context.Background(), protocol.TxnTag, []byte("bar"), false, nil)
+
+ select {
+ case <-counterDone:
+ case <-time.After(2 * time.Second):
+ t.Errorf("timeout, count=%d, wanted 2", counter.count)
+ }
+
+ // get RTT from both ends and assert nonzero
+ var peersA, peersB []*wsPeer
+ peersA, _ = netA.peerSnapshot(peersA)
+ detailsA := netA.getPeerConnectionTelemetryDetails(time.Now(), peersA)
+ peersB, _ = netB.peerSnapshot(peersB)
+ detailsB := netB.getPeerConnectionTelemetryDetails(time.Now(), peersB)
+ require.Len(t, detailsA.IncomingPeers, 1)
+ assert.NotZero(t, detailsA.IncomingPeers[0].TCP.RTT)
+ require.Len(t, detailsB.OutgoingPeers, 1)
+ assert.NotZero(t, detailsB.OutgoingPeers[0].TCP.RTT)
+
+ pcdA, err := json.Marshal(detailsA)
+ assert.NoError(t, err)
+ pcdB, err := json.Marshal(detailsB)
+ assert.NoError(t, err)
+ t.Log("detailsA", string(pcdA))
+ t.Log("detailsB", string(pcdB))
+
+ // close connections
+ closeFunc()
+ closed = true
+ // open more FDs by starting 2 more networks
+ _, _, _, closeFunc2 := setupWebsocketNetworkAB(t, 2)
+ defer closeFunc2()
+ // use stale peers snapshot from closed networks to get telemetry
+ // *net.OpError "use of closed network connection" err results in 0 rtt values
+ detailsA = netA.getPeerConnectionTelemetryDetails(time.Now(), peersA)
+ detailsB = netB.getPeerConnectionTelemetryDetails(time.Now(), peersB)
+ require.Len(t, detailsA.IncomingPeers, 1)
+ assert.Zero(t, detailsA.IncomingPeers[0].TCP.RTT)
+ require.Len(t, detailsB.OutgoingPeers, 1)
+ assert.Zero(t, detailsB.OutgoingPeers[0].TCP.RTT)
+
+ pcdA, err = json.Marshal(detailsA)
+ assert.NoError(t, err)
+ pcdB, err = json.Marshal(detailsB)
+ assert.NoError(t, err)
+ t.Log("closed detailsA", string(pcdA))
+ t.Log("closed detailsB", string(pcdB))
+}
diff --git a/network/wsPeer.go b/network/wsPeer.go
index b16345576..94a1bd2b7 100644
--- a/network/wsPeer.go
+++ b/network/wsPeer.go
@@ -124,6 +124,11 @@ type wsPeerWebsocketConn interface {
CloseWithoutFlush() error
SetPingHandler(h func(appData string) error)
SetPongHandler(h func(appData string) error)
+ wrappedConn
+}
+
+type wrappedConn interface {
+ UnderlyingConn() net.Conn
}
type sendMessage struct {
@@ -180,6 +185,14 @@ type wsPeer struct {
// Nonce used to uniquely identify requests
requestNonce uint64
+ // duplicateFilterCount counts how many times the remote peer has sent us a message hash
+ // to filter that it had already sent before.
+ // this needs to be 64-bit aligned for use with atomic.AddUint64 on 32-bit platforms.
+ duplicateFilterCount uint64
+
+ // These message counters need to be 64-bit aligned as well.
+ txMessageCount, miMessageCount, ppMessageCount, avMessageCount uint64
+
wsPeerCore
// conn will be *websocket.Conn (except in testing)
@@ -203,9 +216,6 @@ type wsPeer struct {
incomingMsgFilter *messageFilter
outgoingMsgFilter *messageFilter
- // duplicateFilterCount counts how many times the remote peer has sent us a message hash
- // to filter that it had already sent before.
- duplicateFilterCount int64
processed chan struct{}
@@ -489,6 +499,7 @@ func (wp *wsPeer) readLoop() {
switch msg.Tag {
case protocol.MsgOfInterestTag:
// try to decode the message-of-interest
+ atomic.AddUint64(&wp.miMessageCount, 1)
if wp.handleMessageOfInterest(msg) {
return
}
@@ -515,13 +526,19 @@ func (wp *wsPeer) readLoop() {
case channel <- &Response{Topics: topics}:
// do nothing. writing was successful.
default:
- wp.net.log.Warnf("wsPeer readLoop: channel blocked. Could not pass the response to the requester", wp.conn.RemoteAddr().String())
+ wp.net.log.Warn("wsPeer readLoop: channel blocked. Could not pass the response to the requester", wp.conn.RemoteAddr().String())
}
continue
case protocol.MsgDigestSkipTag:
// network maintenance message handled immediately instead of handing off to general handlers
wp.handleFilterMessage(msg)
continue
+ case protocol.TxnTag:
+ atomic.AddUint64(&wp.txMessageCount, 1)
+ case protocol.AgreementVoteTag:
+ atomic.AddUint64(&wp.avMessageCount, 1)
+ case protocol.ProposalPayloadTag:
+ atomic.AddUint64(&wp.ppMessageCount, 1)
}
if len(msg.Data) > 0 && wp.incomingMsgFilter != nil && dedupSafeTag(msg.Tag) {
if wp.incomingMsgFilter.CheckIncomingMessage(msg.Tag, msg.Data, true, true) {
@@ -614,7 +631,7 @@ func (wp *wsPeer) handleFilterMessage(msg IncomingMessage) {
// large message concurrently from several peers, and then sent the filter message to us after
// each large message finished transferring.
duplicateNetworkFilterReceivedTotal.Inc(nil)
- atomic.AddInt64(&wp.duplicateFilterCount, 1)
+ atomic.AddUint64(&wp.duplicateFilterCount, 1)
}
}
diff --git a/network/wsPeer_test.go b/network/wsPeer_test.go
index d61c182d3..2798a5256 100644
--- a/network/wsPeer_test.go
+++ b/network/wsPeer_test.go
@@ -103,6 +103,7 @@ func TestAtomicVariablesAlignment(t *testing.T) {
require.True(t, (unsafe.Offsetof(p.requestNonce)%8) == 0)
require.True(t, (unsafe.Offsetof(p.lastPacketTime)%8) == 0)
require.True(t, (unsafe.Offsetof(p.intermittentOutgoingMessageEnqueueTime)%8) == 0)
+ require.True(t, (unsafe.Offsetof(p.duplicateFilterCount)%8) == 0)
}
func TestTagCounterFiltering(t *testing.T) {
diff --git a/node/node.go b/node/node.go
index a21aad188..c1617f2ba 100644
--- a/node/node.go
+++ b/node/node.go
@@ -84,6 +84,9 @@ type StatusReport struct {
CatchpointCatchupTotalAccounts uint64
CatchpointCatchupProcessedAccounts uint64
CatchpointCatchupVerifiedAccounts uint64
+ CatchpointCatchupTotalKVs uint64
+ CatchpointCatchupProcessedKVs uint64
+ CatchpointCatchupVerifiedKVs uint64
CatchpointCatchupTotalBlocks uint64
CatchpointCatchupAcquiredBlocks uint64
}
@@ -385,8 +388,9 @@ func (node *AlgorandFullNode) startMonitoringRoutines() {
// Delete old participation keys
go node.oldKeyDeletionThread(node.ctx.Done())
- // TODO re-enable with configuration flag post V1
- //go logging.UsageLogThread(node.ctx, node.log, 100*time.Millisecond, nil)
+ if node.config.EnableUsageLog {
+ go logging.UsageLogThread(node.ctx, node.log, 100*time.Millisecond, nil)
+ }
}
// waitMonitoringRoutines waits for all the monitoring routines to exit. Note that
@@ -674,6 +678,9 @@ func (node *AlgorandFullNode) Status() (s StatusReport, err error) {
s.CatchpointCatchupTotalAccounts = stats.TotalAccounts
s.CatchpointCatchupProcessedAccounts = stats.ProcessedAccounts
s.CatchpointCatchupVerifiedAccounts = stats.VerifiedAccounts
+ s.CatchpointCatchupTotalKVs = stats.TotalKVs
+ s.CatchpointCatchupProcessedKVs = stats.ProcessedKVs
+ s.CatchpointCatchupVerifiedKVs = stats.VerifiedKVs
s.CatchpointCatchupTotalBlocks = stats.TotalBlocks
s.CatchpointCatchupAcquiredBlocks = stats.AcquiredBlocks
s.CatchupTime = time.Now().Sub(stats.StartTime)
@@ -941,7 +948,7 @@ func (node *AlgorandFullNode) loadParticipationKeys() error {
renamedFileName := filepath.Join(fullname, ".old")
err = os.Rename(fullname, renamedFileName)
if err != nil {
- node.log.Warn("loadParticipationKeys: failed to rename unsupported participation key file '%s' to '%s': %v", fullname, renamedFileName, err)
+ node.log.Warnf("loadParticipationKeys: failed to rename unsupported participation key file '%s' to '%s': %v", fullname, renamedFileName, err)
}
} else {
return fmt.Errorf("AlgorandFullNode.loadParticipationKeys: cannot load account at %v: %v", info.Name(), err)
@@ -1070,7 +1077,7 @@ func (node *AlgorandFullNode) oldKeyDeletionThread(done <-chan struct{}) {
// Persist participation registry updates to last-used round and voting key changes.
err = node.accountManager.Registry().Flush(participationRegistryFlushMaxWaitDuration)
if err != nil {
- node.log.Warnf("error while flushing the registry: %w", err)
+ node.log.Warnf("error while flushing the registry: %v", err)
}
}
}
diff --git a/protocol/codec_tester.go b/protocol/codec_tester.go
index f40270039..8d784a069 100644
--- a/protocol/codec_tester.go
+++ b/protocol/codec_tester.go
@@ -241,7 +241,14 @@ func randomizeValue(v reflect.Value, datapath string, tag string, remainingChang
*remainingChanges--
case reflect.String:
var buf []byte
- len := rand.Int() % 64
+ var len int
+ if strings.HasSuffix(v.Type().PkgPath(), "go-algorand/agreement") && v.Type().Name() == "serializableError" {
+ // Don't generate empty strings for serializableError since nil values of *string type
+ // will serialize differently by msgp and go-codec
+ len = rand.Int()%63 + 1
+ } else {
+ len = rand.Int() % 64
+ }
for i := 0; i < len; i++ {
buf = append(buf, byte(rand.Uint32()))
}
@@ -270,6 +277,10 @@ func randomizeValue(v reflect.Value, datapath string, tag string, remainingChang
// unexported
continue
}
+ if st.Name() == "messageEvent" && f.Name == "Tail" {
+ // Don't try and set the Tail field since it's recursive
+ continue
+ }
if rawMsgpType == f.Type {
return errSkipRawMsgpTesting
}
diff --git a/protocol/consensus.go b/protocol/consensus.go
index cd03519fb..f9ee2ec0c 100644
--- a/protocol/consensus.go
+++ b/protocol/consensus.go
@@ -192,6 +192,11 @@ const ConsensusV35 = ConsensusVersion(
"https://github.com/algorandfoundation/specs/tree/433d8e9a7274b6fca703d91213e05c7e6a589e69",
)
+// ConsensusV36 adds box storage
+const ConsensusV36 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/44fa607d6051730f5264526bf3c108d51f0eadb6",
+)
+
// ConsensusFuture is a protocol that should not appear in any production
// network, but is used to test features before they are released.
const ConsensusFuture = ConsensusVersion(
@@ -212,13 +217,16 @@ const ConsensusVAlpha3 = ConsensusVersion("alpha3")
// ConsensusVAlpha4 uses the same parameters as ConsensusV34.
const ConsensusVAlpha4 = ConsensusVersion("alpha4")
+// ConsensusVAlpha5 uses the same parameters as ConsensusV36.
+const ConsensusVAlpha5 = ConsensusVersion("alpha5")
+
// !!! ********************* !!!
// !!! *** Please update ConsensusCurrentVersion when adding new protocol versions *** !!!
// !!! ********************* !!!
// ConsensusCurrentVersion is the latest version and should be used
// when a specific version is not provided.
-const ConsensusCurrentVersion = ConsensusV35
+const ConsensusCurrentVersion = ConsensusV36
// Error is used to indicate that an unsupported protocol has been detected.
type Error ConsensusVersion
diff --git a/rpcs/txService.go b/rpcs/txService.go
index e621d6541..654a8e69f 100644
--- a/rpcs/txService.go
+++ b/rpcs/txService.go
@@ -143,7 +143,7 @@ func (txs *TxService) ServeHTTP(response http.ResponseWriter, request *http.Requ
response.WriteHeader(http.StatusOK)
_, err = response.Write(txblob)
if err != nil {
- txs.log.Warnf("http block write failed ", err)
+ txs.log.Warn("http block write failed", err)
}
}
diff --git a/rpcs/txService_test.go b/rpcs/txService_test.go
index 4db86d10c..70e13fc22 100644
--- a/rpcs/txService_test.go
+++ b/rpcs/txService_test.go
@@ -17,6 +17,7 @@
package rpcs
import (
+ "context"
"net"
"net/http"
"net/url"
@@ -90,7 +91,7 @@ func (b *basicRPCNode) RegisterHandlers(dispatch []network.TaggedMessageHandler)
func (b *basicRPCNode) start() bool {
var err error
- b.listener, err = net.Listen("tcp", "")
+ b.listener, err = net.Listen("tcp", "127.0.0.1:")
if err != nil {
logging.Base().Error("tcp listen", err)
return false
@@ -149,6 +150,8 @@ func TestTxSync(t *testing.T) {
syncTimeout := time.Second
syncerPool := makeMockPendingTxAggregate(0)
syncer := MakeTxSyncer(syncerPool, nodeB, &handler, syncInterval, syncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize)
+ // Since syncer is not Started, set the context here
+ syncer.ctx, syncer.cancel = context.WithCancel(context.Background())
require.NoError(t, syncer.sync())
require.Equal(t, int32(3), atomic.LoadInt32(&handler.messageCounter))
}
diff --git a/rpcs/txSyncer.go b/rpcs/txSyncer.go
index cbeeb27fc..399637169 100644
--- a/rpcs/txSyncer.go
+++ b/rpcs/txSyncer.go
@@ -59,13 +59,10 @@ type TxSyncer struct {
// MakeTxSyncer returns a TxSyncer
func MakeTxSyncer(pool PendingTxAggregate, clientSource network.GossipNode, txHandler data.SolicitedTxHandler, syncInterval time.Duration, syncTimeout time.Duration, serverResponseSize int) *TxSyncer {
- ctx, cancel := context.WithCancel(context.Background())
return &TxSyncer{
pool: pool,
clientSource: clientSource,
handler: txHandler,
- ctx: ctx,
- cancel: cancel,
syncInterval: syncInterval,
syncTimeout: syncTimeout,
log: logging.Base(),
@@ -76,6 +73,7 @@ func MakeTxSyncer(pool PendingTxAggregate, clientSource network.GossipNode, txHa
// Start begins periodically syncing after the canStart chanel indicates it can begin
func (syncer *TxSyncer) Start(canStart chan struct{}) {
syncer.wg.Add(1)
+ syncer.ctx, syncer.cancel = context.WithCancel(context.Background())
go func() {
defer syncer.wg.Done()
select {
diff --git a/rpcs/txSyncer_test.go b/rpcs/txSyncer_test.go
index 0d09bb087..377080ae8 100644
--- a/rpcs/txSyncer_test.go
+++ b/rpcs/txSyncer_test.go
@@ -196,6 +196,8 @@ func TestSyncFromClient(t *testing.T) {
clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
syncer := MakeTxSyncer(clientPool, &clientAgg, &handler, testSyncInterval, testSyncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize)
+ // Since syncer is not Started, set the context here
+ syncer.ctx, syncer.cancel = context.WithCancel(context.Background())
syncer.log = logging.TestingLog(t)
require.NoError(t, syncer.syncFromClient(&client))
@@ -211,6 +213,8 @@ func TestSyncFromUnsupportedClient(t *testing.T) {
clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
syncer := MakeTxSyncer(pool, &clientAgg, &handler, testSyncInterval, testSyncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize)
+ // Since syncer is not Started, set the context here
+ syncer.ctx, syncer.cancel = context.WithCancel(context.Background())
syncer.log = logging.TestingLog(t)
require.Error(t, syncer.syncFromClient(&client))
@@ -226,6 +230,8 @@ func TestSyncFromClientAndQuit(t *testing.T) {
clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
syncer := MakeTxSyncer(pool, &clientAgg, &handler, testSyncInterval, testSyncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize)
+ // Since syncer is not Started, set the context here
+ syncer.ctx, syncer.cancel = context.WithCancel(context.Background())
syncer.log = logging.TestingLog(t)
syncer.cancel()
require.Error(t, syncer.syncFromClient(&client))
@@ -241,6 +247,8 @@ func TestSyncFromClientAndError(t *testing.T) {
clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
syncer := MakeTxSyncer(pool, &clientAgg, &handler, testSyncInterval, testSyncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize)
+ // Since syncer is not Started, set the context here
+ syncer.ctx, syncer.cancel = context.WithCancel(context.Background())
syncer.log = logging.TestingLog(t)
require.Error(t, syncer.syncFromClient(&client))
require.Zero(t, atomic.LoadInt32(&handler.messageCounter))
@@ -256,6 +264,8 @@ func TestSyncFromClientAndTimeout(t *testing.T) {
handler := mockHandler{}
syncTimeout := time.Duration(0)
syncer := MakeTxSyncer(pool, &clientAgg, &handler, testSyncInterval, syncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize)
+ // Since syncer is not Started, set the context here
+ syncer.ctx, syncer.cancel = context.WithCancel(context.Background())
syncer.log = logging.TestingLog(t)
require.Error(t, syncer.syncFromClient(&client))
require.Zero(t, atomic.LoadInt32(&handler.messageCounter))
@@ -277,6 +287,8 @@ func TestSync(t *testing.T) {
handler := mockHandler{}
syncerPool := makeMockPendingTxAggregate(3)
syncer := MakeTxSyncer(syncerPool, &clientAgg, &handler, testSyncInterval, testSyncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize)
+ // Since syncer is not Started, set the context here
+ syncer.ctx, syncer.cancel = context.WithCancel(context.Background())
syncer.log = logging.TestingLog(t)
require.NoError(t, syncer.sync())
@@ -290,6 +302,8 @@ func TestNoClientsSync(t *testing.T) {
clientAgg := mockClientAggregator{peers: []network.Peer{}}
handler := mockHandler{}
syncer := MakeTxSyncer(pool, &clientAgg, &handler, testSyncInterval, testSyncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize)
+ // Since syncer is not Started, set the context here
+ syncer.ctx, syncer.cancel = context.WithCancel(context.Background())
syncer.log = logging.TestingLog(t)
require.NoError(t, syncer.sync())
diff --git a/scripts/buildtools/versions b/scripts/buildtools/versions
index 04960db22..01e0e9006 100644
--- a/scripts/buildtools/versions
+++ b/scripts/buildtools/versions
@@ -1,7 +1,7 @@
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616
golang.org/x/tools v0.1.5
-github.com/algorand/msgp v1.1.52
-github.com/algorand/oapi-codegen v1.3.7
+github.com/algorand/msgp v1.1.53
+github.com/algorand/oapi-codegen v1.12.0-algorand.0
github.com/go-swagger/go-swagger v0.25.0
gotest.tools/gotestsum v1.6.4
github.com/golangci/golangci-lint/cmd/golangci-lint v1.47.3
diff --git a/scripts/dump_genesis.sh b/scripts/dump_genesis.sh
index 3ee876554..386924588 100755
--- a/scripts/dump_genesis.sh
+++ b/scripts/dump_genesis.sh
@@ -76,6 +76,9 @@ for LEDGER in $LEDGERS; do
unfinishedcatchpoints)
SORT=round
;;
+ kvstore)
+ SORT=key
+ ;;
*)
echo "Unknown table $T" >&2
exit 1
diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go
index 610a0e9b5..982c768aa 100644
--- a/shared/pingpong/accounts.go
+++ b/shared/pingpong/accounts.go
@@ -30,7 +30,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/passphrase"
- v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
algodAcct "github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
@@ -163,10 +163,10 @@ func (pps *WorkerState) ensureAccounts(ac *libgoal.Client) (err error) {
pps.cinfo.OptIns = make(map[uint64][]string, pps.cfg.NumAsset+pps.cfg.NumApp)
}
if pps.cinfo.AssetParams == nil {
- pps.cinfo.AssetParams = make(map[uint64]v1.AssetParams, pps.cfg.NumAsset)
+ pps.cinfo.AssetParams = make(map[uint64]model.AssetParams, pps.cfg.NumAsset)
}
if pps.cinfo.AppParams == nil {
- pps.cinfo.AppParams = make(map[uint64]v1.AppParams, pps.cfg.NumApp)
+ pps.cinfo.AppParams = make(map[uint64]model.ApplicationParams, pps.cfg.NumApp)
}
sources := make([]<-chan *crypto.SignatureSecrets, 0, 2)
@@ -197,9 +197,7 @@ func (pps *WorkerState) ensureAccounts(ac *libgoal.Client) (err error) {
srcAcctPresent = true
}
- // TODO: switch to v2 API
- //ai, err := ac.AccountInformationV2(addr, false)
- ai, err := ac.AccountInformation(addr)
+ ai, err := ac.AccountInformation(addr, true)
if err != nil {
return err
}
@@ -247,36 +245,48 @@ func (pps *WorkerState) ensureAccounts(ac *libgoal.Client) (err error) {
return
}
-func (pps *WorkerState) integrateAccountInfo(addr string, ppa *pingPongAccount, ai v1.Account) {
+func (pps *WorkerState) integrateAccountInfo(addr string, ppa *pingPongAccount, ai model.Account) {
ppa.balance = ai.Amount
// assets this account has created
- for assetID, ap := range ai.AssetParams {
- pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
- pps.cinfo.AssetParams[assetID] = ap
+ if ai.CreatedAssets != nil {
+ for _, ap := range *ai.CreatedAssets {
+ assetID := ap.Index
+ pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
+ pps.cinfo.AssetParams[assetID] = ap.Params
+ }
}
// assets held
- for assetID, holding := range ai.Assets {
- pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
- if ppa.holdings == nil {
- ppa.holdings = make(map[uint64]uint64)
+ if ai.Assets != nil {
+ for _, holding := range *ai.Assets {
+ assetID := holding.AssetID
+ pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
+ if ppa.holdings == nil {
+ ppa.holdings = make(map[uint64]uint64)
+ }
+ ppa.holdings[assetID] = holding.Amount
}
- ppa.holdings[assetID] = holding.Amount
}
// apps created by this account
- for appID, ap := range ai.AppParams {
- pps.cinfo.OptIns[appID] = uniqueAppend(pps.cinfo.OptIns[appID], addr)
- pps.cinfo.AppParams[appID] = ap
+ if ai.CreatedApps != nil {
+ for _, ap := range *ai.CreatedApps {
+ appID := ap.Id
+ pps.cinfo.OptIns[appID] = uniqueAppend(pps.cinfo.OptIns[appID], addr)
+ pps.cinfo.AppParams[appID] = ap.Params
+ }
}
// apps opted into
- for appID := range ai.AppLocalStates {
- pps.cinfo.OptIns[appID] = uniqueAppend(pps.cinfo.OptIns[appID], addr)
+ if ai.AppsLocalState != nil {
+ for _, localState := range *ai.AppsLocalState {
+ appID := localState.Id
+ pps.cinfo.OptIns[appID] = uniqueAppend(pps.cinfo.OptIns[appID], addr)
+ }
}
}
type assetopti struct {
assetID uint64
- params v1.AssetParams // TODO: switch to v2 API
- optins []string // addr strings
+ params model.AssetParams
+ optins []string // addr strings
}
type assetSet []assetopti
@@ -301,7 +311,7 @@ func (as *assetSet) Swap(a, b int) {
func (pps *WorkerState) prepareAssets(client *libgoal.Client) (err error) {
if pps.cinfo.AssetParams == nil {
- pps.cinfo.AssetParams = make(map[uint64]v1.AssetParams)
+ pps.cinfo.AssetParams = make(map[uint64]model.AssetParams)
}
if pps.cinfo.OptIns == nil {
pps.cinfo.OptIns = make(map[uint64][]string)
@@ -326,7 +336,7 @@ func (pps *WorkerState) prepareAssets(client *libgoal.Client) (err error) {
sort.Sort(&ta)
if len(assets) > int(pps.cfg.NumAsset) {
assets = assets[:pps.cfg.NumAsset]
- nap := make(map[uint64]v1.AssetParams, pps.cfg.NumAsset)
+ nap := make(map[uint64]model.AssetParams, pps.cfg.NumAsset)
for _, asset := range assets {
nap[asset.assetID] = asset.params
}
@@ -407,30 +417,35 @@ func (pps *WorkerState) makeNewAssets(client *libgoal.Client) (err error) {
newAssetAddrs[addr] = acct
}
// wait for new assets to be created, fetch account data for them
- newAssets := make(map[uint64]v1.AssetParams, assetsNeeded)
+ newAssets := make(map[uint64]model.AssetParams, assetsNeeded)
timeout := time.Now().Add(10 * time.Second)
for len(newAssets) < assetsNeeded {
for addr, acct := range newAssetAddrs {
- // TODO: switch to v2 API
- ai, err := client.AccountInformation(addr)
+ ai, err := client.AccountInformation(addr, true)
if err != nil {
fmt.Printf("Warning: cannot lookup source account after assets creation")
time.Sleep(1 * time.Second)
continue
}
- for assetID, ap := range ai.AssetParams {
- pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
- _, has := pps.cinfo.AssetParams[assetID]
- if !has {
- newAssets[assetID] = ap
+ if ai.CreatedAssets != nil {
+ for _, ap := range *ai.CreatedAssets {
+ assetID := ap.Index
+ pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
+ _, has := pps.cinfo.AssetParams[assetID]
+ if !has {
+ newAssets[assetID] = ap.Params
+ }
}
}
- for assetID, holding := range ai.Assets {
- pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
- if acct.holdings == nil {
- acct.holdings = make(map[uint64]uint64)
+ if ai.Assets != nil {
+ for _, holding := range *ai.Assets {
+ assetID := holding.AssetID
+ pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
+ if acct.holdings == nil {
+ acct.holdings = make(map[uint64]uint64)
+ }
+ acct.holdings[assetID] = holding.Amount
}
- acct.holdings[assetID] = holding.Amount
}
}
if time.Now().After(timeout) {
@@ -479,7 +494,67 @@ func genBigNoOpAndBigHashes(numOps uint32, numHashes uint32, hashSize string) []
return ops.Program
}
-func genAppProgram(numOps uint32, numHashes uint32, hashSize string, numGlobalKeys uint32, numLocalKeys uint32) ([]byte, string) {
+func genAppProgram(numOps uint32, numHashes uint32, hashSize string, numGlobalKeys, numLocalKeys, numBoxUpdate, numBoxRead uint32) ([]byte, string) {
+ if numBoxUpdate != 0 || numBoxRead != 0 {
+ prologue := `#pragma version 8
+ txn ApplicationID
+ bz done
+ `
+ createBoxes := `
+ byte "%d"
+ int 1024
+ box_create
+ pop
+ `
+ updateBoxes := `
+ byte "%d"
+ int 0
+ byte "1"
+ box_replace
+ `
+ getBoxes := `
+ byte "%d"
+ box_get
+ assert
+ pop
+ `
+ done := `
+ done:
+ int 1
+ return
+ `
+
+ progParts := []string{prologue}
+
+ // note: only one of numBoxUpdate or numBoxRead should be nonzero
+ if numBoxUpdate != 0 {
+ for i := uint32(0); i < numBoxUpdate; i++ {
+ progParts = append(progParts, fmt.Sprintf(createBoxes, i))
+ }
+
+ for i := uint32(0); i < numBoxUpdate; i++ {
+ progParts = append(progParts, fmt.Sprintf(updateBoxes, i))
+ }
+ } else {
+ for i := uint32(0); i < numBoxRead; i++ {
+ progParts = append(progParts, fmt.Sprintf(createBoxes, i))
+ }
+
+ for i := uint32(0); i < numBoxRead; i++ {
+ progParts = append(progParts, fmt.Sprintf(getBoxes, i))
+ }
+ }
+ progParts = append(progParts, done)
+
+ // assemble
+ progAsm := strings.Join(progParts, "\n")
+ ops, err := logic.AssembleString(progAsm)
+ if err != nil {
+ panic(err)
+ }
+ return ops.Program, progAsm
+ }
+
prologueSize := uint32(2 + 3 + 2 + 1 + 1 + 3)
prologue := `#pragma version 2
txn ApplicationID
@@ -677,7 +752,7 @@ func getProto(client *libgoal.Client) (config.ConsensusParams, error) {
// ensure that cfg.NumPartAccounts have cfg.NumAppOptIn opted in selecting from cfg.NumApp
func (pps *WorkerState) prepareApps(client *libgoal.Client) (err error) {
if pps.cinfo.AppParams == nil {
- pps.cinfo.AppParams = make(map[uint64]v1.AppParams)
+ pps.cinfo.AppParams = make(map[uint64]model.ApplicationParams)
}
if pps.cinfo.OptIns == nil {
@@ -732,27 +807,28 @@ func (pps *WorkerState) prepareApps(client *libgoal.Client) (err error) {
// update pps.cinfo.AppParams to ensure newly created apps are present
for _, addr := range newAppAddrs {
- var ai v1.Account
+ var ai model.Account
for {
- ai, err = client.AccountInformation(addr)
+ ai, err = client.AccountInformation(addr, true)
if err != nil {
fmt.Printf("Warning, cannot lookup source account")
return
}
- if len(ai.AppParams) >= appsPerAddr[addr] {
+ if ai.CreatedApps != nil && len(*ai.CreatedApps) >= appsPerAddr[addr] {
break
}
waitForNextRoundOrSleep(client, 500*time.Millisecond)
// TODO : if we fail here for too long, we should re-create new accounts, etc.
}
- ai, err = client.AccountInformation(addr)
+ ai, err = client.AccountInformation(addr, true)
if err != nil {
return
}
- for appID, ap := range ai.AppParams {
+ for _, ap := range *ai.CreatedApps {
+ appID := ap.Id
pps.cinfo.OptIns[appID] = uniqueAppend(pps.cinfo.OptIns[appID], addr)
- pps.cinfo.AppParams[appID] = ap
+ pps.cinfo.AppParams[appID] = ap.Params
}
}
@@ -794,19 +870,28 @@ func (pps *WorkerState) prepareApps(client *libgoal.Client) (err error) {
//txgroup = txgroup[:0]
//senders = senders[:0]
}
+
+ for appid := range pps.cinfo.AppParams {
+ // use source account to fund all apps
+ err = pps.appFundFromSourceAccount(appid, client)
+ if err != nil {
+ return
+ }
+ }
+
return
}
func (pps *WorkerState) newApp(addr string, client *libgoal.Client) (tx transactions.Transaction, err error) {
// generate app program with roughly some number of operations
- prog, asm := genAppProgram(pps.cfg.AppProgOps, pps.cfg.AppProgHashes, pps.cfg.AppProgHashSize, pps.cfg.AppGlobKeys, pps.cfg.AppLocalKeys)
+ prog, asm := genAppProgram(pps.cfg.AppProgOps, pps.cfg.AppProgHashes, pps.cfg.AppProgHashSize, pps.cfg.AppGlobKeys, pps.cfg.AppLocalKeys, pps.cfg.NumBoxUpdate, pps.cfg.NumBoxRead)
if !pps.cfg.Quiet {
fmt.Printf("generated program: \n%s\n", asm)
}
globSchema := basics.StateSchema{NumByteSlice: proto.MaxGlobalSchemaEntries}
locSchema := basics.StateSchema{NumByteSlice: proto.MaxLocalSchemaEntries}
- tx, err = client.MakeUnsignedAppCreateTx(transactions.NoOpOC, prog, prog, globSchema, locSchema, nil, nil, nil, nil, 0)
+ tx, err = client.MakeUnsignedAppCreateTx(transactions.NoOpOC, prog, prog, globSchema, locSchema, nil, nil, nil, nil, nil, 0)
if err != nil {
fmt.Printf("Cannot create app txn\n")
panic(err)
@@ -827,7 +912,7 @@ func (pps *WorkerState) newApp(addr string, client *libgoal.Client) (tx transact
}
func (pps *WorkerState) appOptIn(addr string, appID uint64, client *libgoal.Client) (tx transactions.Transaction, err error) {
- tx, err = client.MakeUnsignedAppOptInTx(appID, nil, nil, nil, nil)
+ tx, err = client.MakeUnsignedAppOptInTx(appID, nil, nil, nil, nil, nil)
if err != nil {
fmt.Printf("Cannot create app txn\n")
panic(err)
@@ -844,6 +929,35 @@ func (pps *WorkerState) appOptIn(addr string, appID uint64, client *libgoal.Clie
return
}
+func (pps *WorkerState) appFundFromSourceAccount(appID uint64, client *libgoal.Client) (err error) {
+ // currently, apps only need to be funded if boxes are used
+ if pps.getNumBoxes() > 0 {
+ var srcFunds uint64
+ srcFunds, err = client.GetBalance(pps.cfg.SrcAccount)
+ if err != nil {
+ return err
+ }
+
+ appAddr := basics.AppIndex(appID).Address()
+ mbr := proto.MinBalance +
+ proto.BoxFlatMinBalance*uint64(pps.getNumBoxes()) +
+ proto.BoxByteMinBalance*(proto.MaxBoxSize+uint64(proto.MaxAppKeyLen))*uint64(pps.getNumBoxes())
+
+ pps.schedule(1)
+ var txn transactions.Transaction
+ txn, err = pps.sendPaymentFromSourceAccount(client, appAddr.String(), 0, mbr, pps.accounts[pps.cfg.SrcAccount])
+ if err != nil {
+ return err
+ }
+
+ srcFunds -= mbr
+ srcFunds -= txn.Fee.Raw
+ pps.accounts[pps.cfg.SrcAccount].setBalance(srcFunds)
+ }
+
+ return nil
+}
+
func takeTopAccounts(allAccounts map[string]*pingPongAccount, numAccounts uint32, srcAccount string) (accounts map[string]*pingPongAccount) {
allAddrs := make([]string, len(allAccounts))
var i int
diff --git a/shared/pingpong/config.go b/shared/pingpong/config.go
index 8e406d255..b5dd17bc5 100644
--- a/shared/pingpong/config.go
+++ b/shared/pingpong/config.go
@@ -36,6 +36,7 @@ type PpConfig struct {
RandomizeFee bool
RandomizeAmt bool
RandomizeDst bool
+ MaxRandomDst uint64
MaxFee uint64
MinFee uint64
MaxAmt uint64
@@ -60,7 +61,11 @@ type PpConfig struct {
// NumApp is the total number of apps to create
NumApp uint32
// NumAppOptIn is the number of apps each account opts in to
- NumAppOptIn uint32
+ NumAppOptIn uint32
+ // NumBoxUpdate is the number of boxes used per app, where box values are updated each call
+ NumBoxUpdate uint32
+ // NumBoxRead is the number of boxes used per app, where box values are only read each call
+ NumBoxRead uint32
AppProgOps uint32
AppProgHashes uint32
AppProgHashSize string
@@ -94,6 +99,7 @@ var DefaultConfig = PpConfig{
RandomizeFee: false,
RandomizeAmt: false,
RandomizeDst: false,
+ MaxRandomDst: 200000,
MaxFee: 10000,
MinFee: 1000,
MaxAmt: 1000,
@@ -106,6 +112,8 @@ var DefaultConfig = PpConfig{
NumAsset: 0,
MinAccountAsset: 10000000,
NumApp: 0,
+ NumBoxUpdate: 0,
+ NumBoxRead: 0,
AppProgOps: 0,
AppProgHashes: 0,
AppProgHashSize: "sha256",
diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go
index e0ee6812a..c6a24f493 100644
--- a/shared/pingpong/pingpong.go
+++ b/shared/pingpong/pingpong.go
@@ -32,7 +32,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
- v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
@@ -42,8 +42,8 @@ import (
// CreatablesInfo has information about created assets, apps and opting in
type CreatablesInfo struct {
- AssetParams map[uint64]v1.AssetParams
- AppParams map[uint64]v1.AppParams
+ AssetParams map[uint64]model.AssetParams
+ AppParams map[uint64]model.ApplicationParams
OptIns map[uint64][]string
}
@@ -130,9 +130,10 @@ func (ppa *pingPongAccount) String() string {
// WorkerState object holds a running pingpong worker
type WorkerState struct {
- cfg PpConfig
- accounts map[string]*pingPongAccount
- cinfo CreatablesInfo
+ cfg PpConfig
+ accounts map[string]*pingPongAccount
+ randomAccounts []string
+ cinfo CreatablesInfo
nftStartTime int64
localNftIndex uint64
@@ -150,6 +151,17 @@ type WorkerState struct {
client *libgoal.Client
}
+// returns the number of boxes per app
+func (pps *WorkerState) getNumBoxes() uint32 {
+ // only one of NumBoxUpdate and NumBoxRead should be nonzero. There isn't
+ // currently support for mixed box workloads so these numbers should not be
+ // added together.
+ if pps.cfg.NumBoxUpdate > 0 {
+ return pps.cfg.NumBoxUpdate
+ }
+ return pps.cfg.NumBoxRead
+}
+
// PrepareAccounts to set up accounts and asset accounts required for Ping Pong run
func (pps *WorkerState) PrepareAccounts(ac *libgoal.Client) (err error) {
pps.client = ac
@@ -267,7 +279,7 @@ func (pps *WorkerState) scheduleAction() bool {
pps.refreshPos = 0
}
addr := pps.refreshAddrs[pps.refreshPos]
- ai, err := pps.client.AccountInformation(addr)
+ ai, err := pps.client.AccountInformation(addr, true)
if err == nil {
ppa := pps.accounts[addr]
@@ -442,19 +454,19 @@ func (pps *WorkerState) sendPaymentFromSourceAccount(client *libgoal.Client, to
}
// waitPendingTransactions waits until all the pending transactions coming from the given
-// accounts map have been cleared out of the transaction pool. A prerequesite for this is that
+// accounts map have been cleared out of the transaction pool. A prerequisite for this is that
// there is no other source who might be generating transactions that would come from these account
// addresses.
func waitPendingTransactions(accounts []string, client *libgoal.Client) error {
for _, from := range accounts {
repeat:
- pendingTxns, err := client.GetPendingTransactionsByAddress(from, 0)
+ pendingTxns, err := client.GetParsedPendingTransactionsByAddress(from, 0)
if err != nil {
fmt.Printf("failed to check pending transaction pool status : %v\n", err)
return err
}
- for _, txn := range pendingTxns.TruncatedTxns.Transactions {
- if txn.From != from {
+ for _, txn := range pendingTxns.TopTransactions {
+ if txn.Txn.Sender.String() != from {
// we found a transaction where the receiver was the given account. We don't
// care about these.
continue
@@ -622,7 +634,11 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac *libgoal.Client) {
// NewPingpong creates a new pingpong WorkerState
func NewPingpong(cfg PpConfig) *WorkerState {
- return &WorkerState{cfg: cfg, nftHolders: make(map[string]int)}
+ return &WorkerState{
+ cfg: cfg,
+ nftHolders: make(map[string]int),
+ randomAccounts: make([]string, 0, cfg.MaxRandomDst),
+ }
}
func (pps *WorkerState) randAssetID() (aidx uint64) {
@@ -692,11 +708,7 @@ func (pps *WorkerState) sendFromTo(
fee := pps.fee()
to := toList[i]
- if pps.cfg.RandomizeDst {
- var addr basics.Address
- crypto.RandBytes(addr[:])
- to = addr.String()
- } else if len(belowMinBalanceAccounts) > 0 && (crypto.RandUint64()%100 < 50) {
+ if len(belowMinBalanceAccounts) > 0 && (crypto.RandUint64()%100 < 50) {
// make 50% of the calls attempt to refund low-balanced accounts.
// ( if there is any )
// pick the first low balance account
@@ -704,6 +716,20 @@ func (pps *WorkerState) sendFromTo(
to = acct
break
}
+ } else if pps.cfg.RandomizeDst {
+ // check if we need to create a new random account, or use an existing one
+ if uint64(len(pps.randomAccounts)) >= pps.cfg.MaxRandomDst {
+ // use pre-created random account
+ i := rand.Int63n(int64(len(pps.randomAccounts)))
+ to = pps.randomAccounts[i]
+ } else {
+ // create new random account
+ var addr basics.Address
+ crypto.RandBytes(addr[:])
+ to = addr.String()
+ // push new account
+ pps.randomAccounts = append(pps.randomAccounts, to)
+ }
}
// Broadcast transaction
@@ -959,7 +985,11 @@ type paymentUpdate struct {
func (au *paymentUpdate) apply(pps *WorkerState) {
pps.accounts[au.from].balance -= (au.fee + au.amt)
- pps.accounts[au.to].balance += au.amt
+ // update account balance
+ to := pps.accounts[au.to]
+ if to != nil {
+ to.balance += au.amt
+ }
}
// return true with probability 1/i
@@ -1099,6 +1129,13 @@ func (pps *WorkerState) constructAppTxn(from, to string, fee uint64, client *lib
err = fmt.Errorf("no known apps")
return
}
+
+ // construct box ref array
+ var boxRefs []transactions.BoxRef
+ for i := uint32(0); i < pps.getNumBoxes(); i++ {
+ boxRefs = append(boxRefs, transactions.BoxRef{Index: 0, Name: []byte{fmt.Sprintf("%d", i)[0]}})
+ }
+
appOptIns := pps.cinfo.OptIns[aidx]
sender = from
if len(appOptIns) > 0 {
@@ -1128,7 +1165,7 @@ func (pps *WorkerState) constructAppTxn(from, to string, fee uint64, client *lib
}
accounts = accounts[1:]
}
- txn, err = client.MakeUnsignedAppNoOpTx(aidx, nil, accounts, nil, nil)
+ txn, err = client.MakeUnsignedAppNoOpTx(aidx, nil, accounts, nil, nil, boxRefs)
if err != nil {
return
}
diff --git a/stateproof/builder.go b/stateproof/builder.go
index fd800ebaf..3d14c1f74 100644
--- a/stateproof/builder.go
+++ b/stateproof/builder.go
@@ -98,7 +98,7 @@ func (spw *Worker) initBuilders() {
return
})
if err != nil {
- spw.log.Warnf("initBuilders: getPendingSigs: %w", err)
+ spw.log.Warnf("initBuilders: getPendingSigs: %v", err)
return
}
@@ -128,7 +128,7 @@ func (spw *Worker) addSigsToBuilder(sigs []pendingSig, rnd basics.Round) {
isPresent, err := builderForRound.Present(pos)
if err != nil {
- spw.log.Warnf("addSigsToBuilder: failed to invoke builderForRound.Present on pos %d - %w ", pos, err)
+ spw.log.Warnf("addSigsToBuilder: failed to invoke builderForRound.Present on pos %d - %v", pos, err)
continue
}
if isPresent {
@@ -141,7 +141,7 @@ func (spw *Worker) addSigsToBuilder(sigs []pendingSig, rnd basics.Round) {
continue
}
if err := builderForRound.Add(pos, sig.sig); err != nil {
- spw.log.Warnf("addSigsToBuilder: error while adding sig. inner error: %w", err)
+ spw.log.Warnf("addSigsToBuilder: error while adding sig. inner error: %v", err)
continue
}
}
@@ -407,7 +407,7 @@ func (spw *Worker) tryBroadcast() {
sp, err := b.Build()
if err != nil {
- spw.log.Warnf("spw.tryBroadcast: building state proof for %d failed: %w", rnd, err)
+ spw.log.Warnf("spw.tryBroadcast: building state proof for %d failed: %v", rnd, err)
continue
}
diff --git a/test/commandandcontrol/cc_agent/main.go b/test/commandandcontrol/cc_agent/main.go
index a64ca804e..136c38f0d 100644
--- a/test/commandandcontrol/cc_agent/main.go
+++ b/test/commandandcontrol/cc_agent/main.go
@@ -120,7 +120,7 @@ func main() {
serverWs, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
if err != nil {
- log.Errorf("dial:", err)
+ log.Error("dial:", err)
}
serverWs.Unsafe = true
defer func() {
@@ -168,7 +168,7 @@ func main() {
case t := <-ticker.C:
err := serverWs.WriteMessage(websocket.TextMessage, []byte(fmt.Sprintf("heartbeat from agent %s with time %s", component.GetHostAgent().Host.Name, t.String())))
if err != nil {
- log.Errorf("write:", err)
+ log.Error("write:", err)
return
}
case <-interrupt:
@@ -177,7 +177,7 @@ func main() {
// waiting (with timeout) for the server to close the connection.
err := serverWs.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
if err != nil {
- log.Errorf("write close:", err)
+ log.Error("write close:", err)
return
}
select {
diff --git a/test/commandandcontrol/cc_client/main.go b/test/commandandcontrol/cc_client/main.go
index 817afb850..7125ecd31 100644
--- a/test/commandandcontrol/cc_client/main.go
+++ b/test/commandandcontrol/cc_client/main.go
@@ -129,7 +129,7 @@ func main() {
func closeServiceConnection(serverWs *websocket.Conn) {
err := serverWs.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
if err != nil {
- log.Errorf("write close:", err)
+ log.Error("write close:", err)
return
}
}
diff --git a/test/commandandcontrol/cc_service/main.go b/test/commandandcontrol/cc_service/main.go
index a95130320..26e592e66 100644
--- a/test/commandandcontrol/cc_service/main.go
+++ b/test/commandandcontrol/cc_service/main.go
@@ -56,7 +56,7 @@ func main() {
func handleClientConnections(w http.ResponseWriter, r *http.Request) {
ws, err := upgrader.Upgrade(w, r, nil)
if err != nil {
- log.Errorf("upgrade:", err)
+ log.Error("upgrade:", err)
return
}
ws.Unsafe = true
@@ -72,7 +72,7 @@ func handleAgentConnections(w http.ResponseWriter, r *http.Request) {
// Upgrade initial GET request to a websocket
ws, err := upgrader.Upgrade(w, r, nil)
if err != nil {
- log.Errorf("problem initializing agent web socket", err)
+ log.Error("problem initializing agent web socket", err)
return
}
ws.Unsafe = true
diff --git a/test/e2e-go/cli/goal/clerk_test.go b/test/e2e-go/cli/goal/clerk_test.go
index 851eb1913..9f112fbeb 100644
--- a/test/e2e-go/cli/goal/clerk_test.go
+++ b/test/e2e-go/cli/goal/clerk_test.go
@@ -71,7 +71,7 @@ func TestClerkSendNoteEncoding(t *testing.T) {
tx1, err := fixture.WaitForConfirmedTxn(status.LastRound+i, account, txID)
if err == nil {
foundTx1 = true
- a.Equal(noteText, string(tx1.Note))
+ a.Equal(noteText, string(tx1.Txn.Txn.Note))
}
}
if !foundTx2 {
@@ -80,7 +80,7 @@ func TestClerkSendNoteEncoding(t *testing.T) {
foundTx2 = true
// If the note matches our original text, then goal is still expecting strings encoded
// with StdEncoding.EncodeToString() when using --noteb64 parameter
- a.Equal(originalNoteb64Text, string(tx2.Note), "goal should decode noteb64 with base64.StdEncoding")
+ a.Equal(originalNoteb64Text, string(tx2.Txn.Txn.Note), "goal should decode noteb64 with base64.StdEncoding")
}
}
}
diff --git a/test/e2e-go/features/accountPerf/sixMillion_test.go b/test/e2e-go/features/accountPerf/sixMillion_test.go
index 679d2728a..b3d6c107a 100644
--- a/test/e2e-go/features/accountPerf/sixMillion_test.go
+++ b/test/e2e-go/features/accountPerf/sixMillion_test.go
@@ -35,7 +35,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
clientApi "github.com/algorand/go-algorand/daemon/algod/api/client"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
@@ -115,10 +115,10 @@ func getAccountInformation(
expectedCountAssets uint64,
address string,
context string,
- log logging.Logger) (info generated.Account, err error) {
+ log logging.Logger) (info model.Account, err error) {
for x := 0; x < 5; x++ { // retry only 5 times
- info, err = fixture.AlgodClient.AccountInformationV2(address, true)
+ info, err = fixture.AlgodClient.AccountInformation(address, true)
if err != nil {
return
}
@@ -138,7 +138,7 @@ func getAccountInformation(
func getAccountApplicationInformation(
fixture *fixtures.RestClientFixture,
address string,
- appID uint64) (appInfo generated.AccountApplicationResponse, err error) {
+ appID uint64) (appInfo model.AccountApplicationResponse, err error) {
appInfo, err = fixture.AlgodClient.AccountApplicationInformation(address, appID)
return
@@ -249,6 +249,7 @@ func test5MAssets(t *testing.T, scenario int) {
ba := generateKeys(1)
baseAcct := ba[0]
sender, err := basics.UnmarshalChecksumAddress(wAcct)
+ require.NoError(t, err)
satxn := sendAlgoTransaction(t, 0, sender, baseAcct.pk, 1000000000000000, 1, genesisHash)
err = signAndBroadcastTransaction(0, &satxn, fixture.LibGoalClient, &fixture)
require.NoError(t, err)
@@ -542,7 +543,7 @@ func scenarioA(
ownAllAccount.pk,
tLife,
genesisHash,
- basics.AssetIndex(asset.AssetId),
+ basics.AssetIndex(asset.AssetID),
ownAllAccount.pk,
uint64(0))
@@ -581,7 +582,7 @@ func scenarioA(
nacc.pk,
tLife,
genesisHash,
- basics.AssetIndex(asset.AssetId),
+ basics.AssetIndex(asset.AssetID),
ownAllAccount.pk,
asset.Amount)
counter, txnGroup = queueTransaction(nacc.sk, assSend, txnChan, txnGrpChan, counter, txnGroup)
@@ -612,7 +613,7 @@ func scenarioA(
default:
}
- assHold, err := fixture.AlgodClient.AccountAssetInformation(ownAllAccount.pk.String(), asset.AssetId)
+ assHold, err := fixture.AlgodClient.AccountAssetInformation(ownAllAccount.pk.String(), asset.AssetID)
require.NoError(t, err)
tAssetAmt += assHold.AssetHolding.Amount
@@ -673,7 +674,7 @@ func scenarioB(
counter, firstValid, err = checkPoint(counter, firstValid, tLife, true, fixture, log)
require.NoError(t, err)
- info, err := fixture.AlgodClient.AccountInformationV2(baseAcct.pk.String(), false)
+ info, err := fixture.AlgodClient.AccountInformation(baseAcct.pk.String(), false)
require.NoError(t, err)
require.Equal(t, numberOfAssets, info.TotalAssetsOptedIn)
require.Equal(t, numberOfAssets, info.TotalCreatedAssets)
@@ -1155,7 +1156,7 @@ int 1
// create the app
appTx, err = client.MakeUnsignedAppCreateTx(
- transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, 0)
+ transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, nil, 0)
require.NoError(t, err)
note := make([]byte, 8)
@@ -1182,7 +1183,7 @@ func makeOptInAppTransaction(
tLife uint64,
genesisHash crypto.Digest) (appTx transactions.Transaction) {
- appTx, err := client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil)
+ appTx, err := client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil, nil)
require.NoError(t, err)
appTx.Header = transactions.Header{
@@ -1198,7 +1199,7 @@ func makeOptInAppTransaction(
// checks and verifies the app params by comparing them against the baseline
func checkApplicationParams(
acTF transactions.ApplicationCallTxnFields,
- app generated.ApplicationParams,
+ app model.ApplicationParams,
creator string,
globalStateCheck *[]bool,
globalStateCheckMu *deadlock.Mutex) (pass bool) {
@@ -1288,7 +1289,7 @@ func callAppTransaction(
tLife uint64,
genesisHash crypto.Digest) (appTx transactions.Transaction) {
- appTx, err := client.MakeUnsignedAppNoOpTx(uint64(appIdx), nil, nil, nil, nil)
+ appTx, err := client.MakeUnsignedAppNoOpTx(uint64(appIdx), nil, nil, nil, nil, nil)
require.NoError(t, err)
appTx.Header = transactions.Header{
diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go
index 83b01b8c0..f745c9e75 100644
--- a/test/e2e-go/features/catchup/catchpointCatchup_test.go
+++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go
@@ -31,7 +31,7 @@ import (
"github.com/algorand/go-algorand/config"
algodclient "github.com/algorand/go-algorand/daemon/algod/api/client"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
@@ -83,10 +83,10 @@ func (ec *nodeExitErrorCollector) Print() {
}
// awaitCatchpointCreation attempts catchpoint retrieval with retries when the catchpoint is not yet available.
-func awaitCatchpointCreation(client algodclient.RestClient, fixture *fixtures.RestClientFixture, roundWaitCount uint8) (generatedV2.NodeStatusResponse, error) {
+func awaitCatchpointCreation(client algodclient.RestClient, fixture *fixtures.RestClientFixture, roundWaitCount uint8) (model.NodeStatusResponse, error) {
s, err := client.Status()
if err != nil {
- return generatedV2.NodeStatusResponse{}, err
+ return model.NodeStatusResponse{}, err
}
if len(*s.LastCatchpoint) > 0 {
@@ -97,13 +97,13 @@ func awaitCatchpointCreation(client algodclient.RestClient, fixture *fixtures.Re
if roundWaitCount-1 > 0 {
err = fixture.ClientWaitForRound(client, s.LastRound+1, 10*time.Second)
if err != nil {
- return generatedV2.NodeStatusResponse{}, err
+ return model.NodeStatusResponse{}, err
}
return awaitCatchpointCreation(client, fixture, roundWaitCount-1)
}
- return generatedV2.NodeStatusResponse{}, fmt.Errorf("No catchpoint exists")
+ return model.NodeStatusResponse{}, fmt.Errorf("No catchpoint exists")
}
func TestBasicCatchpointCatchup(t *testing.T) {
@@ -255,7 +255,7 @@ func TestBasicCatchpointCatchup(t *testing.T) {
log.Infof(" - done catching up!\n")
// ensure the catchpoint is created for targetCatchpointRound
- var status generatedV2.NodeStatusResponse
+ var status model.NodeStatusResponse
timer := time.NewTimer(10 * time.Second)
outer:
for {
@@ -385,7 +385,6 @@ func TestCatchpointLabelGeneration(t *testing.T) {
break
}
currentRound++
-
}
log.Infof("done building!\n")
@@ -400,3 +399,295 @@ func TestCatchpointLabelGeneration(t *testing.T) {
})
}
}
+
+// TestNodeTxHandlerRestart starts a two-node and one relay network
+// Waits until a catchpoint is created
+// Lets the primary node have the majority of the stake
+// Sends a transaction from the second node
+// The transaction will be confirmed only if the txHandler gets the transaction
+func TestNodeTxHandlerRestart(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ consensus := make(config.ConsensusProtocols)
+ protoVersion := protocol.ConsensusCurrentVersion
+ catchpointCatchupProtocol := config.Consensus[protoVersion]
+ catchpointCatchupProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ // MaxBalLookback = 2 x SeedRefreshInterval x SeedLookback
+ // ref. https://github.com/algorandfoundation/specs/blob/master/dev/abft.md
+ catchpointCatchupProtocol.SeedLookback = 2
+ catchpointCatchupProtocol.SeedRefreshInterval = 2
+ catchpointCatchupProtocol.MaxBalLookback = 2 * catchpointCatchupProtocol.SeedLookback * catchpointCatchupProtocol.SeedRefreshInterval // 8
+ catchpointCatchupProtocol.CatchpointLookback = catchpointCatchupProtocol.MaxBalLookback
+ catchpointCatchupProtocol.EnableOnlineAccountCatchpoints = true
+ catchpointCatchupProtocol.StateProofInterval = 0
+ if runtime.GOOS == "darwin" || runtime.GOARCH == "amd64" {
+ // amd64/macos platforms are generally quite capable, so accelerate the round times to make the test run faster.
+ catchpointCatchupProtocol.AgreementFilterTimeoutPeriod0 = 1 * time.Second
+ catchpointCatchupProtocol.AgreementFilterTimeout = 1 * time.Second
+ }
+ consensus[protoVersion] = catchpointCatchupProtocol
+
+ var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(consensus)
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodes50EachWithRelay.json"))
+
+ // Get primary node
+ primaryNode, err := fixture.GetNodeController("Node1")
+ a.NoError(err)
+ // Get secondary node
+ secondNode, err := fixture.GetNodeController("Node2")
+ a.NoError(err)
+ // Get the relay
+ relayNode, err := fixture.GetNodeController("Relay")
+ a.NoError(err)
+
+ // prepare its configuration file to set it to generate a catchpoint every 16 rounds.
+ cfg, err := config.LoadConfigFromDisk(primaryNode.GetDataDir())
+ a.NoError(err)
+ const catchpointInterval = 16
+ cfg.CatchpointInterval = catchpointInterval
+ cfg.CatchpointTracking = 2
+ cfg.MaxAcctLookback = 2
+ cfg.Archival = false
+
+ cfg.TxSyncIntervalSeconds = 200000 // disable txSync
+
+ cfg.SaveToDisk(primaryNode.GetDataDir())
+ cfg.SaveToDisk(secondNode.GetDataDir())
+
+ cfg, err = config.LoadConfigFromDisk(relayNode.GetDataDir())
+ a.NoError(err)
+ cfg.TxSyncIntervalSeconds = 200000 // disable txSync
+ cfg.SaveToDisk(relayNode.GetDataDir())
+
+ fixture.Start()
+ defer fixture.LibGoalFixture.Shutdown()
+
+ client1 := fixture.GetLibGoalClientFromNodeController(primaryNode)
+ client2 := fixture.GetLibGoalClientFromNodeController(secondNode)
+ wallet1, err := client1.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ wallet2, err := client2.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ addrs1, err := client1.ListAddresses(wallet1)
+ a.NoError(err)
+ addrs2, err := client2.ListAddresses(wallet2)
+ a.NoError(err)
+
+ // let the second node have insufficient stake for proposing a block
+ tx, err := client2.SendPaymentFromUnencryptedWallet(addrs2[0], addrs1[0], 1000, 4999999999000000, nil)
+ a.NoError(err)
+ status, err := client1.Status()
+ a.NoError(err)
+ _, err = fixture.WaitForConfirmedTxn(status.LastRound+100, addrs1[0], tx.ID().String())
+ a.NoError(err)
+ targetCatchpointRound := status.LastRound
+
+ // ensure the catchpoint is created for targetCatchpointRound
+ timer := time.NewTimer(100 * time.Second)
+outer:
+ for {
+ status, err = client1.Status()
+ a.NoError(err)
+
+ var round basics.Round
+ if status.LastCatchpoint != nil && len(*status.LastCatchpoint) > 0 {
+ round, _, err = ledgercore.ParseCatchpointLabel(*status.LastCatchpoint)
+ a.NoError(err)
+ if uint64(round) >= targetCatchpointRound {
+ break
+ }
+ }
+ select {
+ case <-timer.C:
+ a.Failf("timeout waiting a catchpoint", "target: %d, got %d", targetCatchpointRound, round)
+ break outer
+ default:
+ time.Sleep(250 * time.Millisecond)
+ }
+ }
+
+ // let the primary node catch up
+ err = client1.Catchup(*status.LastCatchpoint)
+ a.NoError(err)
+
+ status1, err := client1.Status()
+ a.NoError(err)
+ targetRound := status1.LastRound + 5
+
+ // Wait for the network to start making progress again
+ primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode)
+ err = fixture.ClientWaitForRound(primaryNodeRestClient, targetRound,
+ 10*catchpointCatchupProtocol.AgreementFilterTimeout)
+ a.NoError(err)
+
+ // let the 2nd client send a transaction
+ tx, err = client2.SendPaymentFromUnencryptedWallet(addrs2[0], addrs1[0], 1000, 50000, nil)
+ a.NoError(err)
+
+ status, err = client2.Status()
+ a.NoError(err)
+ _, err = fixture.WaitForConfirmedTxn(status.LastRound+50, addrs2[0], tx.ID().String())
+ a.NoError(err)
+}
+
+// TestNodeTxSyncRestart starts a two-node and one relay network
+// Waits until a catchpoint is created
+// Lets the primary node have the majority of the stake
+// Stops the primary node to miss the next transaction
+// Sends a transaction from the second node
+// Starts the primary node, and immediately after start the catchup
+// The transaction will be confirmed only when the TxSync of the pools passes the transaction to the primary node
+func TestNodeTxSyncRestart(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ consensus := make(config.ConsensusProtocols)
+ protoVersion := protocol.ConsensusCurrentVersion
+ catchpointCatchupProtocol := config.Consensus[protoVersion]
+ catchpointCatchupProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ // MaxBalLookback = 2 x SeedRefreshInterval x SeedLookback
+ // ref. https://github.com/algorandfoundation/specs/blob/master/dev/abft.md
+ catchpointCatchupProtocol.SeedLookback = 2
+ catchpointCatchupProtocol.SeedRefreshInterval = 2
+ catchpointCatchupProtocol.MaxBalLookback = 2 * catchpointCatchupProtocol.SeedLookback * catchpointCatchupProtocol.SeedRefreshInterval
+ catchpointCatchupProtocol.CatchpointLookback = catchpointCatchupProtocol.MaxBalLookback
+ catchpointCatchupProtocol.EnableOnlineAccountCatchpoints = true
+ catchpointCatchupProtocol.StateProofInterval = 0
+ if runtime.GOOS == "darwin" || runtime.GOARCH == "amd64" {
+ // amd64/macos platforms are generally quite capable, so accelerate the round times to make the test run faster.
+ catchpointCatchupProtocol.AgreementFilterTimeoutPeriod0 = 1 * time.Second
+ catchpointCatchupProtocol.AgreementFilterTimeout = 1 * time.Second
+ }
+ consensus[protoVersion] = catchpointCatchupProtocol
+
+ var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(consensus)
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodes50EachWithRelay.json"))
+
+ // Get primary node
+ primaryNode, err := fixture.GetNodeController("Node1")
+ a.NoError(err)
+ // Get secondary node
+ secondNode, err := fixture.GetNodeController("Node2")
+ a.NoError(err)
+ // Get the relay
+ relayNode, err := fixture.GetNodeController("Relay")
+ a.NoError(err)
+
+ // prepare its configuration file to set it to generate a catchpoint every 16 rounds.
+ cfg, err := config.LoadConfigFromDisk(primaryNode.GetDataDir())
+ a.NoError(err)
+ const catchpointInterval = 16
+ cfg.CatchpointInterval = catchpointInterval
+ cfg.CatchpointTracking = 2
+ cfg.MaxAcctLookback = 2
+ cfg.Archival = false
+
+ // Shorten the txn sync interval so the test can run faster
+ cfg.TxSyncIntervalSeconds = 4
+
+ cfg.SaveToDisk(primaryNode.GetDataDir())
+ cfg.SaveToDisk(secondNode.GetDataDir())
+
+ cfg, err = config.LoadConfigFromDisk(relayNode.GetDataDir())
+ a.NoError(err)
+ cfg.TxSyncIntervalSeconds = 4
+ cfg.SaveToDisk(relayNode.GetDataDir())
+
+ fixture.Start()
+ defer fixture.LibGoalFixture.Shutdown()
+
+ client1 := fixture.GetLibGoalClientFromNodeController(primaryNode)
+ client2 := fixture.GetLibGoalClientFromNodeController(secondNode)
+ wallet1, err := client1.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ wallet2, err := client2.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ addrs1, err := client1.ListAddresses(wallet1)
+ a.NoError(err)
+ addrs2, err := client2.ListAddresses(wallet2)
+ a.NoError(err)
+
+ // let the second node have insufficient stake for proposing a block
+ tx, err := client2.SendPaymentFromUnencryptedWallet(addrs2[0], addrs1[0], 1000, 4999999999000000, nil)
+ a.NoError(err)
+ status, err := client1.Status()
+ a.NoError(err)
+ _, err = fixture.WaitForConfirmedTxn(status.LastRound+100, addrs1[0], tx.ID().String())
+ a.NoError(err)
+ targetCatchpointRound := status.LastRound
+
+ // ensure the catchpoint is created for targetCatchpointRound
+ timer := time.NewTimer(100 * time.Second)
+outer:
+ for {
+ status, err = client1.Status()
+ a.NoError(err)
+
+ var round basics.Round
+ if status.LastCatchpoint != nil && len(*status.LastCatchpoint) > 0 {
+ round, _, err = ledgercore.ParseCatchpointLabel(*status.LastCatchpoint)
+ a.NoError(err)
+ if uint64(round) >= targetCatchpointRound {
+ break
+ }
+ }
+ select {
+ case <-timer.C:
+ a.Failf("timeout waiting a catchpoint", "target: %d, got %d", targetCatchpointRound, round)
+ break outer
+ default:
+ time.Sleep(250 * time.Millisecond)
+ }
+ }
+
+ // stop the primary node
+ client1.FullStop()
+
+ // let the 2nd client send a transaction
+ tx, err = client2.SendPaymentFromUnencryptedWallet(addrs2[0], addrs1[0], 1000, 50000, nil)
+ a.NoError(err)
+
+ // now that the primary missed the transaction, start it, and let it catch up
+ _, err = fixture.StartNode(primaryNode.GetDataDir())
+ a.NoError(err)
+ // let the primary node catch up
+ err = client1.Catchup(*status.LastCatchpoint)
+ a.NoError(err)
+
+ // the transaction should not be confirmed yet
+ _, err = fixture.WaitForConfirmedTxn(0, addrs2[0], tx.ID().String())
+ a.Error(err)
+
+ // Wait for the catchup
+ for t := 0; t < 10; t++ {
+ status1, err := client1.Status()
+ a.NoError(err)
+ status2, err := client2.Status()
+ a.NoError(err)
+
+ if status1.LastRound+1 >= status2.LastRound {
+ // if the primary node is within 1 round of the secondary node, then it has
+ // caught up
+ break
+ }
+ time.Sleep(catchpointCatchupProtocol.AgreementFilterTimeout)
+ }
+
+ status, err = client2.Status()
+ a.NoError(err)
+ _, err = fixture.WaitForConfirmedTxn(status.LastRound+50, addrs2[0], tx.ID().String())
+ a.NoError(err)
+}
diff --git a/test/e2e-go/features/devmode/devmode_test.go b/test/e2e-go/features/devmode/devmode_test.go
index e5c9ab7ea..c356615e9 100644
--- a/test/e2e-go/features/devmode/devmode_test.go
+++ b/test/e2e-go/features/devmode/devmode_test.go
@@ -48,13 +48,14 @@ func TestDevMode(t *testing.T) {
key := crypto.GenerateSignatureSecrets(crypto.Seed{})
receiver := basics.Address(key.SignatureVerifier)
txn := fixture.SendMoneyAndWait(0, 100000, 1000, sender.Address, receiver.String(), "")
- firstRound := txn.ConfirmedRound + 1
+ require.NotNil(t, txn.ConfirmedRound)
+ firstRound := *txn.ConfirmedRound + 1
start := time.Now()
// 2 transactions should be sent within one normal confirmation time.
for i := uint64(0); i < 2; i++ {
txn = fixture.SendMoneyAndWait(firstRound+i, 100000, 1000, sender.Address, receiver.String(), "")
- require.Equal(t, firstRound+i, txn.FirstRound)
+ require.Equal(t, firstRound+i, txn.Txn.Txn.FirstValid)
}
require.True(t, time.Since(start) < 8*time.Second, "Transactions should be quickly confirmed faster than usual.")
diff --git a/test/e2e-go/features/participation/accountParticipationTransitions_test.go b/test/e2e-go/features/participation/accountParticipationTransitions_test.go
index 5047d7cd1..52f25ec83 100644
--- a/test/e2e-go/features/participation/accountParticipationTransitions_test.go
+++ b/test/e2e-go/features/participation/accountParticipationTransitions_test.go
@@ -29,7 +29,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/test/framework/fixtures"
@@ -37,7 +37,7 @@ import (
)
// installParticipationKey generates a new key for a given account and installs it with the client.
-func installParticipationKey(t *testing.T, client libgoal.Client, addr string, firstValid, lastValid uint64) (resp generated.PostParticipationResponse, part account.Participation, err error) {
+func installParticipationKey(t *testing.T, client libgoal.Client, addr string, firstValid, lastValid uint64) (resp model.PostParticipationResponse, part account.Participation, err error) {
// Install overlapping participation keys...
part, filePath, err := client.GenParticipationKeysTo(addr, firstValid, lastValid, 100, t.TempDir())
require.NoError(t, err)
@@ -48,7 +48,7 @@ func installParticipationKey(t *testing.T, client libgoal.Client, addr string, f
return
}
-func registerParticipationAndWait(t *testing.T, client libgoal.Client, part account.Participation) generated.NodeStatusResponse {
+func registerParticipationAndWait(t *testing.T, client libgoal.Client, part account.Participation) model.NodeStatusResponse {
txParams, err := client.SuggestedParams()
require.NoError(t, err)
sAccount := part.Address().String()
@@ -74,7 +74,7 @@ func TestKeyRegistration(t *testing.T) {
t.Skip()
}
- checkKey := func(key generated.ParticipationKey, firstValid, lastValid, lastProposal uint64, msg string) {
+ checkKey := func(key model.ParticipationKey, firstValid, lastValid, lastProposal uint64, msg string) {
require.NotNil(t, key.EffectiveFirstValid, fmt.Sprintf("%s.EffectiveFirstValid", msg))
require.NotNil(t, key.EffectiveLastValid, fmt.Sprintf("%s.EffectiveLastValid", msg))
require.NotNil(t, key.LastBlockProposal, fmt.Sprintf("%s.LastBlockProposal", msg))
diff --git a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
index 97ac14e00..0a6f23311 100644
--- a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
+++ b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
@@ -199,14 +199,15 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) {
fixture.WaitForTxnConfirmation(seededRound+maxRoundsToWaitForTxnConfirm, newAccount, onlineTxID)
nodeStatus, _ = client.Status()
onlineRound := nodeStatus.LastRound
- newAccountStatus, err := client.AccountInformation(newAccount)
+ newAccountStatus, err := client.AccountInformation(newAccount, false)
a.NoError(err, "client should be able to get information about new account")
a.Equal(basics.Online.String(), newAccountStatus.Status, "new account should be online")
// transfer the funds and close to the new account
amountToSend := richBalance - 3*transactionFee - amountToSendInitial - minAcctBalance
txn := fixture.SendMoneyAndWait(onlineRound, amountToSend, transactionFee, richAccount, newAccount, newAccount)
- fundedRound := txn.ConfirmedRound
+ a.NotNil(txn.ConfirmedRound)
+ fundedRound := *txn.ConfirmedRound
nodeStatus, _ = client.Status()
params, err := client.ConsensusParams(nodeStatus.LastRound)
@@ -308,11 +309,12 @@ func TestAccountGoesOnlineForShortPeriod(t *testing.T) {
fixture.AssertValidTxid(onlineTxID)
maxRoundsToWaitForTxnConfirm := uint64(5)
nodeStatus, err := client.Status()
+ a.NoError(err)
seededRound := nodeStatus.LastRound
fixture.WaitForTxnConfirmation(seededRound+maxRoundsToWaitForTxnConfirm, newAccount, onlineTxID)
nodeStatus, _ = client.Status()
- accountStatus, err := client.AccountInformation(newAccount)
+ accountStatus, err := client.AccountInformation(newAccount, false)
a.NoError(err, "client should be able to get information about new account")
a.Equal(basics.Online.String(), accountStatus.Status, "new account should be online")
}
diff --git a/test/e2e-go/features/participation/participationExpiration_test.go b/test/e2e-go/features/participation/participationExpiration_test.go
index 06b392856..6f8c0b1a9 100644
--- a/test/e2e-go/features/participation/participationExpiration_test.go
+++ b/test/e2e-go/features/participation/participationExpiration_test.go
@@ -64,7 +64,7 @@ func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, f
a.GreaterOrEqual(newAmt, initialAmt)
- newAccountStatus, err := pClient.AccountInformation(sAccount)
+ newAccountStatus, err := pClient.AccountInformation(sAccount, false)
a.NoError(err)
a.Equal(basics.Offline.String(), newAccountStatus.Status)
@@ -114,7 +114,7 @@ func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, f
txnConfirmed := fixture.WaitForTxnConfirmation(seededRound+maxRoundsToWaitForTxnConfirm, sAccount, onlineTxID)
a.True(txnConfirmed)
- newAccountStatus, err = pClient.AccountInformation(sAccount)
+ newAccountStatus, err = pClient.AccountInformation(sAccount, false)
a.NoError(err)
a.Equal(basics.Online.String(), newAccountStatus.Status)
@@ -135,7 +135,7 @@ func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, f
// We want to wait until we get to one round past the last valid round
err = fixture.WaitForRoundWithTimeout(uint64(lastValidRound) + 1)
- newAccountStatus, err = pClient.AccountInformation(sAccount)
+ newAccountStatus, err = pClient.AccountInformation(sAccount, false)
a.NoError(err)
// The account should be online still...
@@ -150,16 +150,16 @@ func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, f
_, err = sClient.WaitForRound(uint64(lastValidRound + 1))
a.NoError(err)
- blk, err := sClient.Block(latestRound)
+ blk, err := sClient.BookkeepingBlock(latestRound)
a.NoError(err)
- a.Equal(blk.CurrentProtocol, protocolCheck)
+ a.Equal(string(blk.CurrentProtocol), protocolCheck)
sendMoneyTxn := fixture.SendMoneyAndWait(latestRound, amountToSendInitial, transactionFee, richAccount, sAccount, "")
- txnConfirmed = fixture.WaitForTxnConfirmation(latestRound+maxRoundsToWaitForTxnConfirm, sAccount, sendMoneyTxn.TxID)
+ txnConfirmed = fixture.WaitForTxnConfirmation(latestRound+maxRoundsToWaitForTxnConfirm, sAccount, sendMoneyTxn.Txn.ID().String())
a.True(txnConfirmed)
- newAccountStatus, err = pClient.AccountInformation(sAccount)
+ newAccountStatus, err = pClient.AccountInformation(sAccount, false)
a.NoError(err)
// The account should be equal to the target status now
diff --git a/test/e2e-go/features/participation/participationRewards_test.go b/test/e2e-go/features/participation/participationRewards_test.go
index 3c8a29371..5581de195 100644
--- a/test/e2e-go/features/participation/participationRewards_test.go
+++ b/test/e2e-go/features/participation/participationRewards_test.go
@@ -42,15 +42,16 @@ func getFirstAccountFromNamedNode(fixture *fixtures.RestClientFixture, r *requir
}
func waitUntilRewards(t *testing.T, fixture *fixtures.RestClientFixture, round uint64) (uint64, error) {
- block, err := fixture.AlgodClient.Block(round)
a := require.New(fixtures.SynchronizedTest(t))
+
+ block, err := fixture.LibGoalClient.BookkeepingBlock(round)
a.NoError(err)
for {
round++
err := fixture.WaitForRoundWithTimeout(round + 1)
a.NoError(err)
- nextBlock, err := fixture.AlgodClient.Block(round)
+ nextBlock, err := fixture.LibGoalClient.BookkeepingBlock(round)
a.NoError(err)
if nextBlock.RewardsLevel > block.RewardsLevel {
@@ -126,7 +127,7 @@ func TestOnlineOfflineRewards(t *testing.T) {
finalOnlineBalance, _ := onlineClient.GetBalance(onlineAccount)
finalOfflineBalance, _ := offlineClient.GetBalance(offlineAccount)
- blk, err := fixture.AlgodClient.Block(initialRound)
+ blk, err := fixture.LibGoalClient.BookkeepingBlock(initialRound)
r.NoError(err)
rewardUnit := config.Consensus[protocol.ConsensusVersion(blk.CurrentProtocol)].RewardUnit
// online account should be rewarded at least the expected amount
@@ -217,7 +218,7 @@ func TestRewardUnitThreshold(t *testing.T) {
minFee, minBalance, err := fixture.MinFeeAndBalance(initialRound)
r.NoError(err)
- blk, err := client.Block(initialRound)
+ blk, err := client.BookkeepingBlock(initialRound)
r.NoError(err)
rewardUnit := config.Consensus[protocol.ConsensusVersion(blk.CurrentProtocol)].RewardUnit
// accrue rewards by letting time pass
@@ -254,8 +255,8 @@ func TestRewardUnitThreshold(t *testing.T) {
r.NoError(err)
// newAccount should NOT be rewarded
// poorAccount should be rewarded
- updatedBalancePoorAccount, _ := client.AccountInformation(poorAccount)
- updatedBalanceNewAccount, _ := client.AccountInformation(newAccount)
+ updatedBalancePoorAccount, _ := client.AccountInformation(poorAccount, false)
+ updatedBalanceNewAccount, _ := client.AccountInformation(newAccount, false)
poorAccountDelta := updatedBalancePoorAccount.Amount - initialBalancePoorAccount
r.Truef(initialBalancePoorAccount/rewardUnit <= poorAccountDelta, "non-empty account with balance > rewardunit (%d) should accrue rewards. started with %d, given %d, now has %d. Expected %d", rewardUnit, initialBalancePoorAccount, amountRichAccountPokesWith, updatedBalancePoorAccount.Amount, amountRichAccountPokesWith+initialBalancePoorAccount/rewardUnit)
r.Truef(initialBalancePoorAccount/rewardUnit <= updatedBalancePoorAccount.Rewards, "non-empty account with balance > rewardunit (%d) should accrue rewards. started with %d, given %d, now has %d, actual rewards %d", rewardUnit, initialBalancePoorAccount, amountRichAccountPokesWith, updatedBalancePoorAccount.Amount, updatedBalancePoorAccount.Rewards)
@@ -288,7 +289,7 @@ func TestRewardUnitThreshold(t *testing.T) {
client.WaitForRound(rewardRound2)
// Ensure that a reward for newAccount's one reward unit is now pending
- latestBalanceNewAccount, _ := client.AccountInformation(newAccount)
+ latestBalanceNewAccount, _ := client.AccountInformation(newAccount, false)
r.Truef((initialBalanceNewAccount+amountRichAccountPokesWith)/rewardUnit >= 1, "new account needs at least one reward unit")
r.Truef(latestBalanceNewAccount.Amount >= initialBalanceNewAccount+(initialBalanceNewAccount+amountRichAccountPokesWith)/rewardUnit,
"account sent at least %d should have accrued rewards. started with %d, was bumped to %d, so increase should be more than the %d seen",
@@ -298,7 +299,7 @@ func TestRewardUnitThreshold(t *testing.T) {
r.Equal(initialBalanceNewAccount+amountRichAccountPokesWith, latestBalanceNewAccount.AmountWithoutPendingRewards, "rewards should be pending")
// since we poked, previous rewards should no longer be pending for poor account
- latestBalancePoorAccount, _ := client.AccountInformation(poorAccount)
+ latestBalancePoorAccount, _ := client.AccountInformation(poorAccount, false)
r.Truef(latestBalancePoorAccount.AmountWithoutPendingRewards >= updatedBalancePoorAccount.Amount+amountRichAccountPokesWith, "rewards should have been applied")
// Test e2e REST API convenience computations
@@ -349,7 +350,7 @@ func TestRewardRateRecalculation(t *testing.T) {
r.NoError(err)
fixture.SendMoneyAndWait(curStatus.LastRound, amountToSend, minFee, richAccount.Address, rewardsAccount, "")
- blk, err := client.Block(curStatus.LastRound)
+ blk, err := client.BookkeepingBlock(curStatus.LastRound)
r.NoError(err)
r.Equal(protocol.ConsensusVersion(blk.CurrentProtocol), consensusTestRapidRewardRecalculation)
consensusParams := consensus[protocol.ConsensusVersion(blk.CurrentProtocol)]
@@ -359,10 +360,10 @@ func TestRewardRateRecalculation(t *testing.T) {
if roundQueried != rewardRecalcRound-1 {
r.FailNow("", "got rewards pool balance on round %d but wanted the balance on round %d, failing out", rewardRecalcRound-1, roundQueried)
}
- lastRoundBeforeRewardRecals, err := client.Block(rewardRecalcRound - 1)
+ lastRoundBeforeRewardRecals, err := client.BookkeepingBlock(rewardRecalcRound - 1)
r.NoError(err)
r.NoError(fixture.WaitForRoundWithTimeout(rewardRecalcRound))
- blk, err = client.Block(rewardRecalcRound)
+ blk, err = client.BookkeepingBlock(rewardRecalcRound)
r.NoError(err)
if !consensusParams.PendingResidueRewards {
lastRoundBeforeRewardRecals.RewardsResidue = 0
@@ -381,11 +382,11 @@ func TestRewardRateRecalculation(t *testing.T) {
if roundQueried != rewardRecalcRound-1 {
r.FailNow("", "got rewards pool balance on round %d but wanted the balance on round %d, failing out", rewardRecalcRound-1, roundQueried)
}
- lastRoundBeforeRewardRecals, err = client.Block(rewardRecalcRound - 1)
+ lastRoundBeforeRewardRecals, err = client.BookkeepingBlock(rewardRecalcRound - 1)
r.NoError(err)
consensusParams = consensus[protocol.ConsensusVersion(lastRoundBeforeRewardRecals.CurrentProtocol)]
r.NoError(fixture.WaitForRoundWithTimeout(rewardRecalcRound))
- blk, err = client.Block(rewardRecalcRound)
+ blk, err = client.BookkeepingBlock(rewardRecalcRound)
r.NoError(err)
if !consensusParams.PendingResidueRewards {
lastRoundBeforeRewardRecals.RewardsResidue = 0
diff --git a/test/e2e-go/features/stateproofs/stateproofs_test.go b/test/e2e-go/features/stateproofs/stateproofs_test.go
index a3b72180f..ac2270cc4 100644
--- a/test/e2e-go/features/stateproofs/stateproofs_test.go
+++ b/test/e2e-go/features/stateproofs/stateproofs_test.go
@@ -35,9 +35,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklearray"
sp "github.com/algorand/go-algorand/crypto/stateproof"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
- v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
@@ -200,7 +198,7 @@ func verifyStateProofsCreation(t *testing.T, fixture *fixtures.RestClientFixture
t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round())
// Find the state proof transaction
- stateProofMessage, nextStateProofBlock := verifyStateProofForRound(r, fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams, expectedNumberOfStateProofs)
+ stateProofMessage, nextStateProofBlock := verifyStateProofForRound(r, fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams)
lastStateProofMessage = stateProofMessage
lastStateProofBlock = nextStateProofBlock
}
@@ -311,7 +309,7 @@ func TestStateProofOverlappingKeys(t *testing.T) {
t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round())
// Find the state proof transaction
- stateProofMessage, nextStateProofBlock := verifyStateProofForRound(r, &fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams, expectedNumberOfStateProofs)
+ stateProofMessage, nextStateProofBlock := verifyStateProofForRound(r, &fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams)
lastStateProofMessage = stateProofMessage
lastStateProofBlock = nextStateProofBlock
}
@@ -362,7 +360,7 @@ func TestStateProofMessageCommitmentVerification(t *testing.T) {
nextStateProofRound = uint64(blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound)
}
- _, stateProofMessage := getStateProofByLastRound(r, &fixture, firstStateProofRound, 1)
+ _, stateProofMessage := getStateProofByLastRound(r, &fixture, firstStateProofRound)
t.Logf("found first stateproof, attesting to rounds %d - %d. Verifying.\n", stateProofMessage.FirstAttestedRound, stateProofMessage.LastAttestedRound)
for rnd := stateProofMessage.FirstAttestedRound; rnd <= stateProofMessage.LastAttestedRound; rnd++ {
@@ -396,39 +394,30 @@ func getDefaultStateProofConsensusParams() config.ConsensusParams {
return consensusParams
}
-func getStateProofByLastRound(r *require.Assertions, fixture *fixtures.RestClientFixture, stateProofLatestRound uint64, expectedNumberOfStateProofs uint64) (sp.StateProof, stateproofmsg.Message) {
+func getStateProofByLastRound(r *require.Assertions, fixture *fixtures.RestClientFixture, stateProofLatestRound uint64) (sp.StateProof, stateproofmsg.Message) {
restClient, err := fixture.NC.AlgodClient()
r.NoError(err)
- curRound, err := fixture.LibGoalClient.CurrentRound()
- r.NoError(err)
-
- res, err := restClient.TransactionsByAddr(transactions.StateProofSender.String(), 0, curRound, expectedNumberOfStateProofs+1)
+ res, err := restClient.StateProofs(stateProofLatestRound)
r.NoError(err)
+ r.Equal(res.Message.LastAttestedRound, stateProofLatestRound)
var stateProof sp.StateProof
- var stateProofMessage stateproofmsg.Message
- for _, txn := range res.Transactions {
- r.Equal(txn.Type, string(protocol.StateProofTx))
- r.True(txn.StateProof != nil)
- err = protocol.Decode(txn.StateProof.StateProofMessage, &stateProofMessage)
- r.NoError(err)
- if stateProofMessage.LastAttestedRound == stateProofLatestRound {
- err = protocol.Decode(txn.StateProof.StateProof, &stateProof)
- r.NoError(err)
+ err = protocol.Decode(res.StateProof, &stateProof)
+ r.NoError(err)
- return stateProof, stateProofMessage
- }
+ msg := stateproofmsg.Message{
+ BlockHeadersCommitment: res.Message.BlockHeadersCommitment,
+ VotersCommitment: res.Message.VotersCommitment,
+ LnProvenWeight: res.Message.LnProvenWeight,
+ FirstAttestedRound: res.Message.FirstAttestedRound,
+ LastAttestedRound: res.Message.LastAttestedRound,
}
-
- r.FailNow("no state proof with latest round %d found", stateProofLatestRound)
-
- // Should never get here
- return sp.StateProof{}, stateproofmsg.Message{}
+ return stateProof, msg
}
-func verifyStateProofForRound(r *require.Assertions, fixture *fixtures.RestClientFixture, nextStateProofRound uint64, prevStateProofMessage stateproofmsg.Message, lastStateProofBlock bookkeeping.Block, consensusParams config.ConsensusParams, expectedNumberOfStateProofs uint64) (stateproofmsg.Message, bookkeeping.Block) {
- stateProof, stateProofMessage := getStateProofByLastRound(r, fixture, nextStateProofRound, expectedNumberOfStateProofs)
+func verifyStateProofForRound(r *require.Assertions, fixture *fixtures.RestClientFixture, nextStateProofRound uint64, prevStateProofMessage stateproofmsg.Message, lastStateProofBlock bookkeeping.Block, consensusParams config.ConsensusParams) (stateproofmsg.Message, bookkeeping.Block) {
+ stateProof, stateProofMessage := getStateProofByLastRound(r, fixture, nextStateProofRound)
nextStateProofBlock, err := fixture.LibGoalClient.BookkeepingBlock(nextStateProofRound)
@@ -545,7 +534,7 @@ func TestRecoverFromLaggingStateProofChain(t *testing.T) {
t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round())
// Find the state proof transaction
- stateProofMessage, nextStateProofBlock := verifyStateProofForRound(r, &fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams, expectedNumberOfStateProofs)
+ stateProofMessage, nextStateProofBlock := verifyStateProofForRound(r, &fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams)
lastStateProofMessage = stateProofMessage
lastStateProofBlock = nextStateProofBlock
}
@@ -639,7 +628,7 @@ func TestUnableToRecoverFromLaggingStateProofChain(t *testing.T) {
}
// installParticipationKey generates a new key for a given account and installs it with the client.
-func installParticipationKey(t *testing.T, client libgoal.Client, addr string, firstValid, lastValid uint64) (resp generated.PostParticipationResponse, part account.Participation, err error) {
+func installParticipationKey(t *testing.T, client libgoal.Client, addr string, firstValid, lastValid uint64) (resp model.PostParticipationResponse, part account.Participation, err error) {
dir, err := os.MkdirTemp("", "temporary_partkey_dir")
require.NoError(t, err)
defer os.RemoveAll(dir)
@@ -654,7 +643,7 @@ func installParticipationKey(t *testing.T, client libgoal.Client, addr string, f
return
}
-func registerParticipationAndWait(t *testing.T, client libgoal.Client, part account.Participation) generated.NodeStatusResponse {
+func registerParticipationAndWait(t *testing.T, client libgoal.Client, part account.Participation) model.NodeStatusResponse {
currentRnd, err := client.CurrentRound()
require.NoError(t, err)
sAccount := part.Address().String()
@@ -771,7 +760,7 @@ func TestAttestorsChange(t *testing.T) {
t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round())
// Find the state proof transaction
- stateProofMessage, nextStateProofBlock := verifyStateProofForRound(a, &fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams, expectedNumberOfStateProofs)
+ stateProofMessage, nextStateProofBlock := verifyStateProofForRound(a, &fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams)
lastStateProofMessage = stateProofMessage
lastStateProofBlock = nextStateProofBlock
}
@@ -852,7 +841,7 @@ func TestTotalWeightChanges(t *testing.T) {
t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round())
// Find the state proof transaction
- stateProofMessage, nextStateProofBlock := verifyStateProofForRound(a, &fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams, expectedNumberOfStateProofs)
+ stateProofMessage, nextStateProofBlock := verifyStateProofForRound(a, &fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams)
lastStateProofMessage = stateProofMessage
lastStateProofBlock = nextStateProofBlock
}
@@ -909,16 +898,13 @@ func TestSPWithTXPoolFull(t *testing.T) {
err = fixture.WaitForRound(round+1, 6*time.Second)
require.NoError(t, err)
- b, err := relay.Block(round + 1)
+ b, err := relay.BookkeepingBlock(round + 1)
require.NoError(t, err)
- if len(b.Transactions.Transactions) == 0 {
+ if len(b.Payset) == 0 {
continue
}
- require.Equal(t, string(protocol.StateProofTx), b.Transactions.Transactions[0].Type)
- var msg stateproofmsg.Message
- err = protocol.Decode(b.Transactions.Transactions[0].StateProof.StateProofMessage, &msg)
- require.NoError(t, err)
- require.Equal(t, uint64(8), msg.LastAttestedRound)
+ require.Equal(t, protocol.StateProofTx, b.Payset[0].Txn.Type)
+ require.Equal(t, uint64(8), b.Payset[0].Txn.StateProofTxnFields.Message.LastAttestedRound)
break
}
require.Less(t, round, uint64(20))
@@ -968,12 +954,12 @@ func TestAtMostOneSPFullPool(t *testing.T) {
err := fixture.WaitForRound(round+1, 6*time.Second)
require.NoError(t, err)
- b, err := relay.Block(round + 1)
+ b, err := relay.BookkeepingBlock(round + 1)
require.NoError(t, err)
params, err = relay.SuggestedParams()
require.NoError(t, err)
- if len(b.Transactions.Transactions) == 0 {
+ if len(b.Payset) == 0 {
continue
}
tid := 0
@@ -981,14 +967,11 @@ func TestAtMostOneSPFullPool(t *testing.T) {
// Since the pool is full, only one additional SP transaction is allowed in. So only one SP can be added to be block
// break after finding it, and look for the next one in a subsequent block
// In case two SP transactions get into the same block, the following loop will not find the second one, and fail the test
- for ; tid < len(b.Transactions.Transactions); tid++ {
- if b.Transactions.Transactions[tid].Type == string(protocol.StateProofTx) {
- require.Equal(t, string(protocol.StateProofTx), b.Transactions.Transactions[tid].Type)
+ for ; tid < len(b.Payset); tid++ {
+ if string(b.Payset[tid].Txn.Type) == string(protocol.StateProofTx) {
+ require.Equal(t, protocol.StateProofTx, b.Payset[tid].Txn.Type)
- var msg stateproofmsg.Message
- err = protocol.Decode(b.Transactions.Transactions[tid].StateProof.StateProofMessage, &msg)
- require.NoError(t, err)
- require.Equal(t, int(expectedSPRound), int(msg.LastAttestedRound))
+ require.Equal(t, int(expectedSPRound), int(b.Payset[tid].Txn.StateProofTxnFields.Message.LastAttestedRound))
expectedSPRound = expectedSPRound + consensusParams.StateProofInterval
break
@@ -1108,12 +1091,12 @@ func TestAtMostOneSPFullPoolWithLoad(t *testing.T) {
err := fixture.WaitForRound(round+1, 6*time.Second)
require.NoError(t, err)
- b, err := relay.Block(round + 1)
+ b, err := relay.BookkeepingBlock(round + 1)
require.NoError(t, err)
params, err = relay.SuggestedParams()
require.NoError(t, err)
- if len(b.Transactions.Transactions) == 0 {
+ if len(b.Payset) == 0 {
continue
}
tid := 0
@@ -1121,14 +1104,11 @@ func TestAtMostOneSPFullPoolWithLoad(t *testing.T) {
// Since the pool is full, only one additional SP transaction is allowed in. So only one SP can be added to be block
// break after finding it, and look for the next one in a subsequent block
// In case two SP transactions get into the same block, the following loop will not find the second one, and fail the test
- for ; tid < len(b.Transactions.Transactions); tid++ {
- if b.Transactions.Transactions[tid].Type == string(protocol.StateProofTx) {
- require.Equal(t, string(protocol.StateProofTx), b.Transactions.Transactions[tid].Type)
+ for ; tid < len(b.Payset); tid++ {
+ if string(b.Payset[tid].Txn.Type) == string(protocol.StateProofTx) {
+ require.Equal(t, protocol.StateProofTx, b.Payset[tid].Txn.Type)
- var msg stateproofmsg.Message
- err = protocol.Decode(b.Transactions.Transactions[tid].StateProof.StateProofMessage, &msg)
- require.NoError(t, err)
- require.Equal(t, int(expectedSPRound), int(msg.LastAttestedRound))
+ require.Equal(t, int(expectedSPRound), int(b.Payset[tid].Txn.StateProofTxnFields.Message.LastAttestedRound))
expectedSPRound = expectedSPRound + consensusParams.StateProofInterval
break
@@ -1209,8 +1189,8 @@ func TestStateProofCheckTotalStake(t *testing.T) {
var lastStateProofBlock bookkeeping.Block
libgoalClient := fixture.LibGoalClient
- var totalSupplyAtRound [100]v1.Supply
- var accountSnapshotAtRound [100][]generatedV2.Account
+ var totalSupplyAtRound [100]model.SupplyResponse
+ var accountSnapshotAtRound [100][]model.Account
for rnd := uint64(1); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ {
if rnd == consensusParams.StateProofInterval+consensusParams.StateProofVotersLookback { // here we register the keys of address 0 so it won't be able the sign a state proof (its stake would be removed for the total)
@@ -1237,12 +1217,12 @@ func TestStateProofCheckTotalStake(t *testing.T) {
totalSupply, err := libgoalClient.LedgerSupply()
r.NoError(err)
- r.Equal(rnd, totalSupply.Round, "could not capture total stake at the target round. The machine might be too slow for this test")
+ r.Equal(rnd, totalSupply.CurrentRound, "could not capture total stake at the target round. The machine might be too slow for this test")
totalSupplyAtRound[rnd] = totalSupply
- accountSnapshotAtRound[rnd] = make([]generatedV2.Account, pNodes, pNodes)
+ accountSnapshotAtRound[rnd] = make([]model.Account, pNodes, pNodes)
for i := 0; i < pNodes; i++ {
- accountSnapshotAtRound[rnd][i], err = libgoalClient.AccountInformationV2(accountsAddresses[i], false)
+ accountSnapshotAtRound[rnd][i], err = libgoalClient.AccountInformation(accountsAddresses[i], false)
r.NoError(err)
r.NotEqual(accountSnapshotAtRound[rnd][i].Amount, uint64(0))
r.Equal(rnd, accountSnapshotAtRound[rnd][i].Round, "could not capture the account at the target round. The machine might be too slow for this test")
@@ -1275,7 +1255,7 @@ func TestStateProofCheckTotalStake(t *testing.T) {
t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round())
- stateProof, stateProofMsg := getStateProofByLastRound(r, &fixture, nextStateProofRound, expectedNumberOfStateProofs)
+ stateProof, stateProofMsg := getStateProofByLastRound(r, &fixture, nextStateProofRound)
accountSnapshot := accountSnapshotAtRound[stateProofMsg.LastAttestedRound-consensusParams.StateProofInterval-consensusParams.StateProofVotersLookback]
diff --git a/test/e2e-go/features/transactions/accountv2_test.go b/test/e2e-go/features/transactions/accountv2_test.go
index 52b39c9ee..297222934 100644
--- a/test/e2e-go/features/transactions/accountv2_test.go
+++ b/test/e2e-go/features/transactions/accountv2_test.go
@@ -25,6 +25,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
@@ -159,7 +160,7 @@ int 1
// create the app
tx, err := client.MakeUnsignedAppCreateTx(
- transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, 0)
+ transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, nil, 0)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(creator, 0, 0, fee, tx)
a.NoError(err)
@@ -204,7 +205,7 @@ int 1
a.True(ok)
a.Equal(uint64(1), value.Uint)
- txInfo, err := fixture.LibGoalClient.PendingTransactionInformationV2(txid)
+ txInfo, err := fixture.LibGoalClient.PendingTransactionInformation(txid)
a.NoError(err)
a.NotNil(txInfo.ConfirmedRound)
a.NotZero(*txInfo.ConfirmedRound)
@@ -214,7 +215,7 @@ int 1
checkEvalDelta(t, &client, txnRound, txnRound+1, 1, 1)
// call the app
- tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil)
+ tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil, nil)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx)
a.NoError(err)
@@ -231,10 +232,11 @@ int 1
// Ensure the txn committed
resp, err := client.GetPendingTransactions(2)
a.NoError(err)
- a.Equal(uint64(0), resp.TotalTxns)
- txinfo, err := client.TransactionInformation(signedTxn.Txn.Sender.String(), txid)
+ a.Equal(uint64(0), resp.TotalTransactions)
+ txinfo, err := client.PendingTransactionInformation(txid)
a.NoError(err)
- a.True(txinfo.ConfirmedRound != 0)
+ a.NotNil(txinfo.ConfirmedRound)
+ a.True(*txinfo.ConfirmedRound != 0)
// check creator's balance record for the app entry and the state changes
ad, err = client.AccountData(creator)
@@ -276,7 +278,7 @@ int 1
a.True(ok)
a.Equal(uint64(1), value.Uint)
- txInfo, err = fixture.LibGoalClient.PendingTransactionInformationV2(txid)
+ txInfo, err = fixture.LibGoalClient.PendingTransactionInformation(txid)
a.NoError(err)
a.NotNil(txInfo.ConfirmedRound)
a.NotZero(*txInfo.ConfirmedRound)
@@ -293,7 +295,7 @@ int 1
a.Equal(creator, app.Params.Creator)
// call the app
- tx, err = client.MakeUnsignedAppNoOpTx(uint64(appIdx), nil, nil, nil, nil)
+ tx, err = client.MakeUnsignedAppNoOpTx(uint64(appIdx), nil, nil, nil, nil, nil)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx)
a.NoError(err)
@@ -307,13 +309,13 @@ int 1
_, err = client.WaitForRound(round + 1)
a.NoError(err)
// Ensure the txn committed
- resp, err = client.GetPendingTransactions(2)
+ resp, err := client.GetParsedPendingTransactions(2)
a.NoError(err)
- if resp.TotalTxns == 1 {
- a.Equal(resp.TruncatedTxns.Transactions[0].TxID, txid)
+ if resp.TotalTransactions == 1 {
+ a.Equal(resp.TopTransactions[0].Txn.ID().String(), txid)
continue
}
- a.Equal(uint64(0), resp.TotalTxns)
+ a.Equal(uint64(0), resp.TotalTransactions)
break
}
@@ -326,7 +328,323 @@ int 1
a.True(ok)
a.Equal(uint64(3), value.Uint)
- txInfo, err = fixture.LibGoalClient.PendingTransactionInformationV2(txid)
+ txInfo, err = fixture.LibGoalClient.PendingTransactionInformation(txid)
+ a.NoError(err)
+ a.NotNil(txInfo.ConfirmedRound)
+ a.NotZero(*txInfo.ConfirmedRound)
+ txnRound = *txInfo.ConfirmedRound
+
+ // 3 global state update in total, 2 local state updates
+ checkEvalDelta(t, &client, txnRound, txnRound+1, 3, 2)
+}
+
+// Add offending asset index greater than uint64
+func TestAccountInformationWithBadAssetIdx(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ t.Parallel()
+ accountInformationCheckWithOffendingFields(t, []basics.AssetIndex{12181853637140359511}, nil, nil)
+}
+
+// Add missing asset index
+func TestAccountInformationWithMissingAssetIdx(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ accountInformationCheckWithOffendingFields(t, []basics.AssetIndex{121818}, nil, nil)
+}
+
+// Add offending app index greater than uint64
+func TestAccountInformationWithBadAppIdx(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ accountInformationCheckWithOffendingFields(t, nil, []basics.AppIndex{12181853637140359511}, nil)
+}
+
+// Add missing app index
+func TestAccountInformationWithMissingApp(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ accountInformationCheckWithOffendingFields(t, nil, []basics.AppIndex{121818}, nil)
+}
+
+// Add missing account address
+func TestAccountInformationWithMissingAddress(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ randAddr := basics.Address{}
+ crypto.RandBytes(randAddr[:])
+ accountInformationCheckWithOffendingFields(t, nil, nil, []basics.Address{randAddr})
+}
+
+func accountInformationCheckWithOffendingFields(t *testing.T,
+ foreignAssets []basics.AssetIndex,
+ foreignApps []basics.AppIndex,
+ accounts []basics.Address) {
+
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ var fixture fixtures.RestClientFixture
+ proto, ok := config.Consensus[protocol.ConsensusFuture]
+ a.True(ok)
+ proto.AgreementFilterTimeoutPeriod0 = 400 * time.Millisecond
+ proto.AgreementFilterTimeout = 400 * time.Millisecond
+ fixture.SetConsensus(config.ConsensusProtocols{protocol.ConsensusFuture: proto})
+
+ fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachV26.json"))
+ defer fixture.Shutdown()
+
+ client := fixture.LibGoalClient
+ accountList, err := fixture.GetWalletsSortedByBalance()
+ a.NoError(err)
+
+ creator := accountList[0].Address
+ wh, err := client.GetUnencryptedWalletHandle()
+ a.NoError(err)
+
+ user, err := client.GenerateAddress(wh)
+ a.NoError(err)
+
+ fee := uint64(1000)
+
+ var txn transactions.Transaction
+
+ // Fund the manager, so it can issue transactions later on
+ txn, err = client.SendPaymentFromUnencryptedWallet(creator, user, fee, 10000000000, nil)
+ a.NoError(err)
+
+ round, err := client.CurrentRound()
+ a.NoError(err)
+ fixture.WaitForConfirmedTxn(round+4, creator, txn.ID().String())
+
+ // There should be no apps to start with
+ ad, err := client.AccountData(creator)
+ a.NoError(err)
+ a.Zero(len(ad.AppParams))
+
+ ad, err = client.AccountData(user)
+ a.NoError(err)
+ a.Zero(len(ad.AppParams))
+ a.Equal(basics.MicroAlgos{Raw: 10000000000}, ad.MicroAlgos)
+
+ counter := `#pragma version 2
+// a simple global and local calls counter app
+byte b64 Y291bnRlcg== // counter
+dup
+app_global_get
+int 1
++
+app_global_put // update the counter
+int 0
+int 0
+app_opted_in
+bnz opted_in
+err
+opted_in:
+int 0 // account idx for app_local_put
+byte b64 Y291bnRlcg== // counter
+int 0
+byte b64 Y291bnRlcg==
+app_local_get
+int 1 // increment
++
+app_local_put
+int 1
+`
+ approvalOps, err := logic.AssembleString(counter)
+ a.NoError(err)
+ clearstateOps, err := logic.AssembleString("#pragma version 2\nint 1")
+ a.NoError(err)
+ schema := basics.StateSchema{
+ NumUint: 1,
+ }
+
+ // create the app
+ tx, err := client.MakeUnsignedAppCreateTx(
+ transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, nil, 0)
+ a.NoError(err)
+ tx, err = client.FillUnsignedTxTemplate(creator, 0, 0, fee, tx)
+ a.NoError(err)
+ wh, err = client.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ signedTxn, err := client.SignTransactionWithWallet(wh, nil, tx)
+ a.NoError(err)
+ txid, err := client.BroadcastTransaction(signedTxn)
+ a.NoError(err)
+ round, err = client.CurrentRound()
+ a.NoError(err)
+ // ensure transaction is accepted into a block within 5 rounds.
+ confirmed := fixture.WaitForAllTxnsToConfirm(round+5, map[string]string{txid: signedTxn.Txn.Sender.String()})
+ a.True(confirmed)
+
+ // check creator's balance record for the app entry and the state changes
+ ad, err = client.AccountData(creator)
+ a.NoError(err)
+ a.Equal(1, len(ad.AppParams))
+ var appIdx basics.AppIndex
+ var params basics.AppParams
+ for i, p := range ad.AppParams {
+ appIdx = i
+ params = p
+ break
+ }
+ a.Equal(approvalOps.Program, params.ApprovalProgram)
+ a.Equal(clearstateOps.Program, params.ClearStateProgram)
+ a.Equal(schema, params.LocalStateSchema)
+ a.Equal(schema, params.GlobalStateSchema)
+ a.Equal(1, len(params.GlobalState))
+ value, ok := params.GlobalState["counter"]
+ a.True(ok)
+ a.Equal(uint64(1), value.Uint)
+
+ a.Equal(1, len(ad.AppLocalStates))
+ state, ok := ad.AppLocalStates[appIdx]
+ a.True(ok)
+ a.Equal(schema, state.Schema)
+ a.Equal(1, len(state.KeyValue))
+ value, ok = state.KeyValue["counter"]
+ a.True(ok)
+ a.Equal(uint64(1), value.Uint)
+
+ txInfo, err := fixture.LibGoalClient.PendingTransactionInformation(txid)
+ a.NoError(err)
+ a.NotNil(txInfo.ConfirmedRound)
+ a.NotZero(*txInfo.ConfirmedRound)
+ txnRound := *txInfo.ConfirmedRound
+
+ // 1 global state update in total, 1 local state updates
+ checkEvalDelta(t, &client, txnRound, txnRound+1, 1, 1)
+
+ // call the app
+ tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil, nil)
+ a.NoError(err)
+ if foreignAssets != nil {
+ tx.ForeignAssets = foreignAssets
+ }
+ if foreignApps != nil {
+ tx.ForeignApps = foreignApps
+ }
+ if accounts != nil {
+ tx.Accounts = accounts
+ }
+ tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx)
+ a.NoError(err)
+ wh, err = client.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx)
+ a.NoError(err)
+ txid, err = client.BroadcastTransaction(signedTxn)
+ a.NoError(err)
+ round, err = client.CurrentRound()
+ a.NoError(err)
+ _, err = client.WaitForRound(round + 3)
+ a.NoError(err)
+
+ // Ensure the txn committed
+ resp, err := client.GetPendingTransactions(2)
+ a.NoError(err)
+ a.Equal(uint64(0), resp.TotalTransactions)
+ txinfo, err := client.PendingTransactionInformation(txid)
+ a.NoError(err)
+ a.NotNil(txinfo.ConfirmedRound)
+ a.NotZero(*txinfo.ConfirmedRound)
+
+ // check creator's balance record for the app entry and the state changes
+ ad, err = client.AccountData(creator)
+ a.NoError(err)
+ a.Equal(1, len(ad.AppParams))
+ params, ok = ad.AppParams[appIdx]
+ a.True(ok)
+ a.Equal(approvalOps.Program, params.ApprovalProgram)
+ a.Equal(clearstateOps.Program, params.ClearStateProgram)
+ a.Equal(schema, params.LocalStateSchema)
+ a.Equal(schema, params.GlobalStateSchema)
+ a.Equal(1, len(params.GlobalState))
+ value, ok = params.GlobalState["counter"]
+ a.True(ok)
+ a.Equal(uint64(2), value.Uint)
+
+ a.Equal(1, len(ad.AppLocalStates))
+ state, ok = ad.AppLocalStates[appIdx]
+ a.True(ok)
+ a.Equal(schema, state.Schema)
+ a.Equal(1, len(state.KeyValue))
+ value, ok = state.KeyValue["counter"]
+ a.True(ok)
+ a.Equal(uint64(1), value.Uint)
+
+ a.Equal(uint64(2), ad.TotalAppSchema.NumUint)
+
+ // check user's balance record for the app entry and the state changes
+ ad, err = client.AccountData(user)
+ a.NoError(err)
+ a.Equal(0, len(ad.AppParams))
+
+ a.Equal(1, len(ad.AppLocalStates))
+ state, ok = ad.AppLocalStates[appIdx]
+ a.True(ok)
+ a.Equal(schema, state.Schema)
+ a.Equal(1, len(state.KeyValue))
+ value, ok = state.KeyValue["counter"]
+ a.True(ok)
+ a.Equal(uint64(1), value.Uint)
+
+ txInfo, err = fixture.LibGoalClient.PendingTransactionInformation(txid)
+ a.NoError(err)
+ a.NotNil(txInfo.ConfirmedRound)
+ a.NotZero(*txInfo.ConfirmedRound)
+ txnRound = *txInfo.ConfirmedRound
+
+ // 2 global state update in total, 1 local state updates
+ checkEvalDelta(t, &client, txnRound, txnRound+1, 2, 1)
+
+ a.Equal(basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos)
+
+ app, err := client.ApplicationInformation(uint64(appIdx))
+ a.NoError(err)
+ a.Equal(uint64(appIdx), app.Id)
+ a.Equal(creator, app.Params.Creator)
+
+ // call the app
+ tx, err = client.MakeUnsignedAppNoOpTx(uint64(appIdx), nil, nil, nil, nil, nil)
+ a.NoError(err)
+ tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx)
+ a.NoError(err)
+ signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx)
+ a.NoError(err)
+ txid, err = client.BroadcastTransaction(signedTxn)
+ a.NoError(err)
+ for {
+ round, err = client.CurrentRound()
+ a.NoError(err)
+ _, err = client.WaitForRound(round + 1)
+ a.NoError(err)
+ // Ensure the txn committed
+ resp, err := client.GetParsedPendingTransactions(2)
+ a.NoError(err)
+ if resp.TotalTransactions == 1 {
+ pendingTxn := resp.TopTransactions[0]
+ a.Equal(pendingTxn.Txn.ID().String(), txid)
+ continue
+ }
+ a.Equal(uint64(0), resp.TotalTransactions)
+ break
+ }
+
+ ad, err = client.AccountData(creator)
+ a.NoError(err)
+ a.Equal(1, len(ad.AppParams))
+ params, ok = ad.AppParams[appIdx]
+ a.True(ok)
+ value, ok = params.GlobalState["counter"]
+ a.True(ok)
+ a.Equal(uint64(3), value.Uint)
+
+ txInfo, err = fixture.LibGoalClient.PendingTransactionInformation(txid)
a.NoError(err)
a.NotNil(txInfo.ConfirmedRound)
a.NotZero(*txInfo.ConfirmedRound)
diff --git a/test/e2e-go/features/transactions/app_pages_test.go b/test/e2e-go/features/transactions/app_pages_test.go
index 5ab2e4ad1..f9a56c212 100644
--- a/test/e2e-go/features/transactions/app_pages_test.go
+++ b/test/e2e-go/features/transactions/app_pages_test.go
@@ -50,7 +50,7 @@ func TestExtraProgramPages(t *testing.T) {
walletHandle, err := client.GetUnencryptedWalletHandle()
a.NoError(err)
- accountInfo, err := client.AccountInformationV2(baseAcct, false)
+ accountInfo, err := client.AccountInformation(baseAcct, false)
a.NoError(err)
if accountInfo.AppsTotalExtraPages != nil {
a.Equal(*accountInfo.AppsTotalExtraPages, uint64(0))
@@ -89,7 +89,7 @@ return
// create app 1 with 1 extra page
app1ExtraPages := uint32(1)
- tx, err := client.MakeUnsignedAppCreateTx(transactions.NoOpOC, smallProgram, smallProgram, globalSchema, localSchema, nil, nil, nil, nil, app1ExtraPages)
+ tx, err := client.MakeUnsignedAppCreateTx(transactions.NoOpOC, smallProgram, smallProgram, globalSchema, localSchema, nil, nil, nil, nil, nil, app1ExtraPages)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(baseAcct, 0, 0, 0, tx)
a.NoError(err)
@@ -98,19 +98,19 @@ return
_, err = fixture.WaitForConfirmedTxn(status.LastRound+5, baseAcct, txid)
a.NoError(err)
- app1CreateTxn, err := client.PendingTransactionInformationV2(txid)
+ app1CreateTxn, err := client.PendingTransactionInformation(txid)
a.NoError(err)
a.NotNil(app1CreateTxn.ConfirmedRound)
a.NotNil(app1CreateTxn.ApplicationIndex)
app1ID := *app1CreateTxn.ApplicationIndex
- accountInfo, err = client.AccountInformationV2(baseAcct, false)
+ accountInfo, err = client.AccountInformation(baseAcct, false)
a.NoError(err)
a.NotNil(accountInfo.AppsTotalExtraPages)
a.Equal(*accountInfo.AppsTotalExtraPages, uint64(app1ExtraPages))
// update app 1 and ensure the extra page still works
- tx, err = client.MakeUnsignedAppUpdateTx(app1ID, nil, nil, nil, nil, bigProgram, smallProgram)
+ tx, err = client.MakeUnsignedAppUpdateTx(app1ID, nil, nil, nil, nil, nil, bigProgram, smallProgram)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(baseAcct, 0, 0, 0, tx)
a.NoError(err)
@@ -119,18 +119,18 @@ return
_, err = fixture.WaitForConfirmedTxn(*app1CreateTxn.ConfirmedRound+5, baseAcct, txid)
a.NoError(err)
- app1UpdateTxn, err := client.PendingTransactionInformationV2(txid)
+ app1UpdateTxn, err := client.PendingTransactionInformation(txid)
a.NoError(err)
a.NotNil(app1CreateTxn.ConfirmedRound)
- accountInfo, err = client.AccountInformationV2(baseAcct, false)
+ accountInfo, err = client.AccountInformation(baseAcct, false)
a.NoError(err)
a.NotNil(accountInfo.AppsTotalExtraPages)
a.Equal(*accountInfo.AppsTotalExtraPages, uint64(app1ExtraPages))
// create app 2 with 2 extra pages
app2ExtraPages := uint32(2)
- tx, err = client.MakeUnsignedAppCreateTx(transactions.NoOpOC, bigProgram, smallProgram, globalSchema, localSchema, nil, nil, nil, nil, app2ExtraPages)
+ tx, err = client.MakeUnsignedAppCreateTx(transactions.NoOpOC, bigProgram, smallProgram, globalSchema, localSchema, nil, nil, nil, nil, nil, app2ExtraPages)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(baseAcct, 0, 0, 0, tx)
a.NoError(err)
@@ -139,19 +139,19 @@ return
_, err = fixture.WaitForConfirmedTxn(*app1UpdateTxn.ConfirmedRound+5, baseAcct, txid)
a.NoError(err)
- app2CreateTxn, err := client.PendingTransactionInformationV2(txid)
+ app2CreateTxn, err := client.PendingTransactionInformation(txid)
a.NoError(err)
a.NotNil(app2CreateTxn.ConfirmedRound)
a.NotNil(app2CreateTxn.ApplicationIndex)
app2ID := *app2CreateTxn.ApplicationIndex
- accountInfo, err = client.AccountInformationV2(baseAcct, false)
+ accountInfo, err = client.AccountInformation(baseAcct, false)
a.NoError(err)
a.NotNil(accountInfo.AppsTotalExtraPages)
a.Equal(*accountInfo.AppsTotalExtraPages, uint64(app1ExtraPages+app2ExtraPages))
// delete app 1
- tx, err = client.MakeUnsignedAppDeleteTx(app1ID, nil, nil, nil, nil)
+ tx, err = client.MakeUnsignedAppDeleteTx(app1ID, nil, nil, nil, nil, nil)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(baseAcct, 0, 0, 0, tx)
a.NoError(err)
@@ -160,17 +160,17 @@ return
_, err = fixture.WaitForConfirmedTxn(*app2CreateTxn.ConfirmedRound+5, baseAcct, txid)
a.NoError(err)
- app1DeleteTxn, err := client.PendingTransactionInformationV2(txid)
+ app1DeleteTxn, err := client.PendingTransactionInformation(txid)
a.NoError(err)
a.NotNil(app1DeleteTxn.ConfirmedRound)
- accountInfo, err = client.AccountInformationV2(baseAcct, false)
+ accountInfo, err = client.AccountInformation(baseAcct, false)
a.NoError(err)
a.NotNil(accountInfo.AppsTotalExtraPages)
a.Equal(*accountInfo.AppsTotalExtraPages, uint64(app2ExtraPages))
// delete app 2
- tx, err = client.MakeUnsignedAppDeleteTx(app2ID, nil, nil, nil, nil)
+ tx, err = client.MakeUnsignedAppDeleteTx(app2ID, nil, nil, nil, nil, nil)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(baseAcct, 0, 0, 0, tx)
a.NoError(err)
@@ -179,7 +179,7 @@ return
_, err = fixture.WaitForConfirmedTxn(*app1DeleteTxn.ConfirmedRound+5, baseAcct, txid)
a.NoError(err)
- accountInfo, err = client.AccountInformationV2(baseAcct, false)
+ accountInfo, err = client.AccountInformation(baseAcct, false)
a.NoError(err)
if accountInfo.AppsTotalExtraPages != nil {
a.Equal(*accountInfo.AppsTotalExtraPages, uint64(0))
diff --git a/test/e2e-go/features/transactions/application_test.go b/test/e2e-go/features/transactions/application_test.go
index a5786cc4f..380264b29 100644
--- a/test/e2e-go/features/transactions/application_test.go
+++ b/test/e2e-go/features/transactions/application_test.go
@@ -97,7 +97,7 @@ log
// create the app
tx, err := client.MakeUnsignedAppCreateTx(
- transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, 0)
+ transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, nil, 0)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(creator, 0, 0, fee, tx)
a.NoError(err)
diff --git a/test/e2e-go/features/transactions/asset_test.go b/test/e2e-go/features/transactions/asset_test.go
index 9616f3cb0..0802308b3 100644
--- a/test/e2e-go/features/transactions/asset_test.go
+++ b/test/e2e-go/features/transactions/asset_test.go
@@ -26,7 +26,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
- v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/libgoal"
@@ -37,7 +37,7 @@ import (
type assetIDParams struct {
idx uint64
- params v1.AssetParams
+ params model.AssetParams
}
func helperFillSignBroadcast(client libgoal.Client, wh []byte, sender string, tx transactions.Transaction, err error) (string, error) {
@@ -226,9 +226,10 @@ func TestAssetConfig(t *testing.T) {
a.NoError(err)
// There should be no assets to start with
- info, err := client.AccountInformation(account0)
+ info, err := client.AccountInformation(account0, true)
a.NoError(err)
- a.Equal(len(info.AssetParams), 0)
+ a.NotNil(info.CreatedAssets)
+ a.Equal(len(*info.CreatedAssets), 0)
// Create max number of assets, or 1000 if the number of assets are unlimitd.
maxAssetsCount := config.Consensus[protocol.ConsensusFuture].MaxAssetsPerAccount
@@ -268,21 +269,38 @@ func TestAssetConfig(t *testing.T) {
a.True(strings.Contains(err.Error(), "too many assets in account:"))
}
+ // Helper methods for dereferencing asset fields
+ derefString := func(sp *string) string {
+ if sp != nil {
+ return *sp
+ }
+ return ""
+ }
+ derefByteArray := func(ba *[]byte) []byte {
+ if ba != nil {
+ return *ba
+ }
+ return []byte{}
+ }
+
// Check that assets are visible
- info, err = client.AccountInformation(account0)
+ info, err = client.AccountInformation(account0, true)
a.NoError(err)
- a.Equal(maxAssetsCount, len(info.AssetParams))
+ a.NotNil(info.Assets)
+ a.Equal(maxAssetsCount, len(*info.CreatedAssets))
var assets []assetIDParams
- for idx, cp := range info.AssetParams {
+ for _, asset := range *info.CreatedAssets {
+ idx := asset.Index
+ cp := asset.Params
assets = append(assets, assetIDParams{idx, cp})
- a.Equal(cp.UnitName, fmt.Sprintf("test%d", cp.Total-1))
- a.Equal(cp.AssetName, fmt.Sprintf("testname%d", cp.Total-1))
- a.Equal(cp.ManagerAddr, manager)
- a.Equal(cp.ReserveAddr, reserve)
- a.Equal(cp.FreezeAddr, freeze)
- a.Equal(cp.ClawbackAddr, clawback)
- a.Equal(cp.MetadataHash, assetMetadataHash)
- a.Equal(cp.URL, assetURL)
+ a.Equal(derefString(cp.UnitName), fmt.Sprintf("test%d", cp.Total-1))
+ a.Equal(derefString(cp.Name), fmt.Sprintf("testname%d", cp.Total-1))
+ a.Equal(derefString(cp.Manager), manager)
+ a.Equal(derefString(cp.Reserve), reserve)
+ a.Equal(derefString(cp.Freeze), freeze)
+ a.Equal(derefString(cp.Clawback), clawback)
+ a.Equal(derefByteArray(cp.MetadataHash), assetMetadataHash)
+ a.Equal(derefString(cp.Url), assetURL)
}
// re-generate wh, since this test takes a while and sometimes
@@ -334,41 +352,44 @@ func TestAssetConfig(t *testing.T) {
confirmed = fixture.WaitForAllTxnsToConfirm(status.LastRound+20, txids)
a.True(confirmed, "changing keys")
- info, err = client.AccountInformation(account0)
+ info, err = client.AccountInformation(account0, true)
a.NoError(err)
- a.Equal(maxAssetsCount, len(info.AssetParams))
- for idx, cp := range info.AssetParams {
- a.Equal(cp.UnitName, fmt.Sprintf("test%d", cp.Total-1))
- a.Equal(cp.AssetName, fmt.Sprintf("testname%d", cp.Total-1))
+ a.NotNil(info.CreatedAssets)
+ a.Equal(maxAssetsCount, len(*info.CreatedAssets))
+ for _, asset := range *info.CreatedAssets {
+ idx := asset.Index
+ cp := asset.Params
+ a.Equal(derefString(cp.UnitName), fmt.Sprintf("test%d", cp.Total-1))
+ a.Equal(derefString(cp.Name), fmt.Sprintf("testname%d", cp.Total-1))
if idx == assets[0].idx {
- a.Equal(cp.ManagerAddr, account0)
+ a.Equal(derefString(cp.Manager), account0)
} else {
- a.Equal(cp.ManagerAddr, manager)
+ a.Equal(derefString(cp.Manager), manager)
}
if idx == assets[1].idx {
- a.Equal(cp.ReserveAddr, account0)
+ a.Equal(derefString(cp.Reserve), account0)
} else if idx == assets[4].idx {
- a.Equal(cp.ReserveAddr, "")
+ a.Equal(derefString(cp.Reserve), "")
} else {
- a.Equal(cp.ReserveAddr, reserve)
+ a.Equal(derefString(cp.Reserve), reserve)
}
if idx == assets[2].idx {
- a.Equal(cp.FreezeAddr, account0)
+ a.Equal(derefString(cp.Freeze), account0)
} else if idx == assets[5].idx {
- a.Equal(cp.FreezeAddr, "")
+ a.Equal(derefString(cp.Freeze), "")
} else {
- a.Equal(cp.FreezeAddr, freeze)
+ a.Equal(derefString(cp.Freeze), freeze)
}
if idx == assets[3].idx {
- a.Equal(cp.ClawbackAddr, account0)
+ a.Equal(derefString(cp.Clawback), account0)
} else if idx == assets[6].idx {
- a.Equal(cp.ClawbackAddr, "")
+ a.Equal(derefString(cp.Clawback), "")
} else {
- a.Equal(cp.ClawbackAddr, clawback)
+ a.Equal(derefString(cp.Clawback), clawback)
}
}
@@ -385,7 +406,8 @@ func TestAssetConfig(t *testing.T) {
// Destroy assets
txids = make(map[string]string)
- for idx := range info.AssetParams {
+ for _, asset := range *info.CreatedAssets {
+ idx := asset.Index
// re-generate wh, since this test takes a while and sometimes
// the wallet handle expires.
wh, err = client.GetUnencryptedWalletHandle()
@@ -451,12 +473,7 @@ func TestAssetInformation(t *testing.T) {
a.NoError(err)
// There should be no assets to start with
- info, err := client.AccountInformation(account0)
- a.NoError(err)
- a.Equal(len(info.AssetParams), 0)
-
- // There should be no assets to start with
- info2, err := client.AccountInformationV2(account0, true)
+ info2, err := client.AccountInformation(account0, true)
a.NoError(err)
a.NotNil(info2.CreatedAssets)
a.Equal(len(*info2.CreatedAssets), 0)
@@ -475,27 +492,19 @@ func TestAssetInformation(t *testing.T) {
a.True(confirmed, "creating assets")
// Check that AssetInformation returns the correct AssetParams
- info, err = client.AccountInformation(account0)
- a.NoError(err)
- for idx, cp := range info.AssetParams {
- assetInfo, err := client.AssetInformation(idx)
- a.NoError(err)
- a.Equal(cp, assetInfo)
- }
-
- // Check that AssetInformationV2 returns the correct AssetParams
- info2, err = client.AccountInformationV2(account0, true)
+ info2, err = client.AccountInformation(account0, true)
a.NoError(err)
a.NotNil(info2.CreatedAssets)
for _, cp := range *info2.CreatedAssets {
- asset, err := client.AssetInformationV2(cp.Index)
+ asset, err := client.AssetInformation(cp.Index)
a.NoError(err)
a.Equal(cp, asset)
}
// Destroy assets
txids = make(map[string]string)
- for idx := range info.AssetParams {
+ for _, asset := range *info2.CreatedAssets {
+ idx := asset.Index
tx, err := client.MakeUnsignedAssetDestroyTx(idx)
txid, err := helperFillSignBroadcast(client, wh, manager, tx, err)
a.NoError(err)
@@ -639,10 +648,13 @@ func TestAssetGroupCreateSendDestroy(t *testing.T) {
txids = make(map[string]string)
// asset 1 (create + send) exists and available
- assetParams, err := client1.AssetInformation(assetID1)
+ asset, err := client1.AssetInformation(assetID1)
+ assetParams := asset.Params
a.NoError(err)
- a.Equal(assetName1, assetParams.AssetName)
- a.Equal(assetUnitName1, assetParams.UnitName)
+ a.NotNil(assetParams.Name)
+ a.Equal(assetName1, *assetParams.Name)
+ a.NotNil(assetParams.UnitName)
+ a.Equal(assetUnitName1, *assetParams.UnitName)
a.Equal(account0, assetParams.Creator)
a.Equal(assetTotal, assetParams.Total)
// sending it should succeed
@@ -730,16 +742,19 @@ func TestAssetSend(t *testing.T) {
confirmed := fixture.WaitForAllTxnsToConfirm(curRound+20, txids)
a.True(confirmed, "creating assets")
- info, err := client.AccountInformation(account0)
+ info, err := client.AccountInformation(account0, true)
a.NoError(err)
- a.Equal(len(info.AssetParams), 2)
+ a.NotNil(info.CreatedAssets)
+ a.Equal(len(*info.CreatedAssets), 2)
var frozenIdx, nonFrozenIdx uint64
- for idx, cp := range info.AssetParams {
- if cp.UnitName == "frozen" {
+ for _, asset := range *info.CreatedAssets {
+ idx := asset.Index
+ cp := asset.Params
+ if cp.UnitName != nil && *cp.UnitName == "frozen" {
frozenIdx = idx
}
- if cp.UnitName == "nofreeze" {
+ if cp.UnitName != nil && *cp.UnitName == "nofreeze" {
nonFrozenIdx = idx
}
}
@@ -820,13 +835,19 @@ func TestAssetSend(t *testing.T) {
confirmed = fixture.WaitForAllTxnsToConfirm(curRound+20, txids)
a.True(confirmed, "creating asset slots")
- info, err = client.AccountInformation(extra)
- a.NoError(err)
- a.Equal(len(info.Assets), 2)
- a.Equal(info.Assets[frozenIdx].Amount, uint64(0))
- a.Equal(info.Assets[frozenIdx].Frozen, true)
- a.Equal(info.Assets[nonFrozenIdx].Amount, uint64(10))
- a.Equal(info.Assets[nonFrozenIdx].Frozen, false)
+ info, err = client.AccountInformation(extra, true)
+ a.NoError(err)
+ a.NotNil(info.Assets)
+ a.Equal(len(*info.Assets), 2)
+ for _, asset := range *info.Assets {
+ if asset.AssetID == frozenIdx {
+ a.Equal(asset.Amount, uint64(0))
+ a.Equal(asset.IsFrozen, true)
+ } else if asset.AssetID == nonFrozenIdx {
+ a.Equal(asset.Amount, uint64(10))
+ a.Equal(asset.IsFrozen, false)
+ }
+ }
// Should not be able to send more than is available
tx, err = client.MakeUnsignedAssetSendTx(nonFrozenIdx, 11, extra, "", "")
@@ -893,17 +914,29 @@ func TestAssetSend(t *testing.T) {
a.True(confirmed, "clawback")
// Check that the asset balances are correct
- info, err = client.AccountInformation(account0)
- a.NoError(err)
- a.Equal(len(info.Assets), 2)
- a.Equal(info.Assets[frozenIdx].Amount, uint64(95))
- a.Equal(info.Assets[nonFrozenIdx].Amount, uint64(95))
+ info, err = client.AccountInformation(account0, true)
+ a.NoError(err)
+ a.NotNil(info.Assets)
+ a.Equal(len(*info.Assets), 2)
+ for _, asset := range *info.Assets {
+ if asset.AssetID == frozenIdx {
+ a.Equal(asset.Amount, uint64(95))
+ } else if asset.AssetID == nonFrozenIdx {
+ a.Equal(asset.Amount, uint64(95))
+ }
+ }
- info, err = client.AccountInformation(extra)
+ info, err = client.AccountInformation(extra, true)
a.NoError(err)
- a.Equal(len(info.Assets), 2)
- a.Equal(info.Assets[frozenIdx].Amount, uint64(5))
- a.Equal(info.Assets[nonFrozenIdx].Amount, uint64(5))
+ a.NotNil(info.Assets)
+ a.Equal(len(*info.Assets), 2)
+ for _, asset := range *info.Assets {
+ if asset.AssetID == frozenIdx {
+ a.Equal(asset.Amount, uint64(5))
+ } else if asset.AssetID == nonFrozenIdx {
+ a.Equal(asset.Amount, uint64(5))
+ }
+ }
// Should be able to close out asset slots and close entire account.
tx, err = client.MakeUnsignedAssetFreezeTx(nonFrozenIdx, extra, false)
@@ -930,22 +963,24 @@ func TestAssetCreateWaitRestartDelete(t *testing.T) {
defer fixture.Shutdown()
// There should be no assets to start with
- info, err := client.AccountInformation(account0)
+ info, err := client.AccountInformation(account0, true)
a.NoError(err)
- a.Equal(len(info.AssetParams), 0)
+ a.NotNil(info.CreatedAssets)
+ a.Equal(len(*info.CreatedAssets), 0)
manager, reserve, freeze, clawback := setupActors(account0, client, a)
createAsset("test", account0, manager, reserve, freeze, clawback, client, fixture, a)
// Check that asset is visible
- info, err = client.AccountInformation(account0)
+ info, err = client.AccountInformation(account0, true)
a.NoError(err)
- a.Equal(len(info.AssetParams), 1)
- var asset v1.AssetParams
+ a.NotNil(info.CreatedAssets)
+ a.Equal(len(*info.CreatedAssets), 1)
+ var asset model.AssetParams
var assetIndex uint64
- for idx, cp := range info.AssetParams {
- asset = cp
- assetIndex = idx
+ for _, cp := range *info.CreatedAssets {
+ asset = cp.Params
+ assetIndex = cp.Index
}
assetURL := "foo://bar"
@@ -961,12 +996,13 @@ func TestAssetCreateWaitRestartDelete(t *testing.T) {
client = &fixture.LibGoalClient
// Check again that asset is visible
- info, err = client.AccountInformation(account0)
+ info, err = client.AccountInformation(account0, true)
a.NoError(err)
- a.Equal(len(info.AssetParams), 1)
- for idx, cp := range info.AssetParams {
- asset = cp
- assetIndex = idx
+ a.NotNil(info.CreatedAssets)
+ a.Equal(len(*info.CreatedAssets), 1)
+ for _, cp := range *info.CreatedAssets {
+ asset = cp.Params
+ assetIndex = cp.Index
}
verifyAssetParameters(asset, "test", "testunit", manager, reserve, freeze, clawback,
assetMetadataHash, assetURL, a)
@@ -984,9 +1020,11 @@ func TestAssetCreateWaitRestartDelete(t *testing.T) {
submitAndWaitForTransaction(manager, tx, "destroying assets", client, fixture, a)
// Check again that asset is destroyed
- info, err = client.AccountInformation(account0)
+ info, err = client.AccountInformation(account0, true)
+ a.NoError(err)
a.NoError(err)
- a.Equal(len(info.AssetParams), 0)
+ a.NotNil(info.CreatedAssets)
+ a.Equal(len(*info.CreatedAssets), 0)
// Should be able to close now
wh, err := client.GetUnencryptedWalletHandle()
@@ -1026,22 +1064,24 @@ func TestAssetCreateWaitBalLookbackDelete(t *testing.T) {
defer fixture.Shutdown()
// There should be no assets to start with
- info, err := client.AccountInformation(account0)
+ info, err := client.AccountInformation(account0, true)
a.NoError(err)
- a.Equal(len(info.AssetParams), 0)
+ a.NotNil(info.CreatedAssets)
+ a.Equal(len(*info.CreatedAssets), 0)
manager, reserve, freeze, clawback := setupActors(account0, client, a)
createAsset("test", account0, manager, reserve, freeze, clawback, client, fixture, a)
// Check that asset is visible
- info, err = client.AccountInformation(account0)
+ info, err = client.AccountInformation(account0, true)
a.NoError(err)
- a.Equal(len(info.AssetParams), 1)
- var asset v1.AssetParams
+ a.NotNil(info.CreatedAssets)
+ a.Equal(len(*info.CreatedAssets), 1)
+ var asset model.AssetParams
var assetIndex uint64
- for idx, cp := range info.AssetParams {
- asset = cp
- assetIndex = idx
+ for _, cp := range *info.CreatedAssets {
+ asset = cp.Params
+ assetIndex = cp.Index
}
assetURL := "foo://bar"
@@ -1059,12 +1099,13 @@ func TestAssetCreateWaitBalLookbackDelete(t *testing.T) {
a.NoError(err)
// Check again that asset is visible
- info, err = client.AccountInformation(account0)
+ info, err = client.AccountInformation(account0, true)
a.NoError(err)
- a.Equal(len(info.AssetParams), 1)
- for idx, cp := range info.AssetParams {
- asset = cp
- assetIndex = idx
+ a.NotNil(info.CreatedAssets)
+ a.Equal(len(*info.CreatedAssets), 1)
+ for _, cp := range *info.CreatedAssets {
+ asset = cp.Params
+ assetIndex = cp.Index
}
verifyAssetParameters(asset, "test", "testunit", manager, reserve, freeze, clawback,
assetMetadataHash, assetURL, a)
@@ -1082,9 +1123,10 @@ func TestAssetCreateWaitBalLookbackDelete(t *testing.T) {
submitAndWaitForTransaction(manager, tx, "destroying assets", client, fixture, a)
// Check again that asset is destroyed
- info, err = client.AccountInformation(account0)
+ info, err = client.AccountInformation(account0, true)
a.NoError(err)
- a.Equal(len(info.AssetParams), 0)
+ a.NotNil(info.CreatedAssets)
+ a.Equal(len(*info.CreatedAssets), 0)
// Should be able to close now
wh, err := client.GetUnencryptedWalletHandle()
@@ -1182,16 +1224,16 @@ func submitAndWaitForTransaction(sender string, tx transactions.Transaction, mes
asser.True(confirmed, message)
}
-func verifyAssetParameters(asset v1.AssetParams,
+func verifyAssetParameters(asset model.AssetParams,
unitName, assetName, manager, reserve, freeze, clawback string,
metadataHash []byte, assetURL string, asser *require.Assertions) {
- asser.Equal(asset.UnitName, unitName)
- asser.Equal(asset.AssetName, assetName)
- asser.Equal(asset.ManagerAddr, manager)
- asser.Equal(asset.ReserveAddr, reserve)
- asser.Equal(asset.FreezeAddr, freeze)
- asser.Equal(asset.ClawbackAddr, clawback)
- asser.Equal(asset.MetadataHash, metadataHash)
- asser.Equal(asset.URL, assetURL)
+ asser.Equal(*asset.UnitName, unitName)
+ asser.Equal(*asset.Name, assetName)
+ asser.Equal(*asset.Manager, manager)
+ asser.Equal(*asset.Reserve, reserve)
+ asser.Equal(*asset.Freeze, freeze)
+ asser.Equal(*asset.Clawback, clawback)
+ asser.Equal(*asset.MetadataHash, metadataHash)
+ asser.Equal(*asset.Url, assetURL)
}
diff --git a/test/e2e-go/features/transactions/onlineStatusChange_test.go b/test/e2e-go/features/transactions/onlineStatusChange_test.go
index fdda7eea6..2cbfa793a 100644
--- a/test/e2e-go/features/transactions/onlineStatusChange_test.go
+++ b/test/e2e-go/features/transactions/onlineStatusChange_test.go
@@ -64,17 +64,17 @@ func testAccountsCanChangeOnlineState(t *testing.T, templatePath string) {
becomesNonparticipating := accountList[2].Address // 10% stake
// assert that initiallyOfflineAccount is offline
- initiallyOfflineAccountStatus, err := client.AccountInformation(initiallyOffline)
+ initiallyOfflineAccountStatus, err := client.AccountInformation(initiallyOffline, false)
a.NoError(err)
a.Equal(initiallyOfflineAccountStatus.Status, basics.Offline.String())
// assert that initiallyOnlineAccount is online
- initiallyOnlineAccountStatus, err := client.AccountInformation(initiallyOnline)
+ initiallyOnlineAccountStatus, err := client.AccountInformation(initiallyOnline, false)
a.NoError(err)
a.Equal(initiallyOnlineAccountStatus.Status, basics.Online.String())
// assert that the account that will become nonparticipating hasn't yet been marked as such
- unmarkedAccountStatus, err := client.AccountInformation(becomesNonparticipating)
+ unmarkedAccountStatus, err := client.AccountInformation(becomesNonparticipating, false)
a.NoError(err)
a.NotEqual(unmarkedAccountStatus.Status, basics.NotParticipating.String())
@@ -132,18 +132,18 @@ func testAccountsCanChangeOnlineState(t *testing.T, templatePath string) {
fixture.WaitForRoundWithTimeout(curRound + uint64(1))
// assert that initiallyOffline is now online
- initiallyOfflineAccountStatus, err = client.AccountInformation(initiallyOffline)
+ initiallyOfflineAccountStatus, err = client.AccountInformation(initiallyOffline, false)
a.NoError(err)
a.Equal(initiallyOfflineAccountStatus.Status, basics.Online.String())
// assert that initiallyOnline is now offline
- initiallyOnlineAccountStatus, err = client.AccountInformation(initiallyOnline)
+ initiallyOnlineAccountStatus, err = client.AccountInformation(initiallyOnline, false)
a.NoError(err)
a.Equal(initiallyOnlineAccountStatus.Status, basics.Offline.String())
if doNonparticipationTest {
// assert that becomesNonparticipating is no longer participating
- unmarkedAccountStatus, err = client.AccountInformation(becomesNonparticipating)
+ unmarkedAccountStatus, err = client.AccountInformation(becomesNonparticipating, false)
a.NoError(err)
a.Equal(unmarkedAccountStatus.Status, basics.NotParticipating.String())
}
diff --git a/test/e2e-go/features/transactions/proof_test.go b/test/e2e-go/features/transactions/proof_test.go
index 8de0dcb02..bb5175c92 100644
--- a/test/e2e-go/features/transactions/proof_test.go
+++ b/test/e2e-go/features/transactions/proof_test.go
@@ -94,14 +94,15 @@ func TestTxnMerkleProof(t *testing.T) {
txidSHA256 := tx.IDSha256() // only used for verification
confirmedTx, err := fixture.WaitForConfirmedTxn(status.LastRound+10, baseAcct, txid.String())
a.NoError(err)
+ a.NotNil(confirmedTx.ConfirmedRound)
- blk, err := client.BookkeepingBlock(confirmedTx.ConfirmedRound)
+ blk, err := client.BookkeepingBlock(*confirmedTx.ConfirmedRound)
a.NoError(err)
- proofresp, proof, err := fixture.TransactionProof(txid.String(), confirmedTx.ConfirmedRound, crypto.Sha512_256)
+ proofresp, proof, err := fixture.TransactionProof(txid.String(), *confirmedTx.ConfirmedRound, crypto.Sha512_256)
a.NoError(err)
- proofrespSHA256, proofSHA256, err := fixture.TransactionProof(txid.String(), confirmedTx.ConfirmedRound, crypto.Sha256)
+ proofrespSHA256, proofSHA256, err := fixture.TransactionProof(txid.String(), *confirmedTx.ConfirmedRound, crypto.Sha256)
a.NoError(err)
element := TxnMerkleElemRaw{Txn: crypto.Digest(txid)}
@@ -176,8 +177,9 @@ func TestTxnMerkleProofSHA256(t *testing.T) {
txid := tx.ID()
confirmedTx, err := fixture.WaitForConfirmedTxn(status.LastRound+10, baseAcct, txid.String())
a.NoError(err)
+ a.NotNil(confirmedTx.ConfirmedRound)
- blk, err := client.BookkeepingBlock(confirmedTx.ConfirmedRound)
+ blk, err := client.BookkeepingBlock(*confirmedTx.ConfirmedRound)
a.NoError(err)
proto := config.Consensus[blk.CurrentProtocol]
a.False(proto.EnableSHA256TxnCommitmentHeader)
diff --git a/test/e2e-go/features/transactions/sendReceive_test.go b/test/e2e-go/features/transactions/sendReceive_test.go
index d17bd7dd2..e09c67a6b 100644
--- a/test/e2e-go/features/transactions/sendReceive_test.go
+++ b/test/e2e-go/features/transactions/sendReceive_test.go
@@ -23,7 +23,8 @@ import (
"github.com/stretchr/testify/require"
- v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
+
"github.com/algorand/go-algorand/test/framework/fixtures"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -125,12 +126,12 @@ func testAccountsCanSendMoney(t *testing.T, templatePath string, numberOfSends i
expectedPingBalance = expectedPingBalance - transactionFee - amountPingSendsPong + amountPongSendsPing
expectedPongBalance = expectedPongBalance - transactionFee - amountPongSendsPing + amountPingSendsPong
- var pongTxInfo, pingTxInfo v1.Transaction
+ var pongTxInfo, pingTxInfo model.PendingTransactionResponse
pongTxInfo, err = pongClient.PendingTransactionInformation(pongTx.ID().String())
if err == nil {
pingTxInfo, err = pingClient.PendingTransactionInformation(pingTx.ID().String())
}
- waitForTransaction = err != nil || pongTxInfo.ConfirmedRound == 0 || pingTxInfo.ConfirmedRound == 0
+ waitForTransaction = err != nil || (pongTxInfo.ConfirmedRound != nil && *pongTxInfo.ConfirmedRound == 0) || (pingTxInfo.ConfirmedRound != nil && *pingTxInfo.ConfirmedRound == 0)
if waitForTransaction {
curStatus, _ := pongClient.Status()
curRound := curStatus.LastRound
diff --git a/test/e2e-go/perf/basic_test.go b/test/e2e-go/perf/basic_test.go
index dd70d422d..b4044303a 100644
--- a/test/e2e-go/perf/basic_test.go
+++ b/test/e2e-go/perf/basic_test.go
@@ -26,10 +26,9 @@ import (
"github.com/stretchr/testify/require"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
-
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/libgoal"
@@ -125,7 +124,7 @@ func doBenchTemplate(b *testing.B, template string, moneynode string) {
// goroutines to talk to algod and kmd.
http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 100
- var status generatedV2.NodeStatusResponse
+ var status model.NodeStatusResponse
b.Run(template, func(b *testing.B) {
for i := 0; i < b.N; i++ {
@@ -240,11 +239,11 @@ func doBenchTemplate(b *testing.B, template string, moneynode string) {
fmt.Printf("Block size statistics:\n")
for round := status.LastRound + 1; ; round++ {
- blk, err := c.Block(round)
+ blk, err := c.BookkeepingBlock(round)
if err != nil {
break
}
- fmt.Printf(" %d: %d txns\n", round, len(blk.Transactions.Transactions))
+ fmt.Printf(" %d: %d txns\n", round, len(blk.Payset))
}
}
diff --git a/test/e2e-go/restAPI/restClient_test.go b/test/e2e-go/restAPI/restClient_test.go
index 1c9c94216..4592fd426 100644
--- a/test/e2e-go/restAPI/restClient_test.go
+++ b/test/e2e-go/restAPI/restClient_test.go
@@ -18,25 +18,28 @@ package restapi
import (
"context"
+ "encoding/binary"
"encoding/hex"
"errors"
"flag"
-
+ "fmt"
"math"
"math/rand"
"os"
"path/filepath"
+ "sort"
"strings"
"testing"
"time"
- "unicode"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklesignature"
- v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ "github.com/algorand/go-algorand/daemon/algod/api/client"
+ v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
@@ -99,16 +102,6 @@ func mutateStringAtIndex(in string, i int) (out string) {
return out
}
-// checks whether a string is all letters-or-spaces
-func isLetterOrSpace(s string) bool {
- for _, r := range s {
- if !unicode.IsLetter(r) && !unicode.IsSpace(r) {
- return false
- }
- }
- return true
-}
-
func getMaxBalAddr(t *testing.T, testClient libgoal.Client, addresses []string) (someBal uint64, someAddress string) {
a := require.New(fixtures.SynchronizedTest(t))
someBal = 0
@@ -161,7 +154,7 @@ func waitForRoundOne(t *testing.T, testClient libgoal.Client) {
var errWaitForTransactionTimeout = errors.New("wait for transaction timed out")
-func waitForTransaction(t *testing.T, testClient libgoal.Client, fromAddress, txID string, timeout time.Duration) (tx v1.Transaction, err error) {
+func waitForTransaction(t *testing.T, testClient libgoal.Client, fromAddress, txID string, timeout time.Duration) (tx v2.PreEncodedTxInfo, err error) {
a := require.New(fixtures.SynchronizedTest(t))
rnd, err := testClient.Status()
a.NoError(err)
@@ -170,14 +163,11 @@ func waitForTransaction(t *testing.T, testClient libgoal.Client, fromAddress, tx
}
timeoutTime := time.Now().Add(timeout)
for {
- tx, err = testClient.TransactionInformation(fromAddress, txID)
- if err != nil && strings.HasPrefix(err.Error(), "HTTP 404") {
- tx, err = testClient.PendingTransactionInformation(txID)
- }
+ tx, err = testClient.ParsedPendingTransaction(txID)
if err == nil {
a.NotEmpty(tx)
a.Empty(tx.PoolError)
- if tx.ConfirmedRound > 0 {
+ if tx.ConfirmedRound != nil && *tx.ConfirmedRound > 0 {
return
}
}
@@ -220,55 +210,6 @@ func TestClientCanGetStatusAfterBlock(t *testing.T) {
a.NotEmpty(statusResponse)
}
-func TestTransactionsByAddr(t *testing.T) {
- partitiontest.PartitionTest(t)
- defer fixtures.ShutdownSynchronizedTest(t)
-
- a := require.New(fixtures.SynchronizedTest(t))
- var localFixture fixtures.RestClientFixture
- localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
- defer localFixture.Shutdown()
-
- testClient := localFixture.LibGoalClient
- waitForRoundOne(t, testClient)
- wh, err := testClient.GetUnencryptedWalletHandle()
- a.NoError(err)
- addresses, err := testClient.ListAddresses(wh)
- a.NoError(err)
- _, someAddress := getMaxBalAddr(t, testClient, addresses)
- if someAddress == "" {
- t.Error("no addr with funds")
- }
- toAddress := getDestAddr(t, testClient, addresses, someAddress, wh)
- tx, err := testClient.SendPaymentFromWallet(wh, nil, someAddress, toAddress, 10000, 100000, nil, "", 0, 0)
- a.NoError(err)
- txID := tx.ID()
- rnd, err := testClient.Status()
- a.NoError(err)
- t.Logf("rnd[%d] created txn %s", rnd.LastRound, txID)
- _, err = waitForTransaction(t, testClient, someAddress, txID.String(), 30*time.Second)
- a.NoError(err)
-
- // what is my round?
- rnd, err = testClient.Status()
- a.NoError(err)
- t.Logf("rnd %d", rnd.LastRound)
-
- // Now let's get the transaction
-
- restClient, err := localFixture.NC.AlgodClient()
- a.NoError(err)
- res, err := restClient.TransactionsByAddr(toAddress, 0, rnd.LastRound, 100)
- a.NoError(err)
- a.Equal(1, len(res.Transactions))
-
- for _, tx := range res.Transactions {
- a.Equal(tx.From, someAddress)
- a.Equal(tx.Payment.Amount, uint64(100000))
- a.Equal(tx.Fee, uint64(10000))
- }
-}
-
func TestClientCanGetVersion(t *testing.T) {
partitiontest.PartitionTest(t)
defer fixtures.ShutdownSynchronizedTest(t)
@@ -302,7 +243,7 @@ func TestClientCanGetMinTxnFee(t *testing.T) {
testClient := fixture.LibGoalClient
suggestedParamsRes, err := testClient.SuggestedParams()
a.NoError(err)
- a.Truef(suggestedParamsRes.MinTxnFee > 0, "min txn fee not supplied")
+ a.Truef(suggestedParamsRes.MinFee > 0, "min txn fee not supplied")
}
func TestClientCanGetBlockInfo(t *testing.T) {
@@ -466,7 +407,7 @@ func TestClientCanSendAndGetNote(t *testing.T) {
a.NoError(err)
txStatus, err := waitForTransaction(t, testClient, someAddress, tx.ID().String(), 30*time.Second)
a.NoError(err)
- a.Equal(note, txStatus.Note)
+ a.Equal(note, txStatus.Txn.Txn.Note)
}
func TestClientCanGetTransactionStatus(t *testing.T) {
@@ -518,7 +459,7 @@ func TestAccountBalance(t *testing.T) {
_, err = waitForTransaction(t, testClient, someAddress, tx.ID().String(), 30*time.Second)
a.NoError(err)
- account, err := testClient.AccountInformation(toAddress)
+ account, err := testClient.AccountInformation(toAddress, false)
a.NoError(err)
a.Equal(account.AmountWithoutPendingRewards, uint64(100000))
a.Truef(account.Amount >= 100000, "account must have received money, and account information endpoint must print it")
@@ -542,6 +483,7 @@ func TestAccountParticipationInfo(t *testing.T) {
}
a.NoError(err)
addr, err := basics.UnmarshalChecksumAddress(someAddress)
+ a.NoError(err)
params, err := testClient.SuggestedParams()
a.NoError(err)
@@ -584,12 +526,12 @@ func TestAccountParticipationInfo(t *testing.T) {
_, err = waitForTransaction(t, testClient, someAddress, txID, 30*time.Second)
a.NoError(err)
- account, err := testClient.AccountInformation(someAddress)
+ account, err := testClient.AccountInformation(someAddress, false)
a.NoError(err)
- a.Equal(randomVotePKStr, string(account.Participation.ParticipationPK), "API must print correct root voting key")
- a.Equal(randomSelPKStr, string(account.Participation.VRFPK), "API must print correct vrf key")
- a.Equal(uint64(firstRound), account.Participation.VoteFirst, "API must print correct first participation round")
- a.Equal(uint64(lastRound), account.Participation.VoteLast, "API must print correct last participation round")
+ a.Equal(randomVotePKStr, string(account.Participation.VoteParticipationKey), "API must print correct root voting key")
+ a.Equal(randomSelPKStr, string(account.Participation.SelectionParticipationKey), "API must print correct vrf key")
+ a.Equal(uint64(firstRound), account.Participation.VoteFirstValid, "API must print correct first participation round")
+ a.Equal(uint64(lastRound), account.Participation.VoteLastValid, "API must print correct last participation round")
a.Equal(dilution, account.Participation.VoteKeyDilution, "API must print correct key dilution")
// TODO: should we update the v1 API to support state proof? Currently it does not return this field.
}
@@ -620,7 +562,7 @@ func TestClientCanGetGoRoutines(t *testing.T) {
goRoutines, err := testClient.GetGoRoutines(ctx)
a.NoError(err)
a.NotEmpty(goRoutines)
- a.True(strings.Index(goRoutines, "goroutine profile:") >= 0)
+ a.True(strings.Contains(goRoutines, "goroutine profile:"))
}
func TestSendingTooMuchFails(t *testing.T) {
@@ -846,13 +788,15 @@ func TestClientCanGetPendingTransactions(t *testing.T) {
// Check that a single pending txn is corectly displayed
tx, err := testClient.SendPaymentFromUnencryptedWallet(fromAddress, toAddress, minTxnFee, minAcctBalance, nil)
a.NoError(err)
- statusResponse, err := testClient.GetPendingTransactions(0)
+ statusResponse, err := testClient.GetParsedPendingTransactions(0)
a.NoError(err)
a.NotEmpty(statusResponse)
- a.True(statusResponse.TotalTxns == 1)
- a.True(len(statusResponse.TruncatedTxns.Transactions) == 1)
- a.True(statusResponse.TruncatedTxns.Transactions[0].TxID == tx.ID().String())
+ a.True(statusResponse.TotalTransactions == 1)
+ a.True(len(statusResponse.TopTransactions) == 1)
+ // Parse response into SignedTxn
+ pendingTxn := statusResponse.TopTransactions[0]
+ a.True(pendingTxn.Txn.ID().String() == tx.ID().String())
}
func TestClientTruncatesPendingTransactions(t *testing.T) {
@@ -884,14 +828,13 @@ func TestClientTruncatesPendingTransactions(t *testing.T) {
a.NoError(err)
txIDsSeen[tx2.ID().String()] = true
}
- statusResponse, err := testClient.GetPendingTransactions(uint64(MaxTxns))
+ statusResponse, err := testClient.GetParsedPendingTransactions(uint64(MaxTxns))
a.NoError(err)
- a.NotEmpty(statusResponse)
- a.True(int(statusResponse.TotalTxns) == NumTxns)
- a.True(len(statusResponse.TruncatedTxns.Transactions) == MaxTxns)
- for _, tx := range statusResponse.TruncatedTxns.Transactions {
- a.True(txIDsSeen[tx.TxID])
- delete(txIDsSeen, tx.TxID)
+ a.True(int(statusResponse.TotalTransactions) == NumTxns)
+ a.True(len(statusResponse.TopTransactions) == MaxTxns)
+ for _, tx := range statusResponse.TopTransactions {
+ a.True(txIDsSeen[tx.Txn.ID().String()])
+ delete(txIDsSeen, tx.Txn.ID().String())
}
a.True(len(txIDsSeen) == NumTxns-MaxTxns)
}
@@ -931,12 +874,14 @@ func TestClientPrioritizesPendingTransactions(t *testing.T) {
txHigh, err := testClient.SendPaymentFromUnencryptedWallet(fromAddress, toAddress, minTxnFee*10, minAcctBalance, nil)
a.NoError(err)
- statusResponse, err := testClient.GetPendingTransactions(uint64(MaxTxns))
+ statusResponse, err := testClient.GetParsedPendingTransactions(uint64(MaxTxns))
a.NoError(err)
a.NotEmpty(statusResponse)
- a.True(int(statusResponse.TotalTxns) == NumTxns+1)
- a.True(len(statusResponse.TruncatedTxns.Transactions) == MaxTxns)
- a.True(statusResponse.TruncatedTxns.Transactions[0].TxID == txHigh.ID().String())
+ a.True(int(statusResponse.TotalTransactions) == NumTxns+1)
+ a.True(len(statusResponse.TopTransactions) == MaxTxns)
+
+ pendingTxn := statusResponse.TopTransactions[0]
+ a.True(pendingTxn.Txn.ID().String() == txHigh.ID().String())
}
func TestPendingTransactionInfoInnerTxnAssetCreate(t *testing.T) {
@@ -985,15 +930,17 @@ int 1
return
`
ops, err := logic.AssembleString(prog)
+ a.NoError(err)
approv := ops.Program
ops, err = logic.AssembleString("#pragma version 5 \nint 1")
clst := ops.Program
+ a.NoError(err)
gl := basics.StateSchema{}
lc := basics.StateSchema{}
// create app
- appCreateTxn, err := testClient.MakeUnsignedApplicationCallTx(0, nil, nil, nil, nil, transactions.NoOpOC, approv, clst, gl, lc, 0)
+ appCreateTxn, err := testClient.MakeUnsignedApplicationCallTx(0, nil, nil, nil, nil, nil, transactions.NoOpOC, approv, clst, gl, lc, 0)
a.NoError(err)
appCreateTxn, err = testClient.FillUnsignedTxTemplate(someAddress, 0, 0, 0, appCreateTxn)
a.NoError(err)
@@ -1003,7 +950,7 @@ return
a.NoError(err)
// get app ID
- submittedAppCreateTxn, err := testClient.PendingTransactionInformationV2(appCreateTxID)
+ submittedAppCreateTxn, err := testClient.PendingTransactionInformation(appCreateTxID)
a.NoError(err)
a.NotNil(submittedAppCreateTxn.ApplicationIndex)
createdAppID := basics.AppIndex(*submittedAppCreateTxn.ApplicationIndex)
@@ -1017,7 +964,7 @@ return
a.NoError(err)
// call app, which will issue an ASA create inner txn
- appCallTxn, err := testClient.MakeUnsignedAppNoOpTx(uint64(createdAppID), nil, nil, nil, nil)
+ appCallTxn, err := testClient.MakeUnsignedAppNoOpTx(uint64(createdAppID), nil, nil, nil, nil, nil)
a.NoError(err)
appCallTxn, err = testClient.FillUnsignedTxTemplate(someAddress, 0, 0, 0, appCallTxn)
a.NoError(err)
@@ -1027,7 +974,7 @@ return
a.NoError(err)
// verify pending txn info of outer txn
- submittedAppCallTxn, err := testClient.PendingTransactionInformationV2(appCallTxnTxID)
+ submittedAppCallTxn, err := testClient.PendingTransactionInformation(appCallTxnTxID)
a.NoError(err)
a.Nil(submittedAppCallTxn.ApplicationIndex)
a.Nil(submittedAppCallTxn.AssetIndex)
@@ -1041,7 +988,7 @@ return
createdAssetID := *innerTxn.AssetIndex
a.Greater(createdAssetID, uint64(0))
- createdAssetInfo, err := testClient.AssetInformationV2(createdAssetID)
+ createdAssetInfo, err := testClient.AssetInformation(createdAssetID)
a.NoError(err)
a.Equal(createdAssetID, createdAssetInfo.Index)
a.Equal(createdAppID.Address().String(), createdAssetInfo.Params.Creator)
@@ -1122,7 +1069,7 @@ func TestStateProofInParticipationInfo(t *testing.T) {
_, err = waitForTransaction(t, testClient, someAddress, txID, 120*time.Second)
a.NoError(err)
- account, err := testClient.AccountInformationV2(someAddress, false)
+ account, err := testClient.AccountInformation(someAddress, false)
a.NoError(err)
a.NotNil(account.Participation.StateProofKey)
@@ -1148,6 +1095,7 @@ func TestStateProofParticipationKeysAPI(t *testing.T) {
a.NoError(err)
partkey, err := account.RestoreParticipation(partdb)
+ a.NoError(err)
pRoot, err := testClient.GetParticipationKeys()
a.NoError(err)
@@ -1218,7 +1166,394 @@ func TestNilStateProofInParticipationInfo(t *testing.T) {
_, err = waitForTransaction(t, testClient, someAddress, txID, 30*time.Second)
a.NoError(err)
- account, err := testClient.AccountInformationV2(someAddress, false)
+ account, err := testClient.AccountInformation(someAddress, false)
a.NoError(err)
a.Nil(account.Participation.StateProofKey)
}
+
+func TestBoxNamesByAppID(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ a := require.New(fixtures.SynchronizedTest(t))
+ var localFixture fixtures.RestClientFixture
+ localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
+ defer localFixture.Shutdown()
+
+ testClient := localFixture.LibGoalClient
+
+ testClient.WaitForRound(1)
+
+ wh, err := testClient.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ addresses, err := testClient.ListAddresses(wh)
+ a.NoError(err)
+ _, someAddress := getMaxBalAddr(t, testClient, addresses)
+ if someAddress == "" {
+ t.Error("no addr with funds")
+ }
+ a.NoError(err)
+
+ prog := `#pragma version 8
+ txn ApplicationID
+ bz end // create the app
+ txn NumAppArgs
+ bz end // approve when no app args
+ txn ApplicationArgs 0 // [arg[0]] // fails if no args && app already exists
+ byte "create" // [arg[0], "create"] // create box named arg[1]
+ == // [arg[0]=?="create"]
+ bz del // "create" ? continue : goto del
+ int 5 // [5]
+ txn ApplicationArgs 1 // [5, arg[1]]
+ swap
+ box_create // [] // boxes: arg[1] -> [5]byte
+ assert
+ b end
+del: // delete box arg[1]
+ txn ApplicationArgs 0 // [arg[0]]
+ byte "delete" // [arg[0], "delete"]
+ == // [arg[0]=?="delete"]
+ bz set // "delete" ? continue : goto set
+ txn ApplicationArgs 1 // [arg[1]]
+ box_del // del boxes[arg[1]]
+ assert
+ b end
+set: // put arg[1] at start of box arg[0] ... so actually a _partial_ "set"
+ txn ApplicationArgs 0 // [arg[0]]
+ byte "set" // [arg[0], "set"]
+ == // [arg[0]=?="set"]
+ bz bad // "set" ? continue : goto bad
+ txn ApplicationArgs 1 // [arg[1]]
+ int 0 // [arg[1], 0]
+ txn ApplicationArgs 2 // [arg[1], 0, arg[2]]
+ box_replace // [] // boxes: arg[1] -> replace(boxes[arg[1]], 0, arg[2])
+ b end
+bad:
+ err
+end:
+ int 1
+`
+ ops, err := logic.AssembleString(prog)
+ approval := ops.Program
+ ops, err = logic.AssembleString("#pragma version 8\nint 1")
+ clearState := ops.Program
+
+ gl := basics.StateSchema{}
+ lc := basics.StateSchema{}
+
+ // create app
+ appCreateTxn, err := testClient.MakeUnsignedApplicationCallTx(
+ 0, nil, nil, nil,
+ nil, nil, transactions.NoOpOC,
+ approval, clearState, gl, lc, 0,
+ )
+ a.NoError(err)
+ appCreateTxn, err = testClient.FillUnsignedTxTemplate(someAddress, 0, 0, 0, appCreateTxn)
+ a.NoError(err)
+ appCreateTxID, err := testClient.SignAndBroadcastTransaction(wh, nil, appCreateTxn)
+ a.NoError(err)
+ _, err = waitForTransaction(t, testClient, someAddress, appCreateTxID, 30*time.Second)
+ a.NoError(err)
+
+ // get app ID
+ submittedAppCreateTxn, err := testClient.PendingTransactionInformation(appCreateTxID)
+ a.NoError(err)
+ a.NotNil(submittedAppCreateTxn.ApplicationIndex)
+ createdAppID := basics.AppIndex(*submittedAppCreateTxn.ApplicationIndex)
+ a.Greater(uint64(createdAppID), uint64(0))
+
+ // fund app account
+ appFundTxn, err := testClient.SendPaymentFromWallet(
+ wh, nil, someAddress, createdAppID.Address().String(),
+ 0, 10_000_000, nil, "", 0, 0,
+ )
+ a.NoError(err)
+ appFundTxID := appFundTxn.ID()
+ _, err = waitForTransaction(t, testClient, someAddress, appFundTxID.String(), 30*time.Second)
+ a.NoError(err)
+
+ createdBoxName := map[string]bool{}
+ var createdBoxCount uint64 = 0
+
+ // define operate box helper
+ operateBoxAndSendTxn := func(operation string, boxNames []string, boxValues []string, errPrefix ...string) {
+ txns := make([]transactions.Transaction, len(boxNames))
+ txIDs := make(map[string]string, len(boxNames))
+
+ for i := 0; i < len(boxNames); i++ {
+ appArgs := [][]byte{
+ []byte(operation),
+ []byte(boxNames[i]),
+ []byte(boxValues[i]),
+ }
+ boxRef := transactions.BoxRef{
+ Name: []byte(boxNames[i]),
+ Index: 0,
+ }
+
+ txns[i], err = testClient.MakeUnsignedAppNoOpTx(
+ uint64(createdAppID), appArgs,
+ nil, nil, nil,
+ []transactions.BoxRef{boxRef},
+ )
+ a.NoError(err)
+ txns[i], err = testClient.FillUnsignedTxTemplate(someAddress, 0, 0, 0, txns[i])
+ a.NoError(err)
+ txIDs[txns[i].ID().String()] = someAddress
+ }
+
+ var gid crypto.Digest
+ gid, err = testClient.GroupID(txns)
+ a.NoError(err)
+
+ stxns := make([]transactions.SignedTxn, len(boxNames))
+ for i := 0; i < len(boxNames); i++ {
+ txns[i].Group = gid
+ wh, err = testClient.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ stxns[i], err = testClient.SignTransactionWithWallet(wh, nil, txns[i])
+ a.NoError(err)
+ }
+
+ err = testClient.BroadcastTransactionGroup(stxns)
+ if len(errPrefix) == 0 {
+ a.NoError(err)
+ _, err = waitForTransaction(t, testClient, someAddress, txns[0].ID().String(), 30*time.Second)
+ a.NoError(err)
+ } else {
+ a.ErrorContains(err, errPrefix[0])
+ }
+ }
+
+ // `assertErrorResponse` confirms the _Result limit exceeded_ error response provides expected fields and values.
+ assertErrorResponse := func(err error, expectedCount, requestedMax uint64) {
+ a.Error(err)
+ e := err.(client.HTTPError)
+ a.Equal(400, e.StatusCode)
+
+ var er *model.ErrorResponse
+ err = protocol.DecodeJSON([]byte(e.ErrorString), &er)
+ a.NoError(err)
+ a.Equal("Result limit exceeded", er.Message)
+ a.Equal(uint64(100000), ((*er.Data)["max-api-box-per-application"]).(uint64))
+ a.Equal(requestedMax, ((*er.Data)["max"]).(uint64))
+ a.Equal(expectedCount, ((*er.Data)["total-boxes"]).(uint64))
+
+ a.Len(*er.Data, 3, fmt.Sprintf("error response (%v) contains unverified fields. Extend test for new fields.", *er.Data))
+ }
+
+ // `assertBoxCount` sanity checks that the REST API respects `expectedCount` through different queries against app ID = `createdAppID`.
+ assertBoxCount := func(expectedCount uint64) {
+ // Query without client-side limit.
+ resp, err := testClient.ApplicationBoxes(uint64(createdAppID), 0)
+ a.NoError(err)
+ a.Len(resp.Boxes, int(expectedCount))
+
+ // Query with requested max < expected expectedCount.
+ _, err = testClient.ApplicationBoxes(uint64(createdAppID), expectedCount-1)
+ assertErrorResponse(err, expectedCount, expectedCount-1)
+
+ // Query with requested max == expected expectedCount.
+ resp, err = testClient.ApplicationBoxes(uint64(createdAppID), expectedCount)
+ a.NoError(err)
+ a.Len(resp.Boxes, int(expectedCount))
+
+ // Query with requested max > expected expectedCount.
+ resp, err = testClient.ApplicationBoxes(uint64(createdAppID), expectedCount+1)
+ a.NoError(err)
+ a.Len(resp.Boxes, int(expectedCount))
+ }
+
+ // helper function, take operation and a slice of box names
+ // then submit transaction group containing all operations on box names
+ // Then we check these boxes are appropriately created/deleted
+ operateAndMatchRes := func(operation string, boxNames []string) {
+ boxValues := make([]string, len(boxNames))
+ if operation == "create" {
+ for i, box := range boxNames {
+ keyValid, ok := createdBoxName[box]
+ a.False(ok && keyValid)
+ boxValues[i] = ""
+ }
+ } else if operation == "delete" {
+ for i, box := range boxNames {
+ keyValid, ok := createdBoxName[box]
+ a.True(keyValid == ok)
+ boxValues[i] = ""
+ }
+ } else {
+ a.Failf("Unknown operation %s", operation)
+ }
+
+ operateBoxAndSendTxn(operation, boxNames, boxValues)
+
+ if operation == "create" {
+ for _, box := range boxNames {
+ createdBoxName[box] = true
+ }
+ createdBoxCount += uint64(len(boxNames))
+ } else if operation == "delete" {
+ for _, box := range boxNames {
+ createdBoxName[box] = false
+ }
+ createdBoxCount -= uint64(len(boxNames))
+ }
+
+ var resp model.BoxesResponse
+ resp, err = testClient.ApplicationBoxes(uint64(createdAppID), 0)
+ a.NoError(err)
+
+ expectedCreatedBoxes := make([]string, 0, createdBoxCount)
+ for name, isCreate := range createdBoxName {
+ if isCreate {
+ expectedCreatedBoxes = append(expectedCreatedBoxes, name)
+ }
+ }
+ sort.Strings(expectedCreatedBoxes)
+
+ actualBoxes := make([]string, len(resp.Boxes))
+ for i, box := range resp.Boxes {
+ actualBoxes[i] = string(box.Name)
+ }
+ sort.Strings(actualBoxes)
+
+ a.Equal(expectedCreatedBoxes, actualBoxes)
+ }
+
+ testingBoxNames := []string{
+ ` `,
+ ` `,
+ ` ? = % ;`,
+ `; DROP *;`,
+ `OR 1 = 1;`,
+ `" ; SELECT * FROM kvstore; DROP acctrounds; `,
+ `背负青天而莫之夭阏者,而后乃今将图南。`,
+ `於浩歌狂熱之際中寒﹔於天上看見深淵。`,
+ `於一切眼中看見無所有﹔於無所希望中得救。`,
+ `有一遊魂,化為長蛇,口有毒牙。`,
+ `不以嚙人,自嚙其身,終以殞顛。`,
+ `那些智力超常的人啊`,
+ `认为已经,熟悉了云和闪电的脾气`,
+ `就不再迷惑,就不必了解自己,世界和他人`,
+ `每天只管,被微风吹拂,与猛虎谈情`,
+ `他们从来,不需要楼梯,只有窗口`,
+ `把一切交付于梦境,和优美的浪潮`,
+ `在这颗行星所有的酒馆,青春自由似乎理所应得`,
+ `面向涣散的未来,只唱情歌,看不到坦克`,
+ `在科学和啤酒都不能安抚的夜晚`,
+ `他们丢失了四季,惶惑之行开始`,
+ `这颗行星所有的酒馆,无法听到远方的呼喊`,
+ `野心勃勃的灯火,瞬间吞没黑暗的脸庞`,
+ `b64:APj/AA==`,
+ `str:123.3/aa\\0`,
+ string([]byte{0, 255, 254, 254}),
+ string([]byte{0, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF}),
+ `; SELECT key from kvstore WHERE key LIKE %;`,
+ `?&%!=`,
+ "SELECT * FROM kvstore " + string([]byte{0, 0}) + " WHERE key LIKE %; ",
+ string([]byte{'%', 'a', 'b', 'c', 0, 0, '%', 'a', '!'}),
+ `
+`,
+ `™£´´∂ƒ∂ƒßƒ©∑®ƒß∂†¬∆`,
+ `∑´´˙©˚¬∆ßåƒ√¬`,
+ }
+
+ // Happy Vanilla paths:
+ resp, err := testClient.ApplicationBoxes(uint64(createdAppID), 0)
+ a.NoError(err)
+ a.Empty(resp.Boxes)
+
+ // Some Un-Happy / Non-Vanilla paths:
+
+ // Even though the next box _does not exist_ as asserted by the error below,
+ // querying it for boxes _DOES NOT ERROR_. There is no easy way to tell
+ // the difference between non-existing boxes for an app that once existed
+ vs. an app that NEVER existed.
+ nonexistentAppIndex := uint64(1337)
+ _, err = testClient.ApplicationInformation(nonexistentAppIndex)
+ a.ErrorContains(err, "application does not exist")
+ resp, err = testClient.ApplicationBoxes(nonexistentAppIndex, 0)
+ a.NoError(err)
+ a.Len(resp.Boxes, 0)
+
+ operateBoxAndSendTxn("create", []string{``}, []string{``}, "box names may not be zero length")
+
+ for i := 0; i < len(testingBoxNames); i += 16 {
+ var strSliceTest []string
+ // grouping box names to operate, and create such boxes
+ if i+16 >= len(testingBoxNames) {
+ strSliceTest = testingBoxNames[i:]
+ } else {
+ strSliceTest = testingBoxNames[i : i+16]
+ }
+ operateAndMatchRes("create", strSliceTest)
+ }
+
+ assertBoxCount(uint64(len(testingBoxNames)))
+
+ for i := 0; i < len(testingBoxNames); i += 16 {
+ var strSliceTest []string
+ // grouping box names to operate, and delete such boxes
+ if i+16 >= len(testingBoxNames) {
+ strSliceTest = testingBoxNames[i:]
+ } else {
+ strSliceTest = testingBoxNames[i : i+16]
+ }
+ operateAndMatchRes("delete", strSliceTest)
+ }
+
+ resp, err = testClient.ApplicationBoxes(uint64(createdAppID), 0)
+ a.NoError(err)
+ a.Empty(resp.Boxes)
+
+ // Get Box value from box name
+ encodeInt := func(n uint64) []byte {
+ ibytes := make([]byte, 8)
+ binary.BigEndian.PutUint64(ibytes, n)
+ return ibytes
+ }
+
+ boxTests := []struct {
+ name []byte
+ encodedName string
+ value []byte
+ }{
+ {[]byte("foo"), "str:foo", []byte("bar12")},
+ {encodeInt(12321), "int:12321", []byte{0, 1, 254, 3, 2}},
+ {[]byte{0, 248, 255, 32}, "b64:APj/IA==", []byte("lux56")},
+ }
+ for _, boxTest := range boxTests {
+ // Box values are 5 bytes, as defined by the test TEAL program.
+ operateBoxAndSendTxn("create", []string{string(boxTest.name)}, []string{""})
+ operateBoxAndSendTxn("set", []string{string(boxTest.name)}, []string{string(boxTest.value)})
+
+ boxResponse, err := testClient.GetApplicationBoxByName(uint64(createdAppID), boxTest.encodedName)
+ a.NoError(err)
+ a.Equal(boxTest.name, boxResponse.Name)
+ a.Equal(boxTest.value, boxResponse.Value)
+ }
+
+ const numberOfBoxesRemaining = uint64(3)
+ assertBoxCount(numberOfBoxesRemaining)
+
+ // Non-vanilla. Wasteful but correct. Can delete an app without first cleaning up its boxes.
+ appAccountData, err := testClient.AccountData(createdAppID.Address().String())
+ a.NoError(err)
+ a.Equal(numberOfBoxesRemaining, appAccountData.TotalBoxes)
+ a.Equal(uint64(30), appAccountData.TotalBoxBytes)
+
+ // delete the app
+ appDeleteTxn, err := testClient.MakeUnsignedAppDeleteTx(uint64(createdAppID), nil, nil, nil, nil, nil)
+ a.NoError(err)
+ appDeleteTxn, err = testClient.FillUnsignedTxTemplate(someAddress, 0, 0, 0, appDeleteTxn)
+ a.NoError(err)
+ appDeleteTxID, err := testClient.SignAndBroadcastTransaction(wh, nil, appDeleteTxn)
+ a.NoError(err)
+ _, err = waitForTransaction(t, testClient, someAddress, appDeleteTxID, 30*time.Second)
+ a.NoError(err)
+
+ _, err = testClient.ApplicationInformation(uint64(createdAppID))
+ a.ErrorContains(err, "application does not exist")
+
+ assertBoxCount(numberOfBoxesRemaining)
+}
diff --git a/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go b/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go
index 064a61596..fc05837ba 100644
--- a/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go
+++ b/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go
@@ -173,10 +173,12 @@ func TestManyAccountsCanGoOnline(t *testing.T) {
i = 0 // for assert debug messages
for txid, account := range txidsToAccountsGoOnline {
- accountStatus, err := client.AccountInformation(account)
+ accountStatus, err := client.AccountInformation(account, false)
+ a.NoError(err)
_, round := fixture.GetBalanceAndRound(account)
- curTxStatus, err := client.TransactionInformation(account, txid)
- a.True(curTxStatus.ConfirmedRound <= round, "go online transaction confirmed on round %d, current round is %d\n", curTxStatus.ConfirmedRound, round)
+ curTxStatus, err := client.PendingTransactionInformation(txid)
+ a.NotNil(curTxStatus.ConfirmedRound)
+ a.True(*curTxStatus.ConfirmedRound <= round, "go online transaction confirmed on round %d, current round is %d\n", curTxStatus.ConfirmedRound, round)
a.NoError(err, "should be no error when querying account information (query number %v regarding account %v)", i, account)
a.Equal(byte(basics.Online), accountStatus.Status, "account %v should be online by now", account)
i++
diff --git a/test/e2e-go/upgrades/application_support_test.go b/test/e2e-go/upgrades/application_support_test.go
index 3667cc3ce..00ee3024a 100644
--- a/test/e2e-go/upgrades/application_support_test.go
+++ b/test/e2e-go/upgrades/application_support_test.go
@@ -149,7 +149,7 @@ int 1
// create the app
tx, err := client.MakeUnsignedAppCreateTx(
- transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, 0)
+ transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, nil, 0)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(creator, 0, 0, fee, tx)
a.NoError(err)
@@ -204,7 +204,7 @@ int 1
client.WaitForRound(round + 2)
pendingTx, err := client.GetPendingTransactions(1)
a.NoError(err)
- a.Equal(uint64(0), pendingTx.TotalTxns)
+ a.Equal(uint64(0), pendingTx.TotalTransactions)
// check creator's balance record for the app entry and the state changes
ad, err = client.AccountData(creator)
@@ -236,7 +236,7 @@ int 1
a.Equal(uint64(1), value.Uint)
// call the app
- tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil)
+ tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil, nil)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx)
a.NoError(err)
@@ -395,7 +395,7 @@ int 1
// create the app
tx, err := client.MakeUnsignedAppCreateTx(
- transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, 0)
+ transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, nil, 0)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(creator, round, round+primaryNodeUnupgradedProtocol.DefaultUpgradeWaitRounds, fee, tx)
a.NoError(err)
@@ -420,12 +420,12 @@ int 1
t.Skip("Test platform is too slow for this test")
}
- a.Equal(uint64(1), pendingTx.TotalTxns)
+ a.Equal(uint64(1), pendingTx.TotalTransactions)
// check that the secondary node doesn't have that transaction in it's transaction pool.
pendingTx, err = secondary.GetPendingTransactions(1)
a.NoError(err)
- a.Equal(uint64(0), pendingTx.TotalTxns)
+ a.Equal(uint64(0), pendingTx.TotalTransactions)
curStatus, err := client.Status()
a.NoError(err)
@@ -448,18 +448,14 @@ int 1
a.NoError(err)
signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx)
a.NoError(err)
- _, err = client.BroadcastTransaction(signedTxn)
- a.NoError(err)
-
- curStatus, err = client.Status()
+ txid, err := client.BroadcastTransaction(signedTxn)
a.NoError(err)
- round = curStatus.LastRound
-
- client.WaitForRound(round + 2)
- pendingTx, err = client.GetPendingTransactions(1)
+ // Try polling 10 rounds to ensure txn is committed.
+ round, err = client.CurrentRound()
a.NoError(err)
- a.Equal(uint64(0), pendingTx.TotalTxns)
+ isCommitted := fixture.WaitForTxnConfirmation(round+10, creator, txid)
+ a.True(isCommitted)
// check creator's balance record for the app entry and the state changes
ad, err = client.AccountData(creator)
@@ -491,7 +487,7 @@ int 1
a.Equal(uint64(1), value.Uint)
// call the app
- tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil)
+ tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil, nil)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx)
a.NoError(err)
diff --git a/test/e2e-go/upgrades/stateproof_participation_test.go b/test/e2e-go/upgrades/stateproof_participation_test.go
index 6f9cf7f13..75d3dad87 100644
--- a/test/e2e-go/upgrades/stateproof_participation_test.go
+++ b/test/e2e-go/upgrades/stateproof_participation_test.go
@@ -17,6 +17,11 @@
package upgrades
import (
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
@@ -25,10 +30,6 @@ import (
"github.com/algorand/go-algorand/test/framework/fixtures"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/stretchr/testify/require"
- "path/filepath"
- "strings"
- "testing"
- "time"
)
func waitUntilProtocolUpgrades(a *require.Assertions, fixture *fixtures.RestClientFixture, nodeClient libgoal.Client) {
@@ -36,19 +37,19 @@ func waitUntilProtocolUpgrades(a *require.Assertions, fixture *fixtures.RestClie
curRound, err := nodeClient.CurrentRound()
a.NoError(err)
- blk, err := nodeClient.Block(curRound)
+ blk, err := nodeClient.BookkeepingBlock(curRound)
a.NoError(err)
curProtocol := blk.CurrentProtocol
startTime := time.Now()
// while consensus version has not upgraded
- for strings.Compare(curProtocol, string(consensusTestFastUpgrade(protocol.ConsensusV30))) == 0 {
+ for strings.Compare(string(curProtocol), string(consensusTestFastUpgrade(protocol.ConsensusV30))) == 0 {
curRound = curRound + 1
fixture.WaitForRoundWithTimeout(curRound + 1)
// TODO: check node status instead of latest block?
- blk, err := nodeClient.Block(curRound)
+ blk, err := nodeClient.BookkeepingBlock(curRound)
a.NoError(err)
curProtocol = blk.CurrentProtocol
diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go
index 746a0c2f9..02548c1f0 100644
--- a/test/framework/fixtures/libgoalFixture.go
+++ b/test/framework/fixtures/libgoalFixture.go
@@ -33,7 +33,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklearray"
- generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/gen"
"github.com/algorand/go-algorand/libgoal"
@@ -479,7 +479,7 @@ func (f *LibGoalFixture) CurrentConsensusParams() (consensus config.ConsensusPar
// ConsensusParams returns the consensus parameters for the protocol from the specified round
func (f *LibGoalFixture) ConsensusParams(round uint64) (consensus config.ConsensusParams, err error) {
- block, err := f.LibGoalClient.Block(round)
+ block, err := f.LibGoalClient.BookkeepingBlock(round)
if err != nil {
return
}
@@ -526,31 +526,31 @@ func (f *LibGoalFixture) MinFeeAndBalance(round uint64) (minFee, minBalance uint
}
// TransactionProof returns a proof for usage in merkle array verification for the provided transaction.
-func (f *LibGoalFixture) TransactionProof(txid string, round uint64, hashType crypto.HashType) (generatedV2.TransactionProofResponse, merklearray.SingleLeafProof, error) {
+func (f *LibGoalFixture) TransactionProof(txid string, round uint64, hashType crypto.HashType) (model.TransactionProofResponse, merklearray.SingleLeafProof, error) {
proofResp, err := f.LibGoalClient.TransactionProof(txid, round, hashType)
if err != nil {
- return generatedV2.TransactionProofResponse{}, merklearray.SingleLeafProof{}, err
+ return model.TransactionProofResponse{}, merklearray.SingleLeafProof{}, err
}
- proof, err := merklearray.ProofDataToSingleLeafProof(proofResp.Hashtype, proofResp.Treedepth, proofResp.Proof)
+ proof, err := merklearray.ProofDataToSingleLeafProof(string(proofResp.Hashtype), proofResp.Treedepth, proofResp.Proof)
if err != nil {
- return generatedV2.TransactionProofResponse{}, merklearray.SingleLeafProof{}, err
+ return model.TransactionProofResponse{}, merklearray.SingleLeafProof{}, err
}
return proofResp, proof, nil
}
// LightBlockHeaderProof returns a proof for usage in merkle array verification for the provided block's light block header.
-func (f *LibGoalFixture) LightBlockHeaderProof(round uint64) (generatedV2.LightBlockHeaderProofResponse, merklearray.SingleLeafProof, error) {
+func (f *LibGoalFixture) LightBlockHeaderProof(round uint64) (model.LightBlockHeaderProofResponse, merklearray.SingleLeafProof, error) {
proofResp, err := f.LibGoalClient.LightBlockHeaderProof(round)
if err != nil {
- return generatedV2.LightBlockHeaderProofResponse{}, merklearray.SingleLeafProof{}, err
+ return model.LightBlockHeaderProofResponse{}, merklearray.SingleLeafProof{}, err
}
proof, err := merklearray.ProofDataToSingleLeafProof(crypto.Sha256.String(), proofResp.Treedepth, proofResp.Proof)
if err != nil {
- return generatedV2.LightBlockHeaderProofResponse{}, merklearray.SingleLeafProof{}, err
+ return model.LightBlockHeaderProofResponse{}, merklearray.SingleLeafProof{}, err
}
return proofResp, proof, nil
diff --git a/test/framework/fixtures/restClientFixture.go b/test/framework/fixtures/restClientFixture.go
index c9f3befa8..9239895cd 100644
--- a/test/framework/fixtures/restClientFixture.go
+++ b/test/framework/fixtures/restClientFixture.go
@@ -18,15 +18,20 @@ package fixtures
import (
"fmt"
- "github.com/algorand/go-algorand/data/basics"
"sort"
"time"
"unicode"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/protocol"
+
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/daemon/algod/api/client"
+ v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/nodecontrol"
"github.com/algorand/go-algorand/test/e2e-go/globals"
@@ -166,7 +171,7 @@ func (f *RestClientFixture) GetFirstAccount() (account string, err error) {
}
// GetRichestAccount returns the first account when calling GetWalletsSortedByBalance, which should be the richest account
-func (f *RestClientFixture) GetRichestAccount() (richest v1.Account, err error) {
+func (f *RestClientFixture) GetRichestAccount() (richest model.Account, err error) {
list, err := f.GetWalletsSortedByBalance()
if len(list) > 0 {
richest = list[0]
@@ -191,17 +196,17 @@ func (f *RestClientFixture) GetBalanceAndRound(account string) (balance uint64,
// GetWalletsSortedByBalance returns the Primary node's accounts sorted DESC by balance
// the richest account will be at accounts[0]
-func (f *RestClientFixture) GetWalletsSortedByBalance() (accounts []v1.Account, err error) {
+func (f *RestClientFixture) GetWalletsSortedByBalance() (accounts []model.Account, err error) {
return f.getNodeWalletsSortedByBalance(f.LibGoalClient)
}
// GetNodeWalletsSortedByBalance returns the specified node's accounts sorted DESC by balance
// the richest account will be at accounts[0]
-func (f *RestClientFixture) GetNodeWalletsSortedByBalance(nodeDataDir string) (accounts []v1.Account, err error) {
+func (f *RestClientFixture) GetNodeWalletsSortedByBalance(nodeDataDir string) (accounts []model.Account, err error) {
return f.getNodeWalletsSortedByBalance(f.GetLibGoalClientFromDataDir(nodeDataDir))
}
-func (f *RestClientFixture) getNodeWalletsSortedByBalance(client libgoal.Client) (accounts []v1.Account, err error) {
+func (f *RestClientFixture) getNodeWalletsSortedByBalance(client libgoal.Client) (accounts []model.Account, err error) {
wh, err := client.GetUnencryptedWalletHandle()
if err != nil {
return nil, fmt.Errorf("unable to retrieve wallet handle : %v", err)
@@ -211,7 +216,7 @@ func (f *RestClientFixture) getNodeWalletsSortedByBalance(client libgoal.Client)
return nil, fmt.Errorf("unable to list wallet addresses : %v", err)
}
for _, addr := range addresses {
- info, err := client.AccountInformation(addr)
+ info, err := client.AccountInformation(addr, true)
f.failOnError(err, "failed to get account info: %v")
accounts = append(accounts, info)
}
@@ -232,7 +237,7 @@ func (f *RestClientFixture) WaitForTxnConfirmation(roundTimeout uint64, accountA
// WaitForConfirmedTxn waits until either the passed txid is confirmed
// or until the passed roundTimeout passes
// or until waiting for a round to pass times out
-func (f *RestClientFixture) WaitForConfirmedTxn(roundTimeout uint64, accountAddress, txid string) (txn v1.Transaction, err error) {
+func (f *RestClientFixture) WaitForConfirmedTxn(roundTimeout uint64, accountAddress, txid string) (txn v2.PreEncodedTxInfo, err error) {
client := f.AlgodClient
for {
// Get current round information
@@ -241,11 +246,17 @@ func (f *RestClientFixture) WaitForConfirmedTxn(roundTimeout uint64, accountAddr
curRound := curStatus.LastRound
// Check if we know about the transaction yet
- txn, err = client.TransactionInformation(accountAddress, txid)
+ var resp []byte
+ resp, err = client.RawPendingTransactionInformation(txid)
if err == nil {
- return
+ err = protocol.DecodeReflect(resp, &txn)
+ require.NoError(f.t, err)
}
+ // Check if transaction was confirmed
+ if txn.ConfirmedRound != nil && *txn.ConfirmedRound > 0 {
+ return
+ }
// Check if we should wait a round
if curRound > roundTimeout {
err = fmt.Errorf("failed to see confirmed transaction by round %v", roundTimeout)
@@ -267,11 +278,11 @@ func (f *RestClientFixture) WaitForAllTxnsToConfirm(roundTimeout uint64, txidsAn
_, err := f.WaitForConfirmedTxn(roundTimeout, addr, txid)
if err != nil {
f.t.Logf("txn failed to confirm: ", addr, txid)
- pendingTxns, err := f.AlgodClient.GetPendingTransactions(0)
+ pendingTxns, err := f.LibGoalClient.GetParsedPendingTransactions(0)
if err == nil {
- pendingTxids := make([]string, 0, pendingTxns.TotalTxns)
- for _, txn := range pendingTxns.TruncatedTxns.Transactions {
- pendingTxids = append(pendingTxids, txn.TxID)
+ pendingTxids := make([]string, 0, pendingTxns.TotalTransactions)
+ for _, txn := range pendingTxns.TopTransactions {
+ pendingTxids = append(pendingTxids, txn.Txn.ID().String())
}
f.t.Logf("pending txids: ", pendingTxids)
} else {
@@ -300,7 +311,7 @@ func (f *RestClientFixture) WaitForAccountFunded(roundTimeout uint64, accountAdd
curRound := curStatus.LastRound
// Check if we know about the transaction yet
- acct, acctErr := client.AccountInformation(accountAddress)
+ acct, acctErr := client.AccountInformation(accountAddress, false)
require.NoError(f.t, acctErr, "fixture should be able to get account info")
if acct.Amount > 0 {
return nil
@@ -318,7 +329,7 @@ func (f *RestClientFixture) WaitForAccountFunded(roundTimeout uint64, accountAdd
// SendMoneyAndWait uses the rest client to send money and WaitForTxnConfirmation to wait for the send to confirm
// it adds some extra error checking as well
-func (f *RestClientFixture) SendMoneyAndWait(curRound, amountToSend, transactionFee uint64, fromAccount, toAccount string, closeToAccount string) (txn v1.Transaction) {
+func (f *RestClientFixture) SendMoneyAndWait(curRound, amountToSend, transactionFee uint64, fromAccount, toAccount string, closeToAccount string) (txn v2.PreEncodedTxInfo) {
client := f.LibGoalClient
wh, err := client.GetUnencryptedWalletHandle()
require.NoError(f.t, err, "client should be able to get unencrypted wallet handle")
@@ -327,7 +338,7 @@ func (f *RestClientFixture) SendMoneyAndWait(curRound, amountToSend, transaction
}
// SendMoneyAndWaitFromWallet is as above, but for a specific wallet
-func (f *RestClientFixture) SendMoneyAndWaitFromWallet(walletHandle, walletPassword []byte, curRound, amountToSend, transactionFee uint64, fromAccount, toAccount string, closeToAccount string) (txn v1.Transaction) {
+func (f *RestClientFixture) SendMoneyAndWaitFromWallet(walletHandle, walletPassword []byte, curRound, amountToSend, transactionFee uint64, fromAccount, toAccount string, closeToAccount string) (txn v2.PreEncodedTxInfo) {
client := f.LibGoalClient
// use one curRound - 1 in case other nodes are behind
fundingTx, err := client.SendPaymentFromWallet(walletHandle, walletPassword, fromAccount, toAccount, transactionFee, amountToSend, nil, closeToAccount, basics.Round(curRound).SubSaturate(1), 0)
@@ -344,9 +355,9 @@ func (f *RestClientFixture) SendMoneyAndWaitFromWallet(walletHandle, walletPassw
func (f *RestClientFixture) VerifyBlockProposedRange(account string, fromRound, countDownNumRounds int) (blockWasProposed bool) {
c := f.LibGoalClient
for i := 0; i < countDownNumRounds; i++ {
- block, err := c.Block(uint64(fromRound - i))
+ cert, err := c.EncodedBlockCert(uint64(fromRound - i))
require.NoError(f.t, err, "client failed to get block %d", fromRound-i)
- if block.Proposer == account {
+ if cert.Certificate.Proposal.OriginalProposer.GetUserAddress() == account {
blockWasProposed = true
break
}
diff --git a/test/heapwatch/block_history.py b/test/heapwatch/block_history.py
index 29182e760..ac5c631c4 100644
--- a/test/heapwatch/block_history.py
+++ b/test/heapwatch/block_history.py
@@ -48,6 +48,19 @@ def addr_token_from_algod(algorand_data):
def loads(blob):
return msgpack.loads(base64.b64decode(blob), strict_map_key=False)
+def bstr(x):
+ if isinstance(x, bytes):
+ try:
+ return x.decode()
+ except:
+ pass
+ return x
+
+def obnice(ob):
+ if isinstance(ob, dict):
+ return {bstr(k):obnice(v) for k,v in ob.items()}
+ return ob
+
def dumps(blob):
return base64.b64encode(msgpack.dumps(blob))
@@ -180,8 +193,10 @@ class Fetcher:
if b is None:
print("got None nextblock. exiting")
return
- b = msgpack.loads(b, strict_map_key=False)
+ b = msgpack.loads(b, strict_map_key=False, raw=True)
+ b = obnice(b)
nowround = b['block'].get('rnd', 0)
+ logger.debug('r%d', nowround)
if (lastround is not None) and (nowround != lastround + 1):
logger.info('round jump %d to %d', lastround, nowround)
self._block_handler(b)
@@ -226,7 +241,7 @@ def main():
logging.basicConfig(level=logging.INFO)
algorand_data = args.algod or os.getenv('ALGORAND_DATA')
- if not algorand_data and not (args.token and args.addr):
+ if not algorand_data and not ((args.token or args.headers) and args.addr):
sys.stderr.write('must specify algod data dir by $ALGORAND_DATA or -d/--algod; OR --a/--addr and -t/--token\n')
sys.exit(1)
diff --git a/test/heapwatch/block_history_plot.py b/test/heapwatch/block_history_plot.py
index 174c1dca1..48bd22ebd 100644
--- a/test/heapwatch/block_history_plot.py
+++ b/test/heapwatch/block_history_plot.py
@@ -23,12 +23,24 @@
# Graph over time of TPS or 10-round-moving-average-TPS
import base64
+import os
import statistics
from algosdk.encoding import msgpack
from matplotlib import pyplot as plt
def process(path, args):
+ minrnd = None
+ maxrnd = None
+ # maybe load first/last round bounds from heapWatch.py emitted rounds.json
+ rounds_json = os.path.join(os.path.dirname(path), 'rounds.json')
+ if os.path.exists(rounds_json):
+ with open(rounds_json) as fin:
+ rounds = json.load(fin)
+ minrnd = rounds['min']
+ maxrnd = rounds['max']
+ minrnd = args.start or minrnd or 0
+ maxrnd = args.stop or maxrnd
prevtime = None
prevtc = 0
prevts = None
@@ -52,6 +64,8 @@ def process(path, args):
count += 1
block = row['block']
rnd = block.get('rnd',0)
+ if (rnd < minrnd) or ((maxrnd is not None) and (rnd > maxrnd)):
+ continue
tc = block.get('tc', 0)
ts = block.get('ts', 0) # timestamp recorded at algod, 1s resolution int
_time = row['_time'] # timestamp recorded at client, 0.000001s resolution float
@@ -66,17 +80,20 @@ def process(path, args):
tsv.append(ts)
else:
tsv.append(_time)
- dtxn = tc - prevtc
- tps = dtxn / dt
- mintxn = min(dtxn,mintxn)
- maxtxn = max(dtxn,maxtxn)
- mindt = min(dt,mindt)
- maxdt = max(dt,maxdt)
- mintps = min(tps,mintps)
- maxtps = max(tps,maxtps)
- tpsv.append(tps)
- dtv.append(dt)
- txnv.append(dtxn)
+ if dt > 0.5:
+ dtxn = tc - prevtc
+ tps = dtxn / dt
+ mintxn = min(dtxn,mintxn)
+ maxtxn = max(dtxn,maxtxn)
+ mindt = min(dt,mindt)
+ maxdt = max(dt,maxdt)
+ mintps = min(tps,mintps)
+ maxtps = max(tps,maxtps)
+ tpsv.append(tps)
+ dtv.append(dt)
+ txnv.append(dtxn)
+ else:
+ print('b[{}] - b[{}], dt={}'.format(rnd-1,rnd,dt))
else:
tsv.append(ts)
prevrnd = rnd
@@ -90,7 +107,7 @@ def process(path, args):
mintps,maxtps,
))
- start = args.start
+ start = 0
end = len(txnv)-1
if not args.all:
# find the real start of the test
@@ -119,8 +136,12 @@ def process(path, args):
ax1.set_title('round time (seconds)')
ax1.hist(list(filter(lambda x: x < 9,dtv[start:end])),bins=20)
- ax2.set_title('TPS')
- ax2.hist(tpsv[start:end],bins=20)
+ if args.rtime:
+ ax2.set_title('round time')
+ ax2.plot(dtv)
+ else:
+ ax2.set_title('TPS')
+ ax2.hist(tpsv[start:end],bins=20)
ax3.set_title('txn/block')
ax3.hist(txnv[start:end],bins=20)
@@ -133,6 +154,8 @@ def process(path, args):
tc0 = tcv[i-10]
tca = tcv[i]
dt = tsa-ts0
+ if dt == 0:
+ continue
dtxn = tca-tc0
tpsv10.append(dtxn/dt)
if args.tps1:
@@ -152,7 +175,9 @@ def main():
ap.add_argument('files', nargs='+')
ap.add_argument('--all', default=False, action='store_true')
ap.add_argument('--tps1', default=False, action='store_true')
- ap.add_argument('--start', default=0, type=int, help='start round')
+ ap.add_argument('--rtime', default=False, action='store_true')
+ ap.add_argument('--start', default=None, type=int, help='start round')
+ ap.add_argument('--stop', default=None, type=int, help='stop round')
args = ap.parse_args()
for fname in args.files:
diff --git a/test/heapwatch/heapWatch.py b/test/heapwatch/heapWatch.py
index aced214f0..883223e05 100644
--- a/test/heapwatch/heapWatch.py
+++ b/test/heapwatch/heapWatch.py
@@ -262,6 +262,7 @@ class watcher:
self.they = []
self.netseen = set()
self.latest_round = None
+ self.rounds_seen = set()
self.bi_hosts = []
self.netToAd = {}
os.makedirs(self.args.out, exist_ok=True)
@@ -371,6 +372,7 @@ class watcher:
biq.put({})
mrt.join()
self.latest_round = mr.maxrnd
+ self.rounds_seen.add(self.latest_round)
logger.debug('blockinfo done')
if get_cpu:
cpuSample = durationToSeconds(self.args.cpu_sample) or 90
@@ -394,6 +396,16 @@ class watcher:
self.prevsnapshots = newsnapshots
logger.debug('end snapshot %s', snapshot_name)
+ def summaries(self):
+ if self.args.out and self.rounds_seen:
+ rpath = os.path.join(self.args.out, 'rounds.json')
+ with open(rpath, 'wt') as fout:
+ json.dump({
+ "min": min(self.rounds_seen),
+ "max": max(self.rounds_seen),
+ "all": sorted(self.rounds_seen),
+ }, fout)
+
def durationToSeconds(rts):
if rts is None:
return None
@@ -499,6 +511,7 @@ def main():
if (end_round is not None) and (app.latest_round is not None) and (app.latest_round >= end_round):
logger.debug('after end round %d > %d', app.latest_round, end_round)
return 0
+ app.summaries()
return 0
if __name__ == '__main__':
diff --git a/test/heapwatch/metrics_delta.py b/test/heapwatch/metrics_delta.py
index 70324c3c7..3ec493988 100644
--- a/test/heapwatch/metrics_delta.py
+++ b/test/heapwatch/metrics_delta.py
@@ -191,7 +191,7 @@ class summary:
def blockinfo(self, curtime):
return self.biByTime.get(curtime)
- def byMsg(self):
+ def byMsg(self, html=False):
txPSums = {}
rxPSums = {}
secondsSum = 0
@@ -209,10 +209,14 @@ class summary:
dictMax(rxMax, ns.rxPLists)
dictMin(txMin, ns.txPLists)
dictMin(rxMin, ns.rxPLists)
- lines = [
- '{} nodes: {}'.format(len(nicks), nicks),
- '\ttx B/s\trx B/s',
- ]
+ nodesummary = '{} nodes: {}'.format(len(nicks), nicks)
+ lines = []
+ if html:
+ lines.append('<div>{}</div>'.format(nodesummary))
+ lines.append('<table><tr><th></th><th>tx B/s</th><th>rx B/s</th></tr>')
+ else:
+ lines.append(nodesummary)
+ lines.append('\ttx B/s\trx B/s')
for msg, txB in txPSums.items():
if msg not in rxPSums:
rxPSums[msg] = 0
@@ -220,7 +224,12 @@ class summary:
txBps = txPSums.get(msg,0)/secondsSum
if (txBps < 0.5) and (rxBps < 0.5):
continue
- lines.append('{}\t{:.0f}\t{:.0f}'.format(msg, txBps, rxBps))
+ if html:
+ lines.append('<tr><td>{}</td><td>{:.0f}</td><td>{:.0f}</td></tr>'.format(msg, txBps, rxBps))
+ else:
+ lines.append('{}\t{:.0f}\t{:.0f}'.format(msg, txBps, rxBps))
+ if html:
+ lines.append('</table>')
return '\n'.join(lines)
def txPool(self):
@@ -242,6 +251,12 @@ class summary:
)
def __str__(self):
+ return self.str(html=False)
+
+ def html(self):
+ return self.str(html=True)
+
+ def str(self, html=False):
if not self.sumsCount:
tps, txbps, rxbps = math.nan, math.nan, math.nan
blockTimes = math.nan
@@ -256,9 +271,17 @@ class summary:
labelspace = self.label + " "
if self.verifyMillis:
verifyMillis = labelspace + 'verify ms ({:.0f}/{:.0f}/{:.0f})\n'.format(min(self.verifyMillis), meanOrZero(self.verifyMillis), max(self.verifyMillis))
+ if html:
+ verifyMillis = '<div>' + verifyMillis + '</div>'
else:
verifyMillis = ''
- return '{byMsg}\n{verifyMillis}{labelspace}{txPool}\n{labelspace}summary: {TPS:0.2f} TPS, {bt:1.2f}s/block, tx {txBps}B/s, rx {rxBps}B/s'.format(labelspace=labelspace, byMsg=self.byMsg(), txPool=self.txPool(), TPS=tps, txBps=hunum(txbps), rxBps=hunum(rxbps), bt=blockTimes, verifyMillis=verifyMillis)
+ if html:
+ fmt = '{byMsg}\n{verifyMillis}<div>{labelspace}{txPool}</div>\n<div>{labelspace}summary: {TPS:0.2f} TPS, {bt:1.2f}s/block, tx {txBps}B/s, rx {rxBps}B/s</div>'
+ if self.label:
+ fmt = '<div class="lh">' + self.label + '</div>' + fmt
+ else:
+ fmt = '{byMsg}\n{verifyMillis}{labelspace}{txPool}\n{labelspace}summary: {TPS:0.2f} TPS, {bt:1.2f}s/block, tx {txBps}B/s, rx {rxBps}B/s'
+ return fmt.format(labelspace=labelspace, byMsg=self.byMsg(html), txPool=self.txPool(), TPS=tps, txBps=hunum(txbps), rxBps=hunum(rxbps), bt=blockTimes, verifyMillis=verifyMillis)
def plot_pool(self, outpath):
from matplotlib import pyplot as plt
@@ -330,6 +353,7 @@ def main():
ap.add_argument('--mintps', default=None, type=float, help="records below min TPS don't add into summary")
ap.add_argument('--deltas', default=None, help='path to write csv deltas')
ap.add_argument('--report', default=None, help='path to write csv report')
+ ap.add_argument('--html-summary', default=None, help='path to write html summary')
ap.add_argument('--nick-re', action='append', default=[], help='regexp to filter node names, may be repeated')
ap.add_argument('--nick-lre', action='append', default=[], help='label:regexp to filter node names, may be repeated')
ap.add_argument('--pool-plot-root', help='write to foo.svg and .png')
@@ -396,6 +420,9 @@ def main():
if args.pool_plot_root:
grsum.plot_pool(args.pool_plot_root)
+ htmlout = None
+ if args.html_summary:
+ htmlout = open(args.html_summary, 'wt')
# maybe subprocess for stats across named groups
if args.nick_re:
# use each --nick-re=foo as a group
@@ -404,6 +431,8 @@ def main():
process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args, grsum)
print(rsum)
print('\n')
+ if htmlout:
+ htmlout.write(rsum.html())
return 0
if args.nick_lre:
for lnre in args.nick_lre:
@@ -412,10 +441,14 @@ def main():
process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args, grsum)
print(rsum)
print('\n')
+ if htmlout:
+ htmlout.write(rsum.html())
return 0
# no filters, print global result
print(grsum)
+ if htmlout:
+ htmlout.write(grsum.html())
return 0
def perProtocol(prefix, lists, sums, deltas, dt):
@@ -515,6 +548,8 @@ class nodestats:
self.biByTime[curtime] = bi
if bi is None:
bi = bisource.get(curtime)
+ if bi is None:
+ logger.warning('%s no blockinfo', path)
self.txPool.append(cur.get('algod_tx_pool_count{}'))
#logger.debug('%s: %r', path, cur)
verifyGood = cur.get('algod_agreement_proposal_verify_good{}')
diff --git a/test/scripts/e2e_subs/app-assets.sh b/test/scripts/e2e_subs/app-assets.sh
index b582733d3..98171a616 100755
--- a/test/scripts/e2e_subs/app-assets.sh
+++ b/test/scripts/e2e_subs/app-assets.sh
@@ -81,10 +81,10 @@ function asset-id {
}
APPACCT=$(python -c "import algosdk.encoding as e; print(e.encode_address(e.checksum(b'appID'+($APPID).to_bytes(8, 'big'))))")
-
+EXAMPLE_URL="http://example.com"
function asset-create {
amount=$1; shift
- ${gcmd} asset create --creator "$SMALL" --total "$amount" --decimals 0 "$@"
+ ${gcmd} asset create --creator "$SMALL" --total "$amount" --decimals 0 "$@" --asseturl "$EXAMPLE_URL"
}
function asset-deposit {
@@ -101,6 +101,10 @@ function clawback_addr {
grep -o -E 'Clawback address: [A-Z0-9]{58}' | awk '{print $3}'
}
+function asset_url {
+ grep -o -E 'URL:.*'|awk '{print $2}'
+}
+
function payin {
amount=$1; shift
${gcmd} clerk send -f "$SMALL" -t "$APPACCT" -a "$amount" "$@"
@@ -180,6 +184,8 @@ asset-optin --assetid "$ASSETID" -a $USER #opt in to asset
${gcmd} asset config --manager $SMALL --assetid $ASSETID --new-clawback $USER
cb_addr=$(${gcmd} asset info --assetid $ASSETID | clawback_addr)
[ "$cb_addr" = "$USER" ]
+url=$(${gcmd} asset info --assetid $ASSETID | asset_url)
+[ "$url" = "$EXAMPLE_URL" ]
${gcmd} asset send -f "$SMALL" -t "$USER" -a "1000" --assetid "$ASSETID" --clawback "$USER"
[ $(asset_bal "$USER") = 1000 ]
[ $(asset_bal "$SMALL") = 999000 ]
diff --git a/test/scripts/e2e_subs/asset-misc.sh b/test/scripts/e2e_subs/asset-misc.sh
index 9c43685ed..35198091e 100755
--- a/test/scripts/e2e_subs/asset-misc.sh
+++ b/test/scripts/e2e_subs/asset-misc.sh
@@ -102,6 +102,18 @@ fi
# case 3: asset created with manager, reserve, freezer, and clawback different from the creator
${gcmd} asset create --creator "${ACCOUNT}" --manager "${ACCOUNTB}" --reserve "${ACCOUNTC}" --freezer "${ACCOUNTD}" --clawback "${ACCOUNTE}" --name "${ASSET_NAME}" --unitname dma --total 1000000000000 --asseturl "${ASSET_URL}"
+# case 3a: asset info should fail if reserve address has not opted into the asset.
+EXPERROR='account asset info not found'
+RES=$(${gcmd} asset info --creator $ACCOUNT --unitname dma 2>&1 || true)
+if [[ $RES != *"${EXPERROR}"* ]]; then
+ date '+asset-misc FAIL asset info should fail unless reserve account was opted in %Y%m%d_%H%M%S'
+ exit 1
+else
+ echo ok
+fi
+
+# case 3b: Reserve address opts into the asset, and gets asset info successfully.
+${gcmd} asset optin --creator "${ACCOUNT}" --asset dma --account ${ACCOUNTC}
DIFF_MANAGER_ASSET_ID=$(${gcmd} asset info --creator $ACCOUNT --unitname dma|grep 'Asset ID'|awk '{ print $3 }')
DMA_MANAGER_ADDRESS=$(${gcmd} asset info --assetid ${DIFF_MANAGER_ASSET_ID} |grep 'Manager address'|awk '{ print $3 }')
diff --git a/test/scripts/e2e_subs/box-search.sh b/test/scripts/e2e_subs/box-search.sh
new file mode 100755
index 000000000..8803e8785
--- /dev/null
+++ b/test/scripts/e2e_subs/box-search.sh
@@ -0,0 +1,135 @@
+#!/bin/bash
+
+filename=$(basename "$0")
+scriptname="${filename%.*}"
+date "+${scriptname} start %Y%m%d_%H%M%S"
+
+set -e
+set -x
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+
+TEAL=test/scripts/e2e_subs/tealprogs
+
+gcmd="goal -w ${WALLET}"
+
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+
+# Version 8 clear program
+printf '#pragma version 8\nint 1' > "${TEMPDIR}/clear.teal"
+
+APPID=$(${gcmd} app create --creator "$ACCOUNT" --approval-prog=${TEAL}/boxes.teal --clear-prog "$TEMPDIR/clear.teal" --global-byteslices 0 --global-ints 0 --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }')
+
+# Fund the app account 10 algos
+APP_ACCOUNT=$(${gcmd} app info --app-id "$APPID" | grep "Application account" | awk '{print $3}')
+${gcmd} clerk send --to "$APP_ACCOUNT" --from "$ACCOUNT" --amount 10000000
+
+# Confirm that we are informed if no application boxes exist
+BOX_LIST=$(${gcmd} app box list --app-id "$APPID" 2>&1 || true)
+EXPECTED="No boxes found for appid $APPID"
+
+[ "$BOX_LIST" = "$EXPECTED" ]
+
+# Confirm that we are informed if a specific application box does not exist
+BOX_INFO=$(${gcmd} app box info --app-id "$APPID" --name "str:not_found" 2>&1 || true)
+EXPECTED="No box found for appid $APPID with name str:not_found"
+
+[ "$BOX_INFO" = "$EXPECTED" ]
+
+# Create several boxes
+BOX_NAMES=("str:box1" "str:with spaces" "b64:YmFzZTY0" "b64:AQIDBA==") # b64:YmFzZTY0 == str:base64, b64:AQIDBA== is not unicode
+BOX_VALUE="box value"
+B64_BOX_VALUE="b64:Ym94IHZhbHVlAAAAAAAAAAAAAAAAAAAA"
+
+for BOX_NAME in "${BOX_NAMES[@]}"
+do
+ # Create the box
+ ${gcmd} app call --from "$ACCOUNT" --app-id "$APPID" --box "$BOX_NAME" --app-arg "str:create" --app-arg "$BOX_NAME"
+
+ # Set box value
+ ${gcmd} app call --from "$ACCOUNT" --app-id "$APPID" --box "$BOX_NAME" --app-arg "str:set" --app-arg "$BOX_NAME" --app-arg "str:$BOX_VALUE"
+done
+
+# Confirm that we can get the values of each individual box
+for BOX_NAME in "${BOX_NAMES[@]}"
+do
+ ${gcmd} app box info --app-id "$APPID" --name "$BOX_NAME"
+ NAME=$(${gcmd} app box info --app-id "$APPID" --name "$BOX_NAME" | grep Name | tr -s ' ' | cut -d" " -f2-)
+ [ "$NAME" = "$BOX_NAME" ]
+
+ VALUE=$(${gcmd} app box info --app-id "$APPID" --name "$BOX_NAME" | grep Value | tr -s ' ' | cut -d" " -f2-)
+ [ "$VALUE" = "$B64_BOX_VALUE" ]
+done
+
+# Confirm that the account data representation knows about all the boxes
+APP_ACCOUNT_JSON_DUMP=$(${gcmd} account dump --address "$APP_ACCOUNT")
+ACTUAL_APP_ACCOUNT_NUM_BOXES=$(printf "$APP_ACCOUNT_JSON_DUMP" | jq '.tbx')
+EXPECTED_APP_ACCOUNT_NUM_BOXES=4
+ACTUAL_APP_ACCOUNT_BOX_BYTES=$(printf "$APP_ACCOUNT_JSON_DUMP" | jq '.tbxb')
+EXPECTED_APP_ACCOUNT_BOX_BYTES=121
+[ "$ACTUAL_APP_ACCOUNT_NUM_BOXES" -eq "$EXPECTED_APP_ACCOUNT_NUM_BOXES" ]
+[ "$ACTUAL_APP_ACCOUNT_BOX_BYTES" -eq "$EXPECTED_APP_ACCOUNT_BOX_BYTES" ]
+
+# Confirm that we can get a list of boxes belonging to a particular application
+BOX_LIST=$(${gcmd} app box list --app-id "$APPID")
+EXPECTED="str:box1
+str:with spaces
+str:base64
+b64:AQIDBA=="
+
+# shellcheck disable=SC2059
+[ "$(printf "$BOX_LIST" | sort)" = "$(printf "$EXPECTED" | sort)" ]
+
+# Confirm that we can limit the number of boxes returned
+BOX_LIST=$(${gcmd} app box list --app-id "$APPID" --max 4)
+[ "$(echo "$BOX_LIST" | wc -l)" -eq 4 ] # only one line
+# shellcheck disable=SC2143
+[ "$(grep -w "$BOX_LIST" <<< "$EXPECTED")" ] # actual box is in the expected list
+
+# Create and set a box in an atomic txn group:
+
+BOX_NAME="str:great box"
+echo "Create $BOX_NAME"
+${gcmd} app call --from "$ACCOUNT" --app-id "$APPID" --box "$BOX_NAME" --app-arg "str:create" --app-arg "$BOX_NAME" -o "$TEMPDIR/box_create.txn"
+
+echo "Set $BOX_NAME using $BOX_VALUE"
+${gcmd} app call --from "$ACCOUNT" --app-id "$APPID" --app-arg "str:set" --app-arg "$BOX_NAME" --app-arg "str:$BOX_VALUE" -o "$TEMPDIR/box_set.txn"
+
+# Group them, sign and broadcast:
+cat "$TEMPDIR/box_create.txn" "$TEMPDIR/box_set.txn" > "$TEMPDIR/box_create_n_set.txn"
+${gcmd} clerk group -i "$TEMPDIR/box_create_n_set.txn" -o "$TEMPDIR/box_group.txn"
+${gcmd} clerk sign -i "$TEMPDIR/box_group.txn" -o "$TEMPDIR/box_group.stx"
+${gcmd} clerk rawsend -f "$TEMPDIR/box_group.stx"
+
+echo "Confirm that NAME $BOX_NAME as expected"
+${gcmd} app box info --app-id "$APPID" --name "$BOX_NAME"
+NAME=$(${gcmd} app box info --app-id "$APPID" --name "$BOX_NAME" | grep Name | tr -s ' ' | cut -d" " -f2-)
+[ "$NAME" = "$BOX_NAME" ]
+
+echo "Confirm that VALUE $BOX_VALUE i.e. ($B64_BOX_VALUE) as expected"
+VALUE=$(${gcmd} app box info --app-id "$APPID" --name "$BOX_NAME" | grep Value | tr -s ' ' | cut -d" " -f2-)
+[ "$VALUE" = "$B64_BOX_VALUE" ]
+
+# Confirm that we can still get the list of boxes
+BOX_LIST=$(${gcmd} app box list --app-id "$APPID")
+EXPECTED="str:box1
+str:with spaces
+str:base64
+b64:AQIDBA==
+str:great box"
+
+# shellcheck disable=SC2059
+[ "$(printf "$BOX_LIST" | sort)" = "$(printf "$EXPECTED" | sort)" ]
+
+# Confirm that the account data representation still knows about all the boxes
+APP_ACCOUNT_JSON_DUMP=$(${gcmd} account dump --address "$APP_ACCOUNT")
+ACTUAL_APP_ACCOUNT_NUM_BOXES=$(printf "$APP_ACCOUNT_JSON_DUMP" | jq '.tbx')
+EXPECTED_APP_ACCOUNT_NUM_BOXES=5
+ACTUAL_APP_ACCOUNT_BOX_BYTES=$(printf "$APP_ACCOUNT_JSON_DUMP" | jq '.tbxb')
+EXPECTED_APP_ACCOUNT_BOX_BYTES=154
+[ "$ACTUAL_APP_ACCOUNT_NUM_BOXES" -eq "$EXPECTED_APP_ACCOUNT_NUM_BOXES" ]
+[ "$ACTUAL_APP_ACCOUNT_BOX_BYTES" -eq "$EXPECTED_APP_ACCOUNT_BOX_BYTES" ]
+
+date "+${scriptname} OK %Y%m%d_%H%M%S"
diff --git a/test/scripts/e2e_subs/e2e-app-simple.sh b/test/scripts/e2e_subs/e2e-app-simple.sh
index e770ee872..e1f1458ce 100755
--- a/test/scripts/e2e_subs/e2e-app-simple.sh
+++ b/test/scripts/e2e_subs/e2e-app-simple.sh
@@ -5,6 +5,7 @@ date '+app-simple-test start %Y%m%d_%H%M%S'
set -e
set -x
set -o pipefail
+set -o nounset
export SHELLOPTS
WALLET=$1
diff --git a/test/scripts/e2e_subs/rest-applications-endpoint.sh b/test/scripts/e2e_subs/rest-applications-endpoint.sh
index 18337a7c5..f66491ea8 100755
--- a/test/scripts/e2e_subs/rest-applications-endpoint.sh
+++ b/test/scripts/e2e_subs/rest-applications-endpoint.sh
@@ -31,5 +31,5 @@ call_and_verify "App parameter parsing error 1." "/v2/applications/-2" 400 "Inva
call_and_verify "App parameter parsing error 2." "/v2/applications/not-a-number" 400 "Invalid format for parameter application-id"
# Good request, but invalid query parameters
-call_and_verify "App invalid parameter" "/v2/applications/$APPID?this-should-fail=200" 400 'Unknown parameter detected: this-should-fail'
+call_and_verify "App invalid parameter" "/v2/applications/$APPID?this-should-not-fail=200" 200 '"global-state-schema":{"num-byte-slice":0,"num-uint":2}'
diff --git a/test/scripts/e2e_subs/rest-assets-endpoint.sh b/test/scripts/e2e_subs/rest-assets-endpoint.sh
index ddeeb2f71..9b6c77b36 100755
--- a/test/scripts/e2e_subs/rest-assets-endpoint.sh
+++ b/test/scripts/e2e_subs/rest-assets-endpoint.sh
@@ -30,5 +30,5 @@ call_and_verify "Asset parameter parsing error 1." "/v2/assets/-2" 400 "Invalid
call_and_verify "Asset parameter parsing error 2." "/v2/assets/not-a-number" 400 "Invalid format for parameter asset-id"
# Good request, but invalid query parameters
-call_and_verify "Asset invalid parameter" "/v2/assets/$ASSET_ID?this-should-fail=200" 400 'parameter detected: this-should-fail'
+call_and_verify "Asset invalid parameter" "/v2/assets/$ASSET_ID?this-should-not-fail=200" 200 '","decimals":19,"default-frozen":false,"freeze":"'
diff --git a/test/scripts/e2e_subs/tealprogs/boxes.teal b/test/scripts/e2e_subs/tealprogs/boxes.teal
new file mode 100644
index 000000000..a8885a7bb
--- /dev/null
+++ b/test/scripts/e2e_subs/tealprogs/boxes.teal
@@ -0,0 +1,60 @@
+// Copied directly from cmd/goal/examples/boxes.teal
+
+#pragma version 8
+ txn ApplicationID
+ bz end
+ txn ApplicationArgs 0 // [arg[0]] // fails if no args && app already exists
+ byte "create" // [arg[0], "create"] // create box named arg[1]
+ == // [arg[0]=?="create"]
+ bz del // "create" ? continue : goto del
+ int 24 // [24]
+ txn NumAppArgs // [24, NumAppArgs]
+ int 2 // [24, NumAppArgs, 2]
+ == // [24, NumAppArgs=?=2]
+ bnz default // WARNING: Assumes that when "create" provided, NumAppArgs >= 3
+ pop // get rid of 24 // NumAppArgs != 2
+ txn ApplicationArgs 2 // [arg[2]] // ERROR when NumAppArgs == 1
+ btoi // [btoi(arg[2])]
+default: // [24] // NumAppArgs >= 3
+ txn ApplicationArgs 1 // [24, arg[1]]
+ swap
+ box_create // [] // boxes: arg[1] -> [24]byte
+ assert
+ b end
+del: // delete box arg[1]
+ txn ApplicationArgs 0 // [arg[0]]
+ byte "delete" // [arg[0], "delete"]
+ == // [arg[0]=?="delete"]
+ bz set // "delete" ? continue : goto set
+ txn ApplicationArgs 1 // [arg[1]]
+ box_del // del boxes[arg[1]]
+ assert
+ b end
+set: // put arg[1] at start of box arg[0] ... so actually a _partial_ "set"
+ txn ApplicationArgs 0 // [arg[0]]
+ byte "set" // [arg[0], "set"]
+ == // [arg[0]=?="set"]
+ bz test // "set" ? continue : goto test
+ txn ApplicationArgs 1 // [arg[1]]
+ int 0 // [arg[1], 0]
+ txn ApplicationArgs 2 // [arg[1], 0, arg[2]]
+ box_replace // [] // boxes: arg[1] -> replace(boxes[arg[1]], 0, arg[2])
+ b end
+test: // fail unless arg[2] is the prefix of box arg[1]
+ txn ApplicationArgs 0 // [arg[0]]
+ byte "check" // [arg[0], "check"]
+ == // [arg[0]=?="check"]
+ bz bad // "check" ? continue : goto bad
+ txn ApplicationArgs 1 // [arg[1]]
+ int 0 // [arg[1], 0]
+ txn ApplicationArgs 2 // [arg[1], 0, arg[2]]
+ len // [arg[1], 0, len(arg[2])]
+ box_extract // [ boxes[arg[1]][0:len(arg[2])] ]
+ txn ApplicationArgs 2 // [ boxes[arg[1]][0:len(arg[2])], arg[2] ]
+ == // [ boxes[arg[1]][0:len(arg[2])]=?=arg[2] ]
+ assert // boxes[arg[1]].startswith(arg[2]) ? pop : ERROR
+ b end
+bad: // arg[0] ∉ {"create", "delete", "set", "check"}
+ err
+end:
+ int 1
diff --git a/test/testdata/configs/config-v24.json b/test/testdata/configs/config-v24.json
index 54bd0f9f2..0f8ea8350 100644
--- a/test/testdata/configs/config-v24.json
+++ b/test/testdata/configs/config-v24.json
@@ -49,6 +49,7 @@
"EnableRequestLogger": false,
"EnableRuntimeMetrics": false,
"EnableTopAccountsReporting": false,
+ "EnableUsageLog": false,
"EnableVerbosedTransactionSyncLogging": false,
"EndpointAddress": "127.0.0.1:0",
"FallbackDNSResolverAddress": "",
@@ -63,8 +64,8 @@
"LogArchiveMaxAge": "",
"LogArchiveName": "node.archive.log",
"LogSizeLimit": 1073741824,
- "MaxAcctLookback": 4,
"MaxAPIResourcesPerAccount": 100000,
+ "MaxAcctLookback": 4,
"MaxCatchpointDownloadDuration": 7200000000000,
"MaxConnectionsPerIP": 30,
"MinCatchpointFileDownloadBytesPerSecond": 20480,
diff --git a/test/testdata/configs/config-v25.json b/test/testdata/configs/config-v25.json
new file mode 100644
index 000000000..8647d9358
--- /dev/null
+++ b/test/testdata/configs/config-v25.json
@@ -0,0 +1,108 @@
+{
+ "Version": 25,
+ "AccountUpdatesStatsInterval": 5000000000,
+ "AccountsRebuildSynchronousMode": 1,
+ "AgreementIncomingBundlesQueueLength": 7,
+ "AgreementIncomingProposalsQueueLength": 25,
+ "AgreementIncomingVotesQueueLength": 10000,
+ "AnnounceParticipationKey": true,
+ "Archival": false,
+ "BaseLoggerDebugLevel": 4,
+ "BlockServiceCustomFallbackEndpoints": "",
+ "BroadcastConnectionsLimit": -1,
+ "CadaverSizeTarget": 0,
+ "CatchpointFileHistoryLength": 365,
+ "CatchpointInterval": 10000,
+ "CatchpointTracking": 0,
+ "CatchupBlockDownloadRetryAttempts": 1000,
+ "CatchupBlockValidateMode": 0,
+ "CatchupFailurePeerRefreshRate": 10,
+ "CatchupGossipBlockFetchTimeoutSec": 4,
+ "CatchupHTTPBlockFetchTimeoutSec": 4,
+ "CatchupLedgerDownloadRetryAttempts": 50,
+ "CatchupParallelBlocks": 16,
+ "ConnectionsRateLimitingCount": 60,
+ "ConnectionsRateLimitingWindowSeconds": 1,
+ "DNSBootstrapID": "<network>.algorand.network",
+ "DNSSecurityFlags": 1,
+ "DeadlockDetection": 0,
+ "DeadlockDetectionThreshold": 30,
+ "DisableLocalhostConnectionRateLimit": true,
+ "DisableNetworking": false,
+ "DisableOutgoingConnectionThrottling": false,
+ "EnableAccountUpdatesStats": false,
+ "EnableAgreementReporting": false,
+ "EnableAgreementTimeMetrics": false,
+ "EnableAssembleStats": false,
+ "EnableBlockService": false,
+ "EnableBlockServiceFallbackToArchiver": true,
+ "EnableCatchupFromArchiveServers": false,
+ "EnableDeveloperAPI": false,
+ "EnableGossipBlockService": true,
+ "EnableIncomingMessageFilter": false,
+ "EnableLedgerService": false,
+ "EnableMetricReporting": false,
+ "EnableOutgoingNetworkMessageFiltering": true,
+ "EnablePingHandler": true,
+ "EnableProcessBlockStats": false,
+ "EnableProfiler": false,
+ "EnableRequestLogger": false,
+ "EnableRuntimeMetrics": false,
+ "EnableTopAccountsReporting": false,
+ "EnableUsageLog": false,
+ "EnableVerbosedTransactionSyncLogging": false,
+ "EndpointAddress": "127.0.0.1:0",
+ "FallbackDNSResolverAddress": "",
+ "ForceFetchTransactions": false,
+ "ForceRelayMessages": false,
+ "GossipFanout": 4,
+ "IncomingConnectionsLimit": 800,
+ "IncomingMessageFilterBucketCount": 5,
+ "IncomingMessageFilterBucketSize": 512,
+ "IsIndexerActive": false,
+ "LedgerSynchronousMode": 2,
+ "LogArchiveMaxAge": "",
+ "LogArchiveName": "node.archive.log",
+ "LogSizeLimit": 1073741824,
+ "MaxAcctLookback": 4,
+ "MaxAPIBoxPerApplication": 100000,
+ "MaxAPIResourcesPerAccount": 100000,
+ "MaxCatchpointDownloadDuration": 7200000000000,
+ "MaxConnectionsPerIP": 30,
+ "MinCatchpointFileDownloadBytesPerSecond": 20480,
+ "NetAddress": "",
+ "NetworkMessageTraceServer": "",
+ "NetworkProtocolVersion": "",
+ "NodeExporterListenAddress": ":9100",
+ "NodeExporterPath": "./node_exporter",
+ "OptimizeAccountsDatabaseOnStartup": false,
+ "OutgoingMessageFilterBucketCount": 3,
+ "OutgoingMessageFilterBucketSize": 128,
+ "ParticipationKeysRefreshInterval": 60000000000,
+ "PeerConnectionsUpdateInterval": 3600,
+ "PeerPingPeriodSeconds": 0,
+ "PriorityPeers": {},
+ "ProposalAssemblyTime": 500000000,
+ "PublicAddress": "",
+ "ReconnectTime": 60000000000,
+ "ReservedFDs": 256,
+ "RestConnectionsHardLimit": 2048,
+ "RestConnectionsSoftLimit": 1024,
+ "RestReadTimeoutSeconds": 15,
+ "RestWriteTimeoutSeconds": 120,
+ "RunHosted": false,
+ "SuggestedFeeBlockHistory": 3,
+ "SuggestedFeeSlidingWindowSize": 50,
+ "TLSCertFile": "",
+ "TLSKeyFile": "",
+ "TelemetryToLog": true,
+ "TransactionSyncDataExchangeRate": 0,
+ "TransactionSyncSignificantMessageThreshold": 0,
+ "TxPoolExponentialIncreaseFactor": 2,
+ "TxPoolSize": 75000,
+ "TxSyncIntervalSeconds": 60,
+ "TxSyncServeResponseSize": 1000000,
+ "TxSyncTimeoutSeconds": 30,
+ "UseXForwardedForAddressField": "",
+ "VerifiedTranscationsCacheSize": 150000
+}
diff --git a/test/testdata/deployednettemplates/generate-recipe/generate_network.py b/test/testdata/deployednettemplates/generate-recipe/generate_network.py
index aeeef4384..0a92aed20 100755
--- a/test/testdata/deployednettemplates/generate-recipe/generate_network.py
+++ b/test/testdata/deployednettemplates/generate-recipe/generate_network.py
@@ -31,13 +31,13 @@ def build_netgoal_params(template_dict):
instances = template_dict['instances']
relay_count = 0
- participating_node_count = 0
- non_participating_node_count = 0
+ participating_instance_count = 0
+ non_participating_instance_count = 0
for group in template_dict['groups']:
relay_count += getInstanceCount(instances['relays'], group['percent']['relays'])
- participating_node_count += getInstanceCount(instances['participatingNodes'], group['percent']['participatingNodes'])
- non_participating_node_count += getInstanceCount(instances['nonParticipatingNodes'], group['percent']['nonParticipatingNodes'])
+ participating_instance_count += getInstanceCount(instances['participatingNodes'], group['percent']['participatingNodes'])
+ non_participating_instance_count += getInstanceCount(instances['nonParticipatingNodes'], group['percent']['nonParticipatingNodes'])
relay_config = instances['relays']['config']
participating_node_config = instances['participatingNodes']['config']
@@ -45,13 +45,15 @@ def build_netgoal_params(template_dict):
wallets_count = template_dict['network']['wallets']
nodes_count = template_dict['network']['nodes']
+ npn_count = template_dict['network']['npn']
return [
'-w', str(wallets_count),
'-R', str(relay_count),
- '-N', str(participating_node_count),
- '-H', str(non_participating_node_count),
+ '-N', str(participating_instance_count),
+ '-X', str(non_participating_instance_count),
'-n', str(nodes_count),
+ '-x', str(npn_count),
'--relay-template', relay_config,
'--node-template', participating_node_config,
'--non-participating-node-template', non_participating_node_config
@@ -72,7 +74,7 @@ def build_genesis(template_path, netgoal_params, template_dict):
]
args.extend(netgoal_params)
netgoal(args, template_path)
- if template_dict['network']['ConsensusProtocol']:
+ if 'ConsensusProtocol' in template_dict['network']:
updateProtocol(f"{template_path}/generated/genesis.json", template_dict['network']['ConsensusProtocol'])
def updateProtocol(genesis_path, consensus_protocol):
diff --git a/test/testdata/deployednettemplates/recipes/README.md b/test/testdata/deployednettemplates/recipes/README.md
new file mode 100644
index 000000000..214c47aab
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/README.md
@@ -0,0 +1,19 @@
+# Recipes
+
+Most of the recipes' net.json and genesis.json use one of the following methods to call `netgoal generate`:
+1. `Makefile`
+2. `python3 {GO_ALGORAND_PATH}/test/testdata/deployednettemplates/generate-recipe/generate_network.py -f {PATH_TO}/network-tpl.json`
+
+Details for `netgoal generate` can be found in the binary with:
+```
+netgoal generate -h
+```
+
+Source code for netgoal can be found in `{GO_ALGORAND_PATH}/cmd/netgoal/generate.go`
+[Documentation](../../../../cmd/netgoal/README.md)
+
+Make sure you set the PATH and GOPATH variables to the netgoal binary's path.
+
+## Custom Recipe
+Leverages the generate_network.py script and has unique instructions found in the README:
+https://github.com/algorand/go-algorand/tree/master/test/testdata/deployednettemplates/recipes/custom
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile b/test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile
index 2a7d45039..4792348ed 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 20 -R 1 -N 20 -n 20 -H 1 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 20 -R 1 -N 20 -n 20 --npn-algod-nodes 1 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: topology.json net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/Makefile b/test/testdata/deployednettemplates/recipes/alphanet/Makefile
index 4cb3c207d..7b89472ad 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet/Makefile
+++ b/test/testdata/deployednettemplates/recipes/alphanet/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 20 -R 5 -N 20 -n 20 -H 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 20 -R 5 -N 20 -n 20 --npn-algod-nodes 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: topology.json net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py b/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py
index ae4344210..cc83dde97 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py
+++ b/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py
@@ -2,7 +2,7 @@ import json
import os
node_types = {"R":5, "N":20, "NPN":10}
-node_size = {"R":"-c5d.4xl", "N":"-c5d.4xl", "NPN":"-c5d.4xl"}
+node_size = {"R":"-c5d.4xl", "N":"-c5d.2xl", "NPN":"-c5d.2xl"}
regions = [
"AWS-US-EAST-1",
"AWS-US-WEST-1",
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/genesis.json b/test/testdata/deployednettemplates/recipes/alphanet/genesis.json
index d0c1b7e41..41578ff6a 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet/genesis.json
+++ b/test/testdata/deployednettemplates/recipes/alphanet/genesis.json
@@ -3,7 +3,7 @@
"VersionModifier": "",
"ConsensusProtocol": "alpha4",
"FirstPartKeyRound": 0,
- "LastPartKeyRound": 50000,
+ "LastPartKeyRound": 3000000,
"PartKeyDilution": 0,
"Wallets": [
{
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/topology.json b/test/testdata/deployednettemplates/recipes/alphanet/topology.json
index 35cb3a098..3062f4eff 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet/topology.json
+++ b/test/testdata/deployednettemplates/recipes/alphanet/topology.json
@@ -22,123 +22,123 @@
},
{
"Name": "N1-alphanet",
- "Template": "AWS-US-EAST-1-c5d.4xl"
+ "Template": "AWS-US-EAST-1-c5d.2xl"
},
{
"Name": "N2-alphanet",
- "Template": "AWS-US-WEST-1-c5d.4xl"
+ "Template": "AWS-US-WEST-1-c5d.2xl"
},
{
"Name": "N3-alphanet",
- "Template": "AWS-SA-EAST-1-c5d.4xl"
+ "Template": "AWS-SA-EAST-1-c5d.2xl"
},
{
"Name": "N4-alphanet",
- "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ "Template": "AWS-EU-NORTH-1-c5d.2xl"
},
{
"Name": "N5-alphanet",
- "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.2xl"
},
{
"Name": "N6-alphanet",
- "Template": "AWS-US-EAST-1-c5d.4xl"
+ "Template": "AWS-US-EAST-1-c5d.2xl"
},
{
"Name": "N7-alphanet",
- "Template": "AWS-US-WEST-1-c5d.4xl"
+ "Template": "AWS-US-WEST-1-c5d.2xl"
},
{
"Name": "N8-alphanet",
- "Template": "AWS-SA-EAST-1-c5d.4xl"
+ "Template": "AWS-SA-EAST-1-c5d.2xl"
},
{
"Name": "N9-alphanet",
- "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ "Template": "AWS-EU-NORTH-1-c5d.2xl"
},
{
"Name": "N10-alphanet",
- "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.2xl"
},
{
"Name": "N11-alphanet",
- "Template": "AWS-US-EAST-1-c5d.4xl"
+ "Template": "AWS-US-EAST-1-c5d.2xl"
},
{
"Name": "N12-alphanet",
- "Template": "AWS-US-WEST-1-c5d.4xl"
+ "Template": "AWS-US-WEST-1-c5d.2xl"
},
{
"Name": "N13-alphanet",
- "Template": "AWS-SA-EAST-1-c5d.4xl"
+ "Template": "AWS-SA-EAST-1-c5d.2xl"
},
{
"Name": "N14-alphanet",
- "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ "Template": "AWS-EU-NORTH-1-c5d.2xl"
},
{
"Name": "N15-alphanet",
- "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.2xl"
},
{
"Name": "N16-alphanet",
- "Template": "AWS-US-EAST-1-c5d.4xl"
+ "Template": "AWS-US-EAST-1-c5d.2xl"
},
{
"Name": "N17-alphanet",
- "Template": "AWS-US-WEST-1-c5d.4xl"
+ "Template": "AWS-US-WEST-1-c5d.2xl"
},
{
"Name": "N18-alphanet",
- "Template": "AWS-SA-EAST-1-c5d.4xl"
+ "Template": "AWS-SA-EAST-1-c5d.2xl"
},
{
"Name": "N19-alphanet",
- "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ "Template": "AWS-EU-NORTH-1-c5d.2xl"
},
{
"Name": "N20-alphanet",
- "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.2xl"
},
{
"Name": "NPN1-alphanet",
- "Template": "AWS-US-EAST-1-c5d.4xl"
+ "Template": "AWS-US-EAST-1-c5d.2xl"
},
{
"Name": "NPN2-alphanet",
- "Template": "AWS-US-WEST-1-c5d.4xl"
+ "Template": "AWS-US-WEST-1-c5d.2xl"
},
{
"Name": "NPN3-alphanet",
- "Template": "AWS-SA-EAST-1-c5d.4xl"
+ "Template": "AWS-SA-EAST-1-c5d.2xl"
},
{
"Name": "NPN4-alphanet",
- "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ "Template": "AWS-EU-NORTH-1-c5d.2xl"
},
{
"Name": "NPN5-alphanet",
- "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.2xl"
},
{
"Name": "NPN6-alphanet",
- "Template": "AWS-US-EAST-1-c5d.4xl"
+ "Template": "AWS-US-EAST-1-c5d.2xl"
},
{
"Name": "NPN7-alphanet",
- "Template": "AWS-US-WEST-1-c5d.4xl"
+ "Template": "AWS-US-WEST-1-c5d.2xl"
},
{
"Name": "NPN8-alphanet",
- "Template": "AWS-SA-EAST-1-c5d.4xl"
+ "Template": "AWS-SA-EAST-1-c5d.2xl"
},
{
"Name": "NPN9-alphanet",
- "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ "Template": "AWS-EU-NORTH-1-c5d.2xl"
},
{
"Name": "NPN10-alphanet",
- "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.2xl"
}
]
}
diff --git a/test/testdata/deployednettemplates/recipes/betanet-model-2/Makefile b/test/testdata/deployednettemplates/recipes/betanet-model-2/Makefile
index fd573230b..e04c879bb 100644
--- a/test/testdata/deployednettemplates/recipes/betanet-model-2/Makefile
+++ b/test/testdata/deployednettemplates/recipes/betanet-model-2/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 20 -R 5 -N 20 -n 20 -H 20 -X 20 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 20 -R 5 -N 20 -n 20 --npn-algod-nodes 20 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: topology.json net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile
index 626f3ff85..06c946a59 100644
--- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile
@@ -1,15 +1,21 @@
-PARAMS=-w 100 -R 8 -N 20 -n 100 -H 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
-FILEPARAMS=--rounds 5000 --ntxns 1000 --naccounts 3000000 --nassets 20000 --napps 20000 --wallet-name "wallet1" --bal 100000 --bal 1000000
+# bootstrappedScenario is scenario1s but with pre-built 30_000_000 accountdb
+PARAMS=-w 20 -R 8 -N 20 -n 20 --npn-algod-nodes 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+FILEPARAMS=--rounds 1600 --ntxns 20000 --naccounts 30000000 --nassets 20000 --napps 20000 --wallet-name "wallet1" --bal 100000 --bal 1000000
-all: net.json genesis.json boostrappedFile.json
+all: net.json genesis.json topology.json boostrappedFile.json
-net.json: node.json nonPartNode.json ${GOPATH}/bin/netgoal
+net.json: node.json nonPartNode.json ${GOPATH}/bin/netgoal Makefile
netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS}
-genesis.json: ${GOPATH}/bin/netgoal
- netgoal generate -t genesis -r /tmp/wat -o genesis.json ${PARAMS}
+genesis.json: ${GOPATH}/bin/netgoal Makefile
+ netgoal generate -t genesis -r /tmp/wat -o genesis.l.json ${PARAMS}
+ jq '.LastPartKeyRound=22000|.NetworkName="s1s"|.ConsensusProtocol="future"' < genesis.l.json > genesis.json
+ rm genesis.l.json
-boostrappedFile.json: ${GOPATH}/bin/netgoal
+topology.json: gen_topology.py
+ python3 gen_topology.py
+
+boostrappedFile.json: ${GOPATH}/bin/netgoal Makefile
netgoal generate -t loadingFile -r /tmp/wat -o boostrappedFile.json ${FILEPARAMS}
clean:
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json
index 82ebb6b3f..9d1988766 100644
--- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json
@@ -1,6 +1,6 @@
{
- "numRounds": 5000,
- "roundTransactionsCount": 1000,
+ "numRounds": 1600,
+ "roundTransactionsCount": 20000,
"generatedAccountsCount": 30000000,
"generatedAssetsCount": 20000,
"generatedApplicationCount": 20000,
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/gen_topology.py b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/gen_topology.py
index 69e156293..39081468c 100644
--- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/gen_topology.py
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/gen_topology.py
@@ -1,27 +1,27 @@
+#!/usr/bin/env python3
+# bootstrappedScenario is scenario1s but with a prebuilt accountdb
node_types = {"R":8, "N":20, "NPN":10}
-node_size = {"R":"-m5d.4xl", "N":"-m5d.4xl", "NPN":"-m5d.4xl"}
+node_size = {"R":"-m5d.4xl", "N":"-m5d.2xl", "NPN":"-m5d.2xl"}
regions = [
"AWS-US-EAST-2"
]
-f = open("topology.json", "w")
-f.write("{ 'Hosts':\n [")
+import json
region_count = len(regions)
-first = True
-for x in node_types:
+hosts = []
+for x in sorted(node_types.keys()):
node_type = x
node_count = node_types[x]
region_size = node_size[x]
for i in range(node_count):
node_name = node_type + str(i+1)
region = regions[i%region_count]
- if (first ):
- first = False
- else:
- f.write(",")
- f.write ("\n {\n 'Name': '" + node_name + "',\n 'Template': '" + region + region_size + "'\n }" )
-
-f.write("\n ]\n}\n")
-f.close()
+ hosts.append({
+ "Name": node_name,
+ "Template": region + region_size,
+ })
+with open("topology.json", "w") as f:
+ topology = {"Hosts": hosts}
+ json.dump(topology, f, indent=2, sort_keys=True)
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json
index 8200ee38c..293e4c268 100644
--- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json
@@ -1,1014 +1,164 @@
{
- "NetworkName": "",
- "VersionModifier": "",
- "ConsensusProtocol": "",
- "FirstPartKeyRound": 0,
- "LastPartKeyRound": 50000,
- "PartKeyDilution": 0,
- "Wallets": [
- {
- "Name": "Wallet1",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet2",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet3",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet4",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet5",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet6",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet7",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet8",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet9",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet10",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet11",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet12",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet13",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet14",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet15",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet16",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet17",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet18",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet19",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet20",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet21",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet22",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet23",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet24",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet25",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet26",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet27",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet28",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet29",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet30",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet31",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet32",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet33",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet34",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet35",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet36",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet37",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet38",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet39",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet40",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet41",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet42",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet43",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet44",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet45",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet46",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet47",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet48",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet49",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet50",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet51",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet52",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet53",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet54",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet55",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet56",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet57",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet58",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet59",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet60",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet61",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet62",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet63",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet64",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet65",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet66",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet67",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet68",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet69",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet70",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet71",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet72",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet73",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet74",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet75",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet76",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet77",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet78",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet79",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet80",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet81",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet82",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet83",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet84",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet85",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet86",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet87",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet88",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet89",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet90",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet91",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet92",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet93",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet94",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet95",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet96",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet97",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet98",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet99",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet100",
- "Stake": 0.5,
- "Online": true
- },
- {
- "Name": "Wallet101",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet102",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet103",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet104",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet105",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet106",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet107",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet108",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet109",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet110",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet111",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet112",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet113",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet114",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet115",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet116",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet117",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet118",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet119",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet120",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet121",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet122",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet123",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet124",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet125",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet126",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet127",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet128",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet129",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet130",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet131",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet132",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet133",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet134",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet135",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet136",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet137",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet138",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet139",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet140",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet141",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet142",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet143",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet144",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet145",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet146",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet147",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet148",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet149",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet150",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet151",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet152",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet153",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet154",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet155",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet156",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet157",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet158",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet159",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet160",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet161",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet162",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet163",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet164",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet165",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet166",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet167",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet168",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet169",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet170",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet171",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet172",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet173",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet174",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet175",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet176",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet177",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet178",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet179",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet180",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet181",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet182",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet183",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet184",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet185",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet186",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet187",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet188",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet189",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet190",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet191",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet192",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet193",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet194",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet195",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet196",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet197",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet198",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet199",
- "Stake": 0.5,
- "Online": false
- },
- {
- "Name": "Wallet200",
- "Stake": 0.5,
- "Online": false
- }
- ],
- "FeeSink": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
- "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
- "DevMode": false,
- "Comment": ""
+ "NetworkName": "s1s",
+ "VersionModifier": "",
+ "ConsensusProtocol": "future",
+ "FirstPartKeyRound": 0,
+ "LastPartKeyRound": 22000,
+ "PartKeyDilution": 0,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet4",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet5",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet6",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet7",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet8",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet9",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet10",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet11",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet12",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet13",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet14",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet15",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet16",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet17",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet18",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet19",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet20",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet21",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet22",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet23",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet24",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet25",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet26",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet27",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet28",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet29",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet30",
+ "Stake": 5,
+ "Online": false
+ }
+ ],
+ "FeeSink": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "DevMode": false,
+ "Comment": ""
}
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json
index 0afb4418a..ffb3bb652 100644
--- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json
@@ -172,74 +172,7 @@
"ParticipationOnly": false
}
],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node21",
- "Wallets": [
- {
- "Name": "Wallet2",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node41",
- "Wallets": [
- {
- "Name": "Wallet3",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
- },
- {
- "Name": "node61",
- "Wallets": [
- {
- "Name": "Wallet4",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node81",
- "Wallets": [
- {
- "Name": "Wallet5",
- "ParticipationOnly": false
- }
- ],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -259,78 +192,11 @@
"Name": "node2",
"Wallets": [
{
- "Name": "Wallet6",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node22",
- "Wallets": [
- {
- "Name": "Wallet7",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node42",
- "Wallets": [
- {
- "Name": "Wallet8",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node62",
- "Wallets": [
- {
- "Name": "Wallet9",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node82",
- "Wallets": [
- {
- "Name": "Wallet10",
+ "Name": "Wallet2",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -350,61 +216,11 @@
"Name": "node3",
"Wallets": [
{
- "Name": "Wallet11",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node23",
- "Wallets": [
- {
- "Name": "Wallet12",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node43",
- "Wallets": [
- {
- "Name": "Wallet13",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node63",
- "Wallets": [
- {
- "Name": "Wallet14",
+ "Name": "Wallet3",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -413,23 +229,6 @@
"EnableService": false,
"EnableBlockStats": false,
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node83",
- "Wallets": [
- {
- "Name": "Wallet15",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -441,78 +240,11 @@
"Name": "node4",
"Wallets": [
{
- "Name": "Wallet16",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node24",
- "Wallets": [
- {
- "Name": "Wallet17",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node44",
- "Wallets": [
- {
- "Name": "Wallet18",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node64",
- "Wallets": [
- {
- "Name": "Wallet19",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node84",
- "Wallets": [
- {
- "Name": "Wallet20",
+ "Name": "Wallet4",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -532,78 +264,11 @@
"Name": "node5",
"Wallets": [
{
- "Name": "Wallet21",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node25",
- "Wallets": [
- {
- "Name": "Wallet22",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node45",
- "Wallets": [
- {
- "Name": "Wallet23",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node65",
- "Wallets": [
- {
- "Name": "Wallet24",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
- },
- {
- "Name": "node85",
- "Wallets": [
- {
- "Name": "Wallet25",
+ "Name": "Wallet5",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -623,78 +288,11 @@
"Name": "node6",
"Wallets": [
{
- "Name": "Wallet26",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node26",
- "Wallets": [
- {
- "Name": "Wallet27",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node46",
- "Wallets": [
- {
- "Name": "Wallet28",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node66",
- "Wallets": [
- {
- "Name": "Wallet29",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node86",
- "Wallets": [
- {
- "Name": "Wallet30",
+ "Name": "Wallet6",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -714,10 +312,11 @@
"Name": "node7",
"Wallets": [
{
- "Name": "Wallet31",
+ "Name": "Wallet7",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
@@ -726,74 +325,6 @@
"EnableService": false,
"EnableBlockStats": true,
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
- },
- {
- "Name": "node27",
- "Wallets": [
- {
- "Name": "Wallet32",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node47",
- "Wallets": [
- {
- "Name": "Wallet33",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node67",
- "Wallets": [
- {
- "Name": "Wallet34",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node87",
- "Wallets": [
- {
- "Name": "Wallet35",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -805,10 +336,11 @@
"Name": "node8",
"Wallets": [
{
- "Name": "Wallet36",
+ "Name": "Wallet8",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
@@ -817,74 +349,6 @@
"EnableService": false,
"EnableBlockStats": true,
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
- },
- {
- "Name": "node28",
- "Wallets": [
- {
- "Name": "Wallet37",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node48",
- "Wallets": [
- {
- "Name": "Wallet38",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node68",
- "Wallets": [
- {
- "Name": "Wallet39",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node88",
- "Wallets": [
- {
- "Name": "Wallet40",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -896,78 +360,11 @@
"Name": "node9",
"Wallets": [
{
- "Name": "Wallet41",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
- },
- {
- "Name": "node29",
- "Wallets": [
- {
- "Name": "Wallet42",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node49",
- "Wallets": [
- {
- "Name": "Wallet43",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node69",
- "Wallets": [
- {
- "Name": "Wallet44",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node89",
- "Wallets": [
- {
- "Name": "Wallet45",
+ "Name": "Wallet9",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
@@ -987,78 +384,11 @@
"Name": "node10",
"Wallets": [
{
- "Name": "Wallet46",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node30",
- "Wallets": [
- {
- "Name": "Wallet47",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node50",
- "Wallets": [
- {
- "Name": "Wallet48",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node70",
- "Wallets": [
- {
- "Name": "Wallet49",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node90",
- "Wallets": [
- {
- "Name": "Wallet50",
+ "Name": "Wallet10",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -1078,78 +408,11 @@
"Name": "node11",
"Wallets": [
{
- "Name": "Wallet51",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node31",
- "Wallets": [
- {
- "Name": "Wallet52",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node51",
- "Wallets": [
- {
- "Name": "Wallet53",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node71",
- "Wallets": [
- {
- "Name": "Wallet54",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
- },
- {
- "Name": "node91",
- "Wallets": [
- {
- "Name": "Wallet55",
+ "Name": "Wallet11",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -1169,78 +432,11 @@
"Name": "node12",
"Wallets": [
{
- "Name": "Wallet56",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node32",
- "Wallets": [
- {
- "Name": "Wallet57",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
- },
- {
- "Name": "node52",
- "Wallets": [
- {
- "Name": "Wallet58",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node72",
- "Wallets": [
- {
- "Name": "Wallet59",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node92",
- "Wallets": [
- {
- "Name": "Wallet60",
+ "Name": "Wallet12",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -1260,78 +456,11 @@
"Name": "node13",
"Wallets": [
{
- "Name": "Wallet61",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node33",
- "Wallets": [
- {
- "Name": "Wallet62",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
- },
- {
- "Name": "node53",
- "Wallets": [
- {
- "Name": "Wallet63",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node73",
- "Wallets": [
- {
- "Name": "Wallet64",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node93",
- "Wallets": [
- {
- "Name": "Wallet65",
+ "Name": "Wallet13",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -1351,78 +480,11 @@
"Name": "node14",
"Wallets": [
{
- "Name": "Wallet66",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node34",
- "Wallets": [
- {
- "Name": "Wallet67",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node54",
- "Wallets": [
- {
- "Name": "Wallet68",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
- },
- {
- "Name": "node74",
- "Wallets": [
- {
- "Name": "Wallet69",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node94",
- "Wallets": [
- {
- "Name": "Wallet70",
+ "Name": "Wallet14",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -1442,61 +504,11 @@
"Name": "node15",
"Wallets": [
{
- "Name": "Wallet71",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node35",
- "Wallets": [
- {
- "Name": "Wallet72",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node55",
- "Wallets": [
- {
- "Name": "Wallet73",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node75",
- "Wallets": [
- {
- "Name": "Wallet74",
+ "Name": "Wallet15",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -1505,23 +517,6 @@
"EnableService": false,
"EnableBlockStats": false,
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node95",
- "Wallets": [
- {
- "Name": "Wallet75",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -1533,78 +528,11 @@
"Name": "node16",
"Wallets": [
{
- "Name": "Wallet76",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node36",
- "Wallets": [
- {
- "Name": "Wallet77",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
- },
- {
- "Name": "node56",
- "Wallets": [
- {
- "Name": "Wallet78",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
- },
- {
- "Name": "node76",
- "Wallets": [
- {
- "Name": "Wallet79",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node96",
- "Wallets": [
- {
- "Name": "Wallet80",
+ "Name": "Wallet16",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -1624,78 +552,11 @@
"Name": "node17",
"Wallets": [
{
- "Name": "Wallet81",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node37",
- "Wallets": [
- {
- "Name": "Wallet82",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node57",
- "Wallets": [
- {
- "Name": "Wallet83",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node77",
- "Wallets": [
- {
- "Name": "Wallet84",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node97",
- "Wallets": [
- {
- "Name": "Wallet85",
+ "Name": "Wallet17",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -1715,61 +576,11 @@
"Name": "node18",
"Wallets": [
{
- "Name": "Wallet86",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node38",
- "Wallets": [
- {
- "Name": "Wallet87",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
- },
- {
- "Name": "node58",
- "Wallets": [
- {
- "Name": "Wallet88",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node78",
- "Wallets": [
- {
- "Name": "Wallet89",
+ "Name": "Wallet18",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -1778,23 +589,6 @@
"EnableService": false,
"EnableBlockStats": false,
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node98",
- "Wallets": [
- {
- "Name": "Wallet90",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -1806,78 +600,11 @@
"Name": "node19",
"Wallets": [
{
- "Name": "Wallet91",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node39",
- "Wallets": [
- {
- "Name": "Wallet92",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node59",
- "Wallets": [
- {
- "Name": "Wallet93",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node79",
- "Wallets": [
- {
- "Name": "Wallet94",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node99",
- "Wallets": [
- {
- "Name": "Wallet95",
+ "Name": "Wallet19",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -1897,78 +624,11 @@
"Name": "node20",
"Wallets": [
{
- "Name": "Wallet96",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node40",
- "Wallets": [
- {
- "Name": "Wallet97",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node60",
- "Wallets": [
- {
- "Name": "Wallet98",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node80",
- "Wallets": [
- {
- "Name": "Wallet99",
- "ParticipationOnly": false
- }
- ],
- "APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
- "MetricsURI": "{{MetricsURI}}",
- "EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
- },
- {
- "Name": "node100",
- "Wallets": [
- {
- "Name": "Wallet100",
+ "Name": "Wallet20",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -1988,43 +648,7 @@
"Name": "nonParticipatingNode1",
"Wallets": [
{
- "Name": "Wallet101",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet111",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet121",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet131",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet141",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet151",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet161",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet171",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet181",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet191",
+ "Name": "Wallet21",
"ParticipationOnly": false
}
],
@@ -2034,7 +658,7 @@
"EnableMetrics": false,
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
}
]
},
@@ -2046,43 +670,7 @@
"Name": "nonParticipatingNode2",
"Wallets": [
{
- "Name": "Wallet102",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet112",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet122",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet132",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet142",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet152",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet162",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet172",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet182",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet192",
+ "Name": "Wallet22",
"ParticipationOnly": false
}
],
@@ -2092,7 +680,7 @@
"EnableMetrics": false,
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
}
]
},
@@ -2104,43 +692,7 @@
"Name": "nonParticipatingNode3",
"Wallets": [
{
- "Name": "Wallet103",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet113",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet123",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet133",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet143",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet153",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet163",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet173",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet183",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet193",
+ "Name": "Wallet23",
"ParticipationOnly": false
}
],
@@ -2150,7 +702,7 @@
"EnableMetrics": false,
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
}
]
},
@@ -2162,43 +714,7 @@
"Name": "nonParticipatingNode4",
"Wallets": [
{
- "Name": "Wallet104",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet114",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet124",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet134",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet144",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet154",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet164",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet174",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet184",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet194",
+ "Name": "Wallet24",
"ParticipationOnly": false
}
],
@@ -2208,7 +724,7 @@
"EnableMetrics": false,
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
}
]
},
@@ -2220,43 +736,7 @@
"Name": "nonParticipatingNode5",
"Wallets": [
{
- "Name": "Wallet105",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet115",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet125",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet135",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet145",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet155",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet165",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet175",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet185",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet195",
+ "Name": "Wallet25",
"ParticipationOnly": false
}
],
@@ -2266,7 +746,7 @@
"EnableMetrics": false,
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
}
]
},
@@ -2278,43 +758,7 @@
"Name": "nonParticipatingNode6",
"Wallets": [
{
- "Name": "Wallet106",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet116",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet126",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet136",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet146",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet156",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet166",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet176",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet186",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet196",
+ "Name": "Wallet26",
"ParticipationOnly": false
}
],
@@ -2324,7 +768,7 @@
"EnableMetrics": false,
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
}
]
},
@@ -2336,43 +780,7 @@
"Name": "nonParticipatingNode7",
"Wallets": [
{
- "Name": "Wallet107",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet117",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet127",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet137",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet147",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet157",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet167",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet177",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet187",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet197",
+ "Name": "Wallet27",
"ParticipationOnly": false
}
],
@@ -2382,7 +790,7 @@
"EnableMetrics": false,
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
}
]
},
@@ -2394,43 +802,7 @@
"Name": "nonParticipatingNode8",
"Wallets": [
{
- "Name": "Wallet108",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet118",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet128",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet138",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet148",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet158",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet168",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet178",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet188",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet198",
+ "Name": "Wallet28",
"ParticipationOnly": false
}
],
@@ -2440,7 +812,7 @@
"EnableMetrics": false,
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
}
]
},
@@ -2452,43 +824,7 @@
"Name": "nonParticipatingNode9",
"Wallets": [
{
- "Name": "Wallet109",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet119",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet129",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet139",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet149",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet159",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet169",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet179",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet189",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet199",
+ "Name": "Wallet29",
"ParticipationOnly": false
}
],
@@ -2498,7 +834,7 @@
"EnableMetrics": false,
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
}
]
},
@@ -2510,43 +846,7 @@
"Name": "nonParticipatingNode10",
"Wallets": [
{
- "Name": "Wallet110",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet120",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet130",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet140",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet150",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet160",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet170",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet180",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet190",
- "ParticipationOnly": false
- },
- {
- "Name": "Wallet200",
+ "Name": "Wallet30",
"ParticipationOnly": false
}
],
@@ -2556,7 +856,7 @@
"EnableMetrics": false,
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
}
]
}
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json
index 10e25e767..f6edee968 100644
--- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json
@@ -1,4 +1,5 @@
{
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableBlockStats": false,
"EnableTelemetry": false,
@@ -8,6 +9,7 @@
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}",
"AltConfigs": [
{
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableBlockStats": true,
"EnableTelemetry": true,
@@ -19,4 +21,3 @@
}
]
}
-
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/nonPartNode.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/nonPartNode.json
index 8ab3b8bdd..48f453684 100644
--- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/nonPartNode.json
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/nonPartNode.json
@@ -1,5 +1,5 @@
{
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
}
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/topology.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/topology.json
index 8e9c8e7cd..293fa64af 100644
--- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/topology.json
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/topology.json
@@ -1,156 +1,156 @@
-{ "Hosts":
- [
+{
+ "Hosts": [
{
- "Name": "R1",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N1",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "R2",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N2",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "R3",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N3",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "R4",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N4",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "R5",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N5",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "R6",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N6",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "R7",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N7",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "R8",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N8",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "NPN1",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N9",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "NPN2",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N10",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "NPN3",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N11",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "NPN4",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N12",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "NPN5",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N13",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "NPN6",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N14",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "NPN7",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N15",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "NPN8",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N16",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "NPN9",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N17",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "NPN10",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N18",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "N1",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N19",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "N2",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "N20",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "N3",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "NPN1",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "N4",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "NPN2",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "N5",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "NPN3",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "N6",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "NPN4",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "N7",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "NPN5",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "N8",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "NPN6",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "N9",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "NPN7",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "N10",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "NPN8",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "N11",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "NPN9",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "N12",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Name": "NPN10",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
},
{
- "Name": "N13",
+ "Name": "R1",
"Template": "AWS-US-EAST-2-m5d.4xl"
},
{
- "Name": "N14",
+ "Name": "R2",
"Template": "AWS-US-EAST-2-m5d.4xl"
},
{
- "Name": "N15",
+ "Name": "R3",
"Template": "AWS-US-EAST-2-m5d.4xl"
},
{
- "Name": "N16",
+ "Name": "R4",
"Template": "AWS-US-EAST-2-m5d.4xl"
},
{
- "Name": "N17",
+ "Name": "R5",
"Template": "AWS-US-EAST-2-m5d.4xl"
},
{
- "Name": "N18",
+ "Name": "R6",
"Template": "AWS-US-EAST-2-m5d.4xl"
},
{
- "Name": "N19",
+ "Name": "R7",
"Template": "AWS-US-EAST-2-m5d.4xl"
},
{
- "Name": "N20",
+ "Name": "R8",
"Template": "AWS-US-EAST-2-m5d.4xl"
}
]
-}
+} \ No newline at end of file
diff --git a/test/testdata/deployednettemplates/recipes/custom/README.md b/test/testdata/deployednettemplates/recipes/custom/README.md
index 78c0d3330..a74784c25 100644
--- a/test/testdata/deployednettemplates/recipes/custom/README.md
+++ b/test/testdata/deployednettemplates/recipes/custom/README.md
@@ -22,7 +22,7 @@ Build and create the recipe.
## "Quick" Start - Manual recipe generation (not using Jenkins)
Generate the recipe with the `network-tpl.json` file
-- (See the first section above for small networks.)
+- (See the first section above for small networks. See Troubleshooting for netgoal path set up)
1. Make sure you're in the same directory as this README and `cp network_templates/network-tpl.json network-tpl.json`
2. Generate the recipe with a python script:
```
@@ -67,7 +67,7 @@ Most parameters that can be modified by config.json can be found in `go-algorand
## Troubleshooting
### Can't find netgoal
-- Make sure you have netgoal installed
+- Make sure you have netgoal installed (you can either download it or run through the go-algorand build process)
- Make sure you export GOBIN and GOPATH in your environment and add it to your path.
On a mac, update by editing `~/.zshrc`, add
```
@@ -81,4 +81,4 @@ export PATH=$PATH:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/Users/ec2-user/L
- Make sure the machine type exists. It uses the regions in the groups and the type to come up with the host template name in `test/testdata/deployednettemplates/hosttemplates/hosttemplates.json`. If it doesn't exist, you will have to add it to that file.
### couldn't initialize the node: unsupported protocol
-- check your consensus.json. It may be missing the keys in the future protocol if you are doing this manually. Compare the consensus.json with `goal protocols > generated_consensus.json` \ No newline at end of file
+- check your consensus.json. It may be missing the keys in the future protocol if you are doing this manually. Compare the consensus.json with `goal protocols > generated_consensus.json`
diff --git a/test/testdata/deployednettemplates/recipes/custom/example/network_templates/c5dmachines.json b/test/testdata/deployednettemplates/recipes/custom/example/network_templates/c5dmachines.json
index 1f6a8b2fb..53c07e5fb 100644
--- a/test/testdata/deployednettemplates/recipes/custom/example/network_templates/c5dmachines.json
+++ b/test/testdata/deployednettemplates/recipes/custom/example/network_templates/c5dmachines.json
@@ -2,6 +2,7 @@
"network": {
"wallets": 6,
"nodes": 3,
+ "npn": 5,
"ConsensusProtocol": "future"
},
"instances": {
diff --git a/test/testdata/deployednettemplates/recipes/custom/example/network_templates/network-tpl.json b/test/testdata/deployednettemplates/recipes/custom/example/network_templates/network-tpl.json
index 5bc36419d..76dd8e77f 100644
--- a/test/testdata/deployednettemplates/recipes/custom/example/network_templates/network-tpl.json
+++ b/test/testdata/deployednettemplates/recipes/custom/example/network_templates/network-tpl.json
@@ -2,6 +2,7 @@
"network": {
"wallets": 6,
"nodes": 3,
+ "npn": 5,
"ConsensusProtocol": "future"
},
"instances": {
diff --git a/test/testdata/deployednettemplates/recipes/custom/network_templates/network-tpl.json b/test/testdata/deployednettemplates/recipes/custom/network_templates/network-tpl.json
index 5bc36419d..6e8c20c5c 100644
--- a/test/testdata/deployednettemplates/recipes/custom/network_templates/network-tpl.json
+++ b/test/testdata/deployednettemplates/recipes/custom/network_templates/network-tpl.json
@@ -1,24 +1,25 @@
{
"network": {
- "wallets": 6,
- "nodes": 3,
+ "wallets": 3,
+ "nodes": 1,
+ "npn": 1,
"ConsensusProtocol": "future"
},
"instances": {
"relays": {
"config": "./configs/relay.json",
- "type": "m5d.4xl",
+ "type": "m5d.2xl",
"count": 1
},
"participatingNodes": {
"config": "./configs/node.json",
- "type": "m5d.4xl",
- "count": 3
+ "type": "m5d.2xl",
+ "count": 1
},
"nonParticipatingNodes": {
"config": "./configs/nonPartNode.json",
- "type": "m5d.4xl",
- "count": 5
+ "type": "m5d.2xl",
+ "count": 1
}
},
"groups": [
diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/network-tpl.json b/test/testdata/deployednettemplates/recipes/mainnet-model/network-tpl.json
index 38d6fa96e..ae8d81048 100644
--- a/test/testdata/deployednettemplates/recipes/mainnet-model/network-tpl.json
+++ b/test/testdata/deployednettemplates/recipes/mainnet-model/network-tpl.json
@@ -1,7 +1,8 @@
{
"network": {
"wallets": 100,
- "nodes": 50
+ "nodes": 50,
+ "npn": 10
},
"instances": {
"relays": {
diff --git a/test/testdata/deployednettemplates/recipes/mmnet/Makefile b/test/testdata/deployednettemplates/recipes/mmnet/Makefile
index 21d38bbbd..5d8811ea7 100644
--- a/test/testdata/deployednettemplates/recipes/mmnet/Makefile
+++ b/test/testdata/deployednettemplates/recipes/mmnet/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 130 -R 136 -n 130 -H 16 --node-template configs/node.json --relay-template configs/relay.json --non-participating-node-template configs/nonPartNode.json
+PARAMS=-w 130 -R 136 -n 130 --npn-algod-nodes 16 --node-template configs/node.json --relay-template configs/relay.json --non-participating-node-template configs/nonPartNode.json
all: topology.json net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/network-partition/Makefile b/test/testdata/deployednettemplates/recipes/network-partition/Makefile
index 24226bc5b..96c6b9a2f 100644
--- a/test/testdata/deployednettemplates/recipes/network-partition/Makefile
+++ b/test/testdata/deployednettemplates/recipes/network-partition/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 100 -R 8 -N 20 -n 100 -H 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 100 -R 8 -N 20 -n 100 --npn-algod-nodes 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/scenario1/Makefile b/test/testdata/deployednettemplates/recipes/scenario1/Makefile
index 24226bc5b..96c6b9a2f 100644
--- a/test/testdata/deployednettemplates/recipes/scenario1/Makefile
+++ b/test/testdata/deployednettemplates/recipes/scenario1/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 100 -R 8 -N 20 -n 100 -H 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 100 -R 8 -N 20 -n 100 --npn-algod-nodes 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/Makefile b/test/testdata/deployednettemplates/recipes/scenario1s/Makefile
index dc973560b..f4b191090 100644
--- a/test/testdata/deployednettemplates/recipes/scenario1s/Makefile
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/Makefile
@@ -1,13 +1,18 @@
# scenario1s is scenario1 but smaller, (100 nodes, 100 wallets) -> (20 nodes, 20 wallets), each algod gets single tenancy on a smaller ec2 instance
-PARAMS=-w 20 -R 8 -N 20 -n 20 -H 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 20 -R 8 -N 20 -n 20 --npn-algod-nodes 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
-all: net.json genesis.json
+all: net.json genesis.json topology.json boostrappedFile.json
net.json: node.json nonPartNode.json ${GOPATH}/bin/netgoal Makefile
netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS}
genesis.json: ${GOPATH}/bin/netgoal Makefile
- netgoal generate -t genesis -r /tmp/wat -o genesis.json ${PARAMS}
+ netgoal generate -t genesis -r /tmp/wat -o genesis.l.json ${PARAMS}
+ jq '.LastPartKeyRound=22000|.NetworkName="s1s"|.ConsensusProtocol="future"' < genesis.l.json > genesis.json
+ rm genesis.l.json
+
+topology.json: gen_topology.py
+ python3 gen_topology.py
clean:
rm -f net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/gen_topology.py b/test/testdata/deployednettemplates/recipes/scenario1s/gen_topology.py
index 07d14a4df..e8429efa0 100644
--- a/test/testdata/deployednettemplates/recipes/scenario1s/gen_topology.py
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/gen_topology.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
# scenario1s is scenario1 but smaller, (100 nodes, 100 wallets) -> (20 nodes, 20 wallets), each algod gets single tenancy on a smaller ec2 instance
node_types = {"R":8, "N":20, "NPN":10}
node_size = {"R":"-m5d.4xl", "N":"-m5d.2xl", "NPN":"-m5d.2xl"}
@@ -5,11 +6,10 @@ regions = [
"AWS-US-EAST-2"
]
-f = open("topology.json", "w")
-f.write("{ \"Hosts\":\n [")
+import json
region_count = len(regions)
-first = True
+hosts = []
for x in sorted(node_types.keys()):
node_type = x
node_count = node_types[x]
@@ -17,11 +17,11 @@ for x in sorted(node_types.keys()):
for i in range(node_count):
node_name = node_type + str(i+1)
region = regions[i%region_count]
- if (first ):
- first = False
- else:
- f.write(",")
- f.write ("\n {\n \"Name\": \"" + node_name + "\",\n \"Template\": \"" + region + region_size + "\"\n }" )
+ hosts.append({
+ "Name": node_name,
+ "Template": region + region_size,
+ })
-f.write("\n ]\n}\n")
-f.close()
+with open("topology.json", "w") as f:
+ topology = {"Hosts": hosts}
+ json.dump(topology, f, indent=2, sort_keys=True)
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/genesis.json b/test/testdata/deployednettemplates/recipes/scenario1s/genesis.json
index c66d0c920..31da8b8c2 100644
--- a/test/testdata/deployednettemplates/recipes/scenario1s/genesis.json
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/genesis.json
@@ -1,164 +1,165 @@
{
- "NetworkName": "b09",
- "VersionModifier": "",
- "ConsensusProtocol": "future",
- "FirstPartKeyRound": 0,
- "LastPartKeyRound": 22000,
- "PartKeyDilution": 0,
- "Wallets": [
- {
- "Name": "Wallet1",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet2",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet3",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet4",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet5",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet6",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet7",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet8",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet9",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet10",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet11",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet12",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet13",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet14",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet15",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet16",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet17",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet18",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet19",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet20",
- "Stake": 2.5,
- "Online": true
- },
- {
- "Name": "Wallet21",
- "Stake": 5,
- "Online": false
- },
- {
- "Name": "Wallet22",
- "Stake": 5,
- "Online": false
- },
- {
- "Name": "Wallet23",
- "Stake": 5,
- "Online": false
- },
- {
- "Name": "Wallet24",
- "Stake": 5,
- "Online": false
- },
- {
- "Name": "Wallet25",
- "Stake": 5,
- "Online": false
- },
- {
- "Name": "Wallet26",
- "Stake": 5,
- "Online": false
- },
- {
- "Name": "Wallet27",
- "Stake": 5,
- "Online": false
- },
- {
- "Name": "Wallet28",
- "Stake": 5,
- "Online": false
- },
- {
- "Name": "Wallet29",
- "Stake": 5,
- "Online": false
- },
- {
- "Name": "Wallet30",
- "Stake": 5,
- "Online": false
- }
- ],
- "FeeSink": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
- "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
- "DevMode": false,
- "Comment": ""
+ "NetworkName": "s1s",
+ "VersionModifier": "",
+ "ConsensusProtocol": "future",
+ "FirstPartKeyRound": 0,
+ "LastPartKeyRound": 22000,
+ "PartKeyDilution": 0,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet4",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet5",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet6",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet7",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet8",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet9",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet10",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet11",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet12",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet13",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet14",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet15",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet16",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet17",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet18",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet19",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet20",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet21",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet22",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet23",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet24",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet25",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet26",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet27",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet28",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet29",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet30",
+ "Stake": 5,
+ "Online": false
+ }
+ ],
+ "FeeSink": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "RewardsPoolBalance": 125000000000000,
+ "DevMode": false,
+ "Comment": ""
}
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/net.json b/test/testdata/deployednettemplates/recipes/scenario1s/net.json
index b1a5a3307..ffb3bb652 100644
--- a/test/testdata/deployednettemplates/recipes/scenario1s/net.json
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/net.json
@@ -16,7 +16,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -36,7 +36,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -56,7 +56,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -76,7 +76,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -96,7 +96,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -116,7 +116,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -136,7 +136,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -156,7 +156,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -180,7 +180,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -204,7 +204,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -228,7 +228,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -252,7 +252,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -276,7 +276,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -300,7 +300,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -324,7 +324,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -348,7 +348,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -372,7 +372,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -396,7 +396,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -420,7 +420,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -444,7 +444,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -468,7 +468,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -492,7 +492,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -516,7 +516,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -540,7 +540,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -564,7 +564,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -588,7 +588,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -612,7 +612,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -636,7 +636,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/node.json b/test/testdata/deployednettemplates/recipes/scenario1s/node.json
index fad27f5fe..f6edee968 100644
--- a/test/testdata/deployednettemplates/recipes/scenario1s/node.json
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/node.json
@@ -6,7 +6,7 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}",
"AltConfigs": [
{
"APIEndpoint": "{{APIEndpoint}}",
@@ -16,7 +16,7 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }",
"FractionApply": 0.2
}
]
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/relay.json b/test/testdata/deployednettemplates/recipes/scenario1s/relay.json
index db8fb939d..563543a7b 100644
--- a/test/testdata/deployednettemplates/recipes/scenario1s/relay.json
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/relay.json
@@ -7,5 +7,5 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/topology.json b/test/testdata/deployednettemplates/recipes/scenario1s/topology.json
index c53f19cd5..293fa64af 100644
--- a/test/testdata/deployednettemplates/recipes/scenario1s/topology.json
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/topology.json
@@ -1,5 +1,5 @@
-{ "Hosts":
- [
+{
+ "Hosts": [
{
"Name": "N1",
"Template": "AWS-US-EAST-2-m5d.2xl"
@@ -153,4 +153,4 @@
"Template": "AWS-US-EAST-2-m5d.4xl"
}
]
-}
+} \ No newline at end of file
diff --git a/test/testdata/deployednettemplates/recipes/scenario2/Makefile b/test/testdata/deployednettemplates/recipes/scenario2/Makefile
index c6f8415b9..b5539235e 100644
--- a/test/testdata/deployednettemplates/recipes/scenario2/Makefile
+++ b/test/testdata/deployednettemplates/recipes/scenario2/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 400 -R 20 -N 40 -n 200 -H 15 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 400 -R 20 -N 40 -n 200 --npn-algod-nodes 15 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/scenario3/Makefile b/test/testdata/deployednettemplates/recipes/scenario3/Makefile
index d53f1fb6d..092d2f12f 100644
--- a/test/testdata/deployednettemplates/recipes/scenario3/Makefile
+++ b/test/testdata/deployednettemplates/recipes/scenario3/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 10000 -R 20 -N 100 -n 1000 -H 15 --node-template node.json --relay-template relay.json
+PARAMS=-w 10000 -R 20 -N 100 -n 1000 --npn-algod-nodes 15 --node-template node.json --relay-template relay.json
all: net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/scenario3s/Makefile b/test/testdata/deployednettemplates/recipes/scenario3s/Makefile
index f49294616..fe1ea4f9e 100644
--- a/test/testdata/deployednettemplates/recipes/scenario3s/Makefile
+++ b/test/testdata/deployednettemplates/recipes/scenario3s/Makefile
@@ -1,5 +1,5 @@
# scenario3s is scenario3 but smaller. (10000 wallets -> 500) (1000 algod participating nodes -> 100) It still keeps a global datacenter distribution.
-PARAMS=-w 500 -R 20 -N 100 -n 100 -H 15 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 500 -R 20 -N 100 -n 100 --npn-algod-nodes 15 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
SOURCES=node.json ${GOPATH}/bin/netgoal Makefile relay.json nonPartNode.json
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/Makefile b/test/testdata/deployednettemplates/recipes/txnsync/Makefile
index bd62e7b67..e86564e10 100644
--- a/test/testdata/deployednettemplates/recipes/txnsync/Makefile
+++ b/test/testdata/deployednettemplates/recipes/txnsync/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 10 -R 4 -N 10 -n 10 -H 1 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 10 -R 4 -N 10 -n 10 --npn-algod-nodes 1 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: net.json genesis.json
diff --git a/tools/debug/doberman/logo.go b/tools/debug/doberman/logo.go
deleted file mode 100644
index a27411074..000000000
--- a/tools/debug/doberman/logo.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package main
-
-// data, err := os.ReadFile("algorand-logo.png")
-// fmt.Printf("%#v\n", data)
-
-var logo = []byte{0x89, 0x50, 0x4e, 0x47, 0xd, 0xa, 0x1a, 0xa, 0x0, 0x0, 0x0, 0xd, 0x49, 0x48, 0x44, 0x52, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0xf0, 0x8, 0x2, 0x0, 0x0, 0x0, 0xb1, 0x37, 0x7e, 0xc5, 0x0, 0x0, 0xf, 0xa1, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0xec, 0x9d, 0x7b, 0x6c, 0x14, 0xd5, 0x17, 0xc7, 0x67, 0xbb, 0xdb, 0xd7, 0x6e, 0x2d, 0x65, 0xfb, 0xb2, 0x80, 0xf, 0x5a, 0xa0, 0x22, 0x18, 0x68, 0x8, 0xa2, 0x88, 0x1a, 0x63, 0x85, 0x50, 0xd2, 0x8a, 0x50, 0x81, 0xaa, 0x4, 0x62, 0xf0, 0x5, 0x46, 0x57, 0x14, 0x9, 0xad, 0x9, 0x1a, 0x6c, 0xa2, 0xc5, 0x12, 0xf8, 0x3, 0xf1, 0x81, 0x5, 0x6b, 0x45, 0x9a, 0xa, 0x85, 0x95, 0x50, 0x8b, 0x3c, 0x82, 0xba, 0x46, 0xd0, 0x3e, 0x10, 0x69, 0x30, 0xb5, 0x58, 0x42, 0x8b, 0x85, 0x6e, 0xb, 0xdd, 0xdd, 0xb6, 0xfb, 0x9c, 0xfd, 0xe5, 0x97, 0x4d, 0xaa, 0x11, 0x28, 0xdd, 0x73, 0x66, 0xf6, 0x31, 0xfd, 0x7e, 0xfe, 0x2, 0xc2, 0x9d, 0x33, 0x7b, 0xe7, 0xb3, 0x67, 0xcf, 0xdc, 0xb9, 0x73, 0xaf, 0xc6, 0xeb, 0xf5, 0xa, 0x0, 0x28, 0x85, 0x8, 0x74, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x40, 0xf9, 0x68, 0xd0, 0x5, 0x41, 0xa7, 0xa9, 0xa9, 0x69, 0xf7, 0xee, 0xdd, 0x11, 0x11, 0xfe, 0x25, 0x17, 0x9d, 0x4e, 0xb7, 0x66, 0xcd, 0x1a, 
0xf4, 0xde, 0x7f, 0x50, 0x79, 0xbd, 0x5e, 0xf4, 0x42, 0x70, 0x99, 0x35, 0x6b, 0x96, 0xc9, 0x64, 0x22, 0x34, 0xb4, 0xdb, 0xed, 0xd1, 0xd1, 0xd1, 0xe8, 0x40, 0x94, 0x1c, 0x21, 0xc4, 0xf1, 0xe3, 0xc7, 0x69, 0x36, 0xb, 0x82, 0x70, 0xfa, 0xf4, 0x69, 0x74, 0x20, 0x84, 0xe, 0x21, 0xdc, 0x6e, 0x77, 0x5e, 0x5e, 0x1e, 0xb9, 0xf9, 0xd7, 0x5f, 0x7f, 0x8d, 0x3e, 0x84, 0xd0, 0x21, 0xc4, 0x87, 0x1f, 0x7e, 0x68, 0xb1, 0x58, 0xc8, 0xcd, 0xcb, 0xca, 0xca, 0xd0, 0x87, 0xa8, 0xa1, 0x43, 0x28, 0x3d, 0x47, 0x46, 0x46, 0x32, 0xf, 0x62, 0xb5, 0x5a, 0xe3, 0xe2, 0xe2, 0xd0, 0x99, 0xc8, 0xd0, 0xc1, 0x67, 0xf3, 0xe6, 0xcd, 0xfc, 0x83, 0xb4, 0xb5, 0xb5, 0xa1, 0x27, 0x21, 0x74, 0xf0, 0x69, 0x68, 0x68, 0x90, 0x64, 0xd0, 0xed, 0xb3, 0xcf, 0x3e, 0x43, 0x67, 0xa2, 0xe4, 0x8, 0x3e, 0x23, 0x47, 0x8e, 0xbc, 0x7a, 0xf5, 0x2a, 0xff, 0x38, 0x71, 0x71, 0x71, 0x16, 0x8b, 0x45, 0xa5, 0x52, 0xa1, 0x4b, 0x91, 0xa1, 0x83, 0xc6, 0xfb, 0xef, 0xbf, 0x2f, 0x89, 0xcd, 0x82, 0x20, 0xd8, 0x6c, 0xb6, 0xfa, 0xfa, 0x7a, 0x74, 0x29, 0x32, 0x74, 0xd0, 0xe8, 0xeb, 0xeb, 0x4b, 0x48, 0x48, 0x70, 0xb9, 0x5c, 0x52, 0x1d, 0x30, 0x37, 0x37, 0xd7, 0x68, 0x34, 0xa2, 0x63, 0x91, 0xa1, 0x83, 0xc3, 0xb, 0x2f, 0xbc, 0x20, 0xa1, 0xcd, 0x82, 0x20, 0xd4, 0xd4, 0xd4, 0xa0, 0x57, 0x91, 0xa1, 0x83, 0x43, 0x6f, 0x6f, 0xaf, 0x1c, 0xa3, 0x6c, 0xb8, 0x88, 0xc8, 0xd0, 0xc1, 0xe1, 0xe5, 0x97, 0x5f, 0x96, 0xe3, 0xb0, 0x3f, 0xfc, 0xf0, 0x3, 0xfa, 0x16, 0x19, 0x3a, 0xd0, 0xb4, 0xb6, 0xb6, 0x8e, 0x1d, 0x3b, 0x56, 0x8e, 0x23, 0xcf, 0x9e, 0x3d, 0xbb, 0xb6, 0xb6, 0x16, 0x3d, 0xc, 0xa1, 0x3, 0x87, 0xcb, 0xe5, 0xba, 0xeb, 0xae, 0xbb, 0xce, 0x9d, 0x3b, 0x27, 0xc7, 0xc1, 0xe3, 0xe3, 0xe3, 0x7b, 0x7a, 0x7a, 0xd0, 0xc9, 0x28, 0x39, 0x2, 0xc7, 0xd2, 0xa5, 0x4b, 0x65, 0xb2, 0x59, 0x10, 0x4, 0x8b, 0xc5, 0x2, 0xa1, 0x21, 0x74, 0xe0, 0xe8, 0xea, 0xea, 0xaa, 0xac, 0xac, 0x94, 0x35, 0x44, 0x45, 0x45, 0x5, 0xfa, 0x19, 0x42, 0x7, 0x88, 0xe5, 0xcb, 0x97, 0xcb, 0x1d, 0xe2, 0xf0, 0xe1, 0xc3, 0xe8, 0x67, 0xd4, 0xd0, 0x81, 0xe0, 0xe2, 0xc5, 0x8b, 0xa3, 0x47, 0x8f, 0x96, 
0x3b, 0x4a, 0x52, 0x52, 0x52, 0x67, 0x67, 0x27, 0x7a, 0x1b, 0x19, 0x5a, 0x76, 0x66, 0xcf, 0x9e, 0x1d, 0x80, 0x28, 0x66, 0xb3, 0xb9, 0xa3, 0xa3, 0x3, 0xbd, 0xd, 0xa1, 0xe5, 0xa5, 0xba, 0xba, 0xfa, 0xcc, 0x99, 0x33, 0x81, 0x89, 0xb5, 0x6f, 0xdf, 0x3e, 0x74, 0x38, 0x4a, 0xe, 0x19, 0x71, 0xbb, 0xdd, 0x49, 0x49, 0x49, 0x1, 0x1b, 0x7f, 0xc8, 0xce, 0xce, 0xfe, 0xee, 0xbb, 0xef, 0x90, 0xa1, 0x81, 0x5c, 0x94, 0x96, 0x96, 0x6, 0x72, 0x34, 0xed, 0xd8, 0xb1, 0x63, 0x1e, 0x8f, 0x7, 0x19, 0x1a, 0x19, 0x5a, 0xae, 0xf4, 0x1c, 0x15, 0x15, 0x15, 0xe0, 0xee, 0x6d, 0x6f, 0x6f, 0x1f, 0x35, 0x6a, 0x14, 0x32, 0x34, 0x90, 0x9e, 0x4d, 0x9b, 0x36, 0x91, 0x6d, 0x26, 0x4f, 0xd8, 0x3f, 0x7a, 0xf4, 0x28, 0x4a, 0xe, 0x20, 0x3d, 0x3f, 0xff, 0xfc, 0xf3, 0xda, 0xb5, 0x6b, 0xc9, 0xcd, 0xd, 0x6, 0x43, 0x52, 0x52, 0x12, 0xa1, 0xe1, 0xf6, 0xed, 0xdb, 0x51, 0x72, 0xa0, 0xe4, 0x90, 0x9e, 0x98, 0x98, 0x18, 0x87, 0xc3, 0x41, 0x6b, 0xab, 0xd5, 0x6a, 0x7b, 0x7b, 0x7b, 0x73, 0x73, 0x73, 0xf, 0x1c, 0x38, 0x40, 0x68, 0xee, 0x74, 0x3a, 0xf9, 0x2f, 0x93, 0x23, 0x43, 0x83, 0x7f, 0x78, 0xeb, 0xad, 0xb7, 0xc8, 0x36, 0xfb, 0x6a, 0x15, 0xce, 0xc3, 0xc5, 0x8b, 0x17, 0x2f, 0x22, 0x43, 0x3, 0xc9, 0x70, 0xb9, 0x5c, 0x5a, 0xad, 0xd6, 0xed, 0x76, 0xd3, 0x9a, 0xa7, 0xa4, 0xa4, 0x5c, 0xba, 0x74, 0xc9, 0xeb, 0xf5, 0x8a, 0xa2, 0x18, 0x15, 0x15, 0x25, 0x8a, 0xa2, 0xbf, 0x47, 0x28, 0x2d, 0x2d, 0x5d, 0xbd, 0x7a, 0xf5, 0xb0, 0xed, 0x7f, 0xac, 0x3e, 0x2a, 0x31, 0x45, 0x45, 0x45, 0x64, 0x9b, 0x5, 0x41, 0x28, 0x29, 0x29, 0xf1, 0xdd, 0x14, 0xaa, 0xd5, 0x6a, 0x9d, 0x4e, 0x67, 0xb5, 0x5a, 0xfd, 0x3d, 0xc2, 0xd6, 0xad, 0x5b, 0xf5, 0x7a, 0xbd, 0xbf, 0xad, 0xbc, 0x5e, 0x6f, 0x6a, 0x6a, 0x6a, 0x4e, 0x4e, 0xe, 0x32, 0x34, 0xf8, 0x7, 0x9b, 0xcd, 0x16, 0x1f, 0x1f, 0x4f, 0xee, 0x52, 0xbd, 0x5e, 0xdf, 0xd5, 0xd5, 0x35, 0xf0, 0xd7, 0xec, 0xec, 0xec, 0x23, 0x47, 0x8e, 0x4, 0xec, 0xe4, 0xcd, 0x66, 0xb3, 0x5e, 0xaf, 0xf, 0xf7, 0x15, 0x11, 0x50, 0x43, 0x4b, 0xc9, 0xd2, 0xa5, 0x4b, 0x39, 0x9, 0xa2, 0xae, 0xae, 0x6e, 0xa0, 0xb9, 0xd7, 0xeb, 0xd, 0xe4, 
0xf2, 0xcf, 0x35, 0x35, 0x35, 0x89, 0x89, 0x89, 0x4a, 0x58, 0xdf, 0xc3, 0xb, 0x24, 0xa2, 0xb1, 0xb1, 0x91, 0x73, 0x21, 0xbe, 0xf8, 0xe2, 0x8b, 0x6b, 0x8f, 0x19, 0x98, 0xe5, 0x9f, 0xc7, 0x8f, 0x1f, 0x2f, 0x8a, 0xa2, 0x32, 0xae, 0x2, 0x32, 0xb4, 0x34, 0x88, 0xa2, 0xf8, 0xf8, 0xe3, 0x8f, 0x93, 0x9b, 0xa7, 0xa5, 0xa5, 0x3d, 0xfd, 0xf4, 0xd3, 0xd7, 0xfe, 0xfb, 0x13, 0x4f, 0x3c, 0x11, 0x80, 0x93, 0xdf, 0xbf, 0x7f, 0xbf, 0x62, 0xd6, 0x5e, 0x82, 0xd0, 0xd2, 0x70, 0xe0, 0xc0, 0x81, 0xf3, 0xe7, 0xcf, 0x93, 0x9b, 0x1b, 0x8d, 0xc6, 0xeb, 0x2a, 0x55, 0x58, 0x58, 0x28, 0xf7, 0x99, 0xe7, 0xe7, 0xe7, 0x4f, 0x9c, 0x38, 0x51, 0x31, 0x17, 0x2, 0x37, 0x85, 0xd2, 0xa4, 0x67, 0xbd, 0x5e, 0x4f, 0x9e, 0x87, 0x34, 0x66, 0xcc, 0x98, 0xb, 0x17, 0x2e, 0xdc, 0xa8, 0x20, 0xf4, 0x77, 0xef, 0x15, 0x7f, 0x69, 0x6a, 0x6a, 0x52, 0x92, 0xd0, 0xc8, 0xd0, 0x12, 0x30, 0x77, 0xee, 0x5c, 0xce, 0xac, 0xba, 0x41, 0x16, 0xe2, 0x57, 0xa9, 0x54, 0xb2, 0x96, 0xd1, 0xf3, 0xe6, 0xcd, 0x53, 0x92, 0xcd, 0xb8, 0x29, 0x94, 0x80, 0xd6, 0xd6, 0x56, 0x4e, 0xff, 0x3f, 0xf2, 0xc8, 0x23, 0x83, 0x1f, 0x7f, 0xe5, 0xca, 0x95, 0x32, 0x5d, 0xfa, 0xc8, 0xc8, 0x48, 0xb7, 0xdb, 0xad, 0x98, 0xdb, 0x41, 0xdc, 0x14, 0x4a, 0xc3, 0x83, 0xf, 0x3e, 0x48, 0x6e, 0xab, 0x56, 0xab, 0x8d, 0x46, 0xe3, 0xe0, 0x55, 0xdf, 0x4b, 0x2f, 0xbd, 0x24, 0xd3, 0x99, 0xaf, 0x5f, 0xbf, 0x5e, 0xad, 0x56, 0x2b, 0x6c, 0x29, 0x5e, 0x3c, 0x29, 0x64, 0xb1, 0x73, 0xe7, 0xce, 0x1b, 0x95, 0xbf, 0x43, 0xe1, 0xd5, 0x57, 0x5f, 0xbd, 0xe9, 0x52, 0x77, 0xe3, 0xc6, 0x8d, 0x93, 0xe3, 0xcc, 0xb5, 0x5a, 0x6d, 0x51, 0x51, 0x91, 0xf2, 0xae, 0x8, 0x6e, 0xa, 0x59, 0xdc, 0x72, 0xcb, 0x2d, 0x36, 0x9b, 0x8d, 0x98, 0x4b, 0x34, 0x1a, 0x97, 0xcb, 0xe5, 0xf5, 0x7a, 0x6f, 0x9a, 0x23, 0x27, 0x4c, 0x98, 0xd0, 0xdc, 0xdc, 0x2c, 0xed, 0x99, 0xd7, 0xd5, 0xd5, 0x65, 0x65, 0x65, 0x29, 0x6f, 0xa5, 0x74, 0x94, 0x1c, 0x74, 0x8a, 0x8a, 0x8a, 0xc8, 0x36, 0xfb, 0x86, 0xea, 0x86, 0x38, 0x97, 0xff, 0xb9, 0xe7, 0x9e, 0x93, 0xf6, 0xcc, 0x8b, 0x8b, 0x8b, 0x15, 0x69, 0x33, 0x32, 0x34, 0x1d, 0x97, 0xcb, 
0xa5, 0xd3, 0xe9, 0xc8, 0x2b, 0x3d, 0x4f, 0x9f, 0x3e, 0xfd, 0xe4, 0xc9, 0x93, 0x43, 0xfc, 0xcf, 0x27, 0x4e, 0x9c, 0xb8, 0xef, 0xbe, 0xfb, 0x24, 0x3c, 0x79, 0x9b, 0xcd, 0xa6, 0xd3, 0xe9, 0x14, 0x79, 0x5d, 0x90, 0xa1, 0xe9, 0x77, 0x54, 0x9c, 0x75, 0xcb, 0xfd, 0x5a, 0x2c, 0x74, 0xca, 0x94, 0x29, 0x12, 0x9e, 0xf9, 0x7, 0x1f, 0x7c, 0xa0, 0x54, 0x9b, 0x5, 0xc, 0xdb, 0xd1, 0xb8, 0x74, 0xe9, 0x12, 0xa7, 0xcf, 0xb, 0xb, 0xb, 0xfd, 0x8d, 0x38, 0x75, 0xea, 0x54, 0x49, 0x2e, 0x77, 0x62, 0x62, 0xa2, 0xc7, 0xe3, 0x51, 0xd8, 0x50, 0x1d, 0x86, 0xed, 0xb8, 0x29, 0x20, 0x37, 0x37, 0x97, 0xdc, 0x5c, 0xa7, 0xd3, 0xbd, 0xf3, 0xce, 0x3b, 0xfe, 0x56, 0x7a, 0xb, 0x17, 0x2e, 0x94, 0xe4, 0xe4, 0xcb, 0xcb, 0xcb, 0x23, 0x22, 0x22, 0x14, 0xbc, 0x6b, 0x16, 0x84, 0xf6, 0x9b, 0x37, 0xdf, 0x7c, 0x73, 0xe8, 0xe5, 0xef, 0xb5, 0x6c, 0xdf, 0xbe, 0x5d, 0xa3, 0xd1, 0xf8, 0xa5, 0x94, 0xd7, 0xeb, 0xcd, 0xcf, 0xcf, 0xe7, 0x9f, 0xf9, 0x94, 0x29, 0x53, 0x72, 0x72, 0x72, 0x94, 0x7d, 0xd7, 0x84, 0x9b, 0x42, 0xff, 0xbb, 0x8c, 0x91, 0xde, 0xc6, 0x8e, 0x1d, 0x4b, 0x5e, 0x25, 0x5a, 0xab, 0xd5, 0xf6, 0xf7, 0xf7, 0x73, 0xce, 0xbc, 0xad, 0xad, 0x2d, 0x0, 0xcb, 0x46, 0x22, 0x43, 0x87, 0x13, 0xcc, 0x4d, 0x52, 0x36, 0x6c, 0xd8, 0x40, 0x6e, 0x9b, 0x91, 0x91, 0xc1, 0x9, 0x9d, 0x99, 0x99, 0xa9, 0x78, 0x9b, 0x91, 0xa1, 0xfd, 0xa3, 0xa7, 0xa7, 0x47, 0xaf, 0xd7, 0x13, 0x5e, 0x5c, 0xe5, 0xa7, 0x67, 0xdf, 0x48, 0xdf, 0xaf, 0xbf, 0xfe, 0x4a, 0x6e, 0x7e, 0xe5, 0xca, 0x95, 0x11, 0x23, 0x46, 0x28, 0x7e, 0xcf, 0x59, 0x64, 0x68, 0x3f, 0xc8, 0xcd, 0xcd, 0x25, 0xdb, 0xac, 0xd1, 0x68, 0x8e, 0x1d, 0x3b, 0x46, 0x4e, 0x1f, 0x7, 0xf, 0x1e, 0xe4, 0xd8, 0xbc, 0x7c, 0xf9, 0xf2, 0x84, 0x84, 0x84, 0xe1, 0xb0, 0x83, 0x32, 0x32, 0xf4, 0x50, 0xf9, 0xf1, 0xc7, 0x1f, 0x39, 0xf3, 0x90, 0xf6, 0xec, 0xd9, 0xb3, 0x60, 0xc1, 0x2, 0x5a, 0x5b, 0x51, 0x14, 0x93, 0x93, 0x93, 0xbb, 0xbb, 0xbb, 0x69, 0xcd, 0x93, 0x93, 0x93, 0x3b, 0x3a, 0x3a, 0xe4, 0x9e, 0x57, 0xd, 0xa1, 0xc3, 0x8c, 0x31, 0x63, 0xc6, 0xb4, 0xb7, 0xb7, 0xd3, 0xda, 0xe, 0xac, 0xb6, 0x41, 0xcb, 
0x91, 0x5b, 0xb6, 0x6c, 0x31, 0x18, 0xc, 0x9c, 0x93, 0xb7, 0xdb, 0xed, 0x81, 0x79, 0x3d, 0x11, 0x25, 0x47, 0x78, 0x50, 0x59, 0x59, 0x49, 0xb6, 0x59, 0x10, 0x4, 0x93, 0xc9, 0x44, 0x1e, 0x1e, 0xe9, 0xeb, 0xeb, 0x63, 0xda, 0xec, 0xab, 0x58, 0x86, 0xc9, 0x95, 0x82, 0xd0, 0x43, 0xfa, 0xc5, 0xe7, 0x4c, 0x4a, 0xbe, 0xfd, 0xf6, 0xdb, 0x39, 0x3, 0x14, 0x65, 0x65, 0x65, 0xfc, 0x8f, 0xf0, 0xd1, 0x47, 0x1f, 0xd, 0x93, 0x8b, 0x85, 0x92, 0xe3, 0xe6, 0x94, 0x97, 0x97, 0x2f, 0x5b, 0xb6, 0x8c, 0xdc, 0x9c, 0xf3, 0xd2, 0x5e, 0x7f, 0x7f, 0xbf, 0x56, 0xab, 0x95, 0x20, 0x6f, 0x45, 0x44, 0xc, 0x93, 0xb5, 0xd0, 0x91, 0xa1, 0x6f, 0x82, 0xcb, 0xe5, 0xe2, 0xd8, 0xbc, 0x70, 0xe1, 0x42, 0xce, 0x4b, 0x7b, 0x9c, 0x67, 0xec, 0xff, 0xf9, 0x91, 0x61, 0x3e, 0x94, 0x81, 0xd0, 0xa, 0xa1, 0xa0, 0xa0, 0x80, 0xdc, 0x56, 0xa3, 0xd1, 0xec, 0xd8, 0xb1, 0x83, 0xfc, 0x1b, 0xb8, 0x61, 0xc3, 0x6, 0x9, 0x97, 0x2, 0xdb, 0xb1, 0x63, 0xc7, 0xb0, 0xb8, 0x60, 0x98, 0x3a, 0x37, 0x8, 0x9f, 0x7c, 0xf2, 0x9, 0xa7, 0x6f, 0x8b, 0x8a, 0x8a, 0xc8, 0xa1, 0x45, 0x51, 0x94, 0x76, 0x99, 0xe7, 0x49, 0x93, 0x26, 0xd, 0x87, 0x4b, 0x86, 0x1a, 0x7a, 0x30, 0x62, 0x63, 0x63, 0xed, 0x76, 0x3b, 0xad, 0x6d, 0x5c, 0x5c, 0x1c, 0x61, 0xed, 0xd0, 0x1, 0xf2, 0xf3, 0xf3, 0xf7, 0xec, 0xd9, 0x23, 0xe1, 0x67, 0x89, 0x89, 0x89, 0x19, 0xe, 0x55, 0x7, 0x4a, 0x8e, 0x1b, 0xb2, 0x75, 0xeb, 0x56, 0xb2, 0xcd, 0xbe, 0x17, 0x60, 0xc9, 0x6d, 0xfb, 0xfb, 0xfb, 0x25, 0xdf, 0x74, 0xd0, 0x6e, 0xb7, 0xff, 0x7b, 0x69, 0x53, 0x94, 0x1c, 0xc3, 0xb, 0x87, 0xc3, 0x11, 0x13, 0x13, 0x43, 0xee, 0xd5, 0x59, 0xb3, 0x66, 0x71, 0xa2, 0x2f, 0x5e, 0xbc, 0x58, 0x8e, 0x6b, 0xbd, 0x71, 0xe3, 0x46, 0xc5, 0x5f, 0x38, 0x8, 0x7d, 0x7d, 0x56, 0xac, 0x58, 0xc1, 0x51, 0xc7, 0xe9, 0x74, 0x92, 0xdf, 0xa, 0x91, 0x6f, 0xe7, 0xd9, 0xe9, 0xd3, 0xa7, 0x2b, 0xfe, 0xc2, 0x61, 0x5d, 0x8e, 0xeb, 0xd0, 0xda, 0xda, 0xca, 0xd9, 0x4e, 0xaa, 0xa4, 0xa4, 0x84, 0x7c, 0x3f, 0xe7, 0xf5, 0x7a, 0xf3, 0xf2, 0xf2, 0x64, 0xfa, 0x5c, 0xa7, 0x4f, 0x9f, 0x56, 0xfc, 0xb5, 0xc3, 0x4d, 0xe1, 0x75, 0x98, 0x36, 0x6d, 
0x5a, 0x7d, 0x7d, 0x3d, 0xad, 0x6d, 0x54, 0x54, 0x54, 0x6f, 0x6f, 0xaf, 0x46, 0x43, 0xcc, 0x14, 0x87, 0xe, 0x1d, 0x9a, 0x33, 0x67, 0x8e, 0x7c, 0x1f, 0xad, 0xb1, 0xb1, 0x51, 0xda, 0x57, 0x6e, 0x71, 0x53, 0x18, 0xea, 0x5c, 0xbe, 0x7c, 0x99, 0x6c, 0xb3, 0x20, 0x8, 0xef, 0xbd, 0xf7, 0x1e, 0xd9, 0x66, 0x41, 0x10, 0xae, 0xbb, 0x4a, 0xb4, 0x84, 0x7c, 0xff, 0xfd, 0xf7, 0x18, 0xe5, 0x18, 0x5e, 0x3c, 0xf4, 0xd0, 0x43, 0xe4, 0xb6, 0xf1, 0xf1, 0xf1, 0x6, 0x83, 0x81, 0xfc, 0xa3, 0x57, 0x53, 0x53, 0x63, 0x36, 0x9b, 0x65, 0xfd, 0x74, 0x83, 0xac, 0x74, 0x8a, 0x92, 0x43, 0x81, 0x30, 0x27, 0x3d, 0x1b, 0x8d, 0x46, 0xf2, 0xc3, 0xea, 0xf6, 0xf6, 0xf6, 0xdb, 0x6e, 0xbb, 0x4d, 0xee, 0xcb, 0xa1, 0x52, 0xa9, 0x9c, 0x4e, 0x27, 0xe7, 0x37, 0x4, 0x42, 0x87, 0xd, 0xdd, 0xdd, 0xdd, 0x69, 0x69, 0x69, 0x4e, 0xa7, 0x93, 0xd6, 0x3c, 0x2b, 0x2b, 0x8b, 0x53, 0xab, 0xa4, 0xa7, 0xa7, 0xff, 0xf5, 0xd7, 0x5f, 0x1, 0xf8, 0x98, 0xbf, 0xfd, 0xf6, 0xdb, 0x3d, 0xf7, 0xdc, 0x83, 0x92, 0x63, 0x58, 0x14, 0x1b, 0x64, 0x9b, 0x55, 0x2a, 0xd5, 0x4d, 0x17, 0xc6, 0x1d, 0x84, 0xdd, 0xbb, 0x77, 0x7, 0xc6, 0x66, 0x41, 0x10, 0xbe, 0xf9, 0xe6, 0x1b, 0x25, 0x5f, 0x45, 0xc, 0x39, 0xfb, 0x38, 0x7c, 0xf8, 0x30, 0xa7, 0x1b, 0xe7, 0xcf, 0x9f, 0xcf, 0x89, 0x3e, 0x62, 0xc4, 0x88, 0x80, 0x5d, 0xf1, 0x3b, 0xee, 0xb8, 0x3, 0x73, 0x39, 0x94, 0xf, 0x67, 0xa8, 0x4e, 0xad, 0x56, 0x77, 0x75, 0x75, 0x91, 0xa5, 0xac, 0xaa, 0xaa, 0x5a, 0xb4, 0x68, 0x51, 0x20, 0x3f, 0xac, 0xc3, 0xe1, 0x88, 0x8a, 0x8a, 0x42, 0xc9, 0xa1, 0x58, 0xe, 0x1d, 0x3a, 0xc4, 0x29, 0x7f, 0x6b, 0x6b, 0x6b, 0xc9, 0x36, 0x7b, 0x3c, 0x1e, 0xce, 0x7c, 0x6b, 0x1a, 0x7f, 0xff, 0xfd, 0x37, 0x6a, 0x68, 0xc5, 0x22, 0x8a, 0x22, 0x67, 0xf4, 0x37, 0x3d, 0x3d, 0xfd, 0xd1, 0x47, 0x1f, 0x25, 0x37, 0x7f, 0xf7, 0xdd, 0x77, 0xc9, 0x93, 0xe0, 0xc8, 0xab, 0xd4, 0x55, 0x54, 0x54, 0xa0, 0x86, 0x56, 0x2c, 0x9b, 0x36, 0x6d, 0xe2, 0x74, 0x60, 0x5b, 0x5b, 0x1b, 0x39, 0x34, 0x73, 0xd4, 0x79, 0xdd, 0xba, 0x75, 0x69, 0x69, 0x69, 0x84, 0x86, 0x7a, 0xbd, 0x1e, 0x93, 0x93, 0x94, 0x89, 0xc5, 0x62, 0xe1, 
0x28, 0xb5, 0x62, 0xc5, 0xa, 0x4e, 0x74, 0xce, 0x98, 0x77, 0x4c, 0x4c, 0x8c, 0xc3, 0xe1, 0x20, 0x4f, 0xa2, 0x3a, 0x7b, 0xf6, 0x2c, 0x84, 0x56, 0x20, 0xf3, 0xe7, 0xcf, 0xe7, 0x8, 0x6d, 0xb5, 0x5a, 0xc9, 0xb3, 0xea, 0xfe, 0xf8, 0xe3, 0xf, 0x4e, 0xe8, 0xb2, 0xb2, 0x32, 0xaf, 0xd7, 0xdb, 0xd2, 0xd2, 0x42, 0x6b, 0xbe, 0x6c, 0xd9, 0x32, 0x8, 0xad, 0x34, 0xdc, 0x6e, 0x37, 0x47, 0x29, 0xa6, 0x13, 0x9c, 0x49, 0x42, 0xb1, 0xb1, 0xb1, 0x3, 0xc7, 0xa1, 0xcd, 0xdb, 0x4e, 0x4d, 0x4d, 0x55, 0xe4, 0x35, 0x1d, 0xd6, 0x37, 0x85, 0x9c, 0x79, 0x6d, 0x91, 0x91, 0x91, 0x9f, 0x7e, 0xfa, 0x29, 0x79, 0xd0, 0x73, 0xed, 0xda, 0xb5, 0xa7, 0x4e, 0x9d, 0x22, 0x47, 0xdf, 0xb2, 0x65, 0xcb, 0xc0, 0x9f, 0x9f, 0x7c, 0xf2, 0x49, 0xc2, 0x11, 0x98, 0x9b, 0x10, 0xe0, 0xa6, 0x30, 0xe4, 0xd8, 0xb8, 0x71, 0x23, 0xa7, 0xdf, 0x4a, 0x4a, 0x4a, 0xc8, 0xa1, 0x6d, 0x36, 0x1b, 0x67, 0xa5, 0x39, 0xdf, 0x94, 0xf, 0x1f, 0xa2, 0x28, 0x92, 0x4b, 0x97, 0xba, 0xba, 0x3a, 0x94, 0x1c, 0xa, 0xc1, 0xe1, 0x70, 0x70, 0x26, 0xe8, 0xa4, 0xa4, 0xa4, 0x70, 0xa2, 0x73, 0x86, 0xf9, 0x4, 0x41, 0x68, 0x6e, 0x6e, 0xfe, 0x4f, 0xe1, 0x4e, 0x3b, 0xce, 0x82, 0x5, 0xb, 0x20, 0x34, 0xee, 0x5, 0xff, 0x4f, 0x7d, 0x7d, 0x3d, 0xf9, 0x5e, 0xf0, 0xc4, 0x89, 0x13, 0x9c, 0xd0, 0x8f, 0x3d, 0xf6, 0xd8, 0xb5, 0xc7, 0xbc, 0xf3, 0xce, 0x3b, 0x31, 0x78, 0x37, 0x7c, 0x85, 0x66, 0xe, 0x2f, 0x94, 0x96, 0x96, 0x72, 0xa2, 0xd3, 0xe4, 0xf3, 0x91, 0x90, 0x90, 0xe0, 0x70, 0x38, 0xae, 0xfd, 0x2e, 0xd1, 0x96, 0xae, 0x53, 0xa9, 0x54, 0x76, 0xbb, 0x1d, 0x42, 0x87, 0x3d, 0x4b, 0x96, 0x2c, 0xe1, 0x8, 0xed, 0x76, 0xbb, 0xc9, 0xa1, 0x1b, 0x1b, 0x1b, 0x39, 0xa1, 0xab, 0xab, 0xab, 0xa5, 0xfd, 0x8a, 0xfa, 0xd6, 0x60, 0x87, 0xd0, 0x61, 0xcc, 0xd9, 0xb3, 0x67, 0x39, 0x4a, 0x7d, 0xfc, 0xf1, 0xc7, 0xe4, 0xd0, 0x1e, 0x8f, 0x87, 0xf6, 0x60, 0x6f, 0x60, 0xa0, 0xed, 0x46, 0x75, 0xe, 0x79, 0xfd, 0x90, 0x82, 0x82, 0x2, 0x8, 0x1d, 0xc6, 0x88, 0xa2, 0xc8, 0x59, 0x3a, 0x31, 0x21, 0x21, 0xc1, 0xe3, 0xf1, 0x90, 0xa3, 0x57, 0x56, 0x56, 0x72, 0xbe, 0x4b, 0xa7, 0x4e, 0x9d, 0x1a, 0xe4, 0xe0, 
0x93, 0x27, 0x4f, 0x26, 0x1c, 0x73, 0xd4, 0xa8, 0x51, 0x10, 0x3a, 0x8c, 0xa9, 0xa9, 0xa9, 0xe1, 0x28, 0x65, 0x32, 0x99, 0xc8, 0xa1, 0xc9, 0x8f, 0xf4, 0x7c, 0xe4, 0xe6, 0xe6, 0xe, 0x7e, 0xfc, 0x6d, 0xdb, 0xb6, 0xd1, 0x8e, 0xdc, 0xd5, 0xd5, 0x5, 0xa1, 0xc3, 0x35, 0x3d, 0x73, 0x76, 0xcd, 0x79, 0xf8, 0xe1, 0x87, 0x7d, 0x7, 0xa1, 0x45, 0x4f, 0x49, 0x49, 0x21, 0x87, 0x56, 0xab, 0xd5, 0xdd, 0xdd, 0xdd, 0x83, 0x84, 0x16, 0x45, 0xf1, 0xcf, 0x3f, 0xff, 0xa4, 0x1d, 0xbc, 0xbc, 0xbc, 0x5c, 0x49, 0x57, 0x79, 0x18, 0x3d, 0x29, 0x7c, 0xfd, 0xf5, 0xd7, 0x39, 0x6f, 0x33, 0x7c, 0xf5, 0xd5, 0x57, 0xe4, 0x6d, 0x25, 0x2a, 0x2a, 0x2a, 0x2e, 0x5f, 0xbe, 0x4c, 0xe, 0x6d, 0x30, 0x18, 0x46, 0x8e, 0x1c, 0x39, 0x48, 0x68, 0x95, 0x4a, 0x95, 0x9e, 0x9e, 0x4e, 0xdb, 0x45, 0x65, 0xef, 0xde, 0xbd, 0x78, 0x52, 0x18, 0x7e, 0x98, 0xcd, 0x66, 0xce, 0xc3, 0xb9, 0x19, 0x33, 0x66, 0x70, 0xa2, 0xa7, 0xa6, 0xa6, 0x72, 0xd2, 0xb3, 0xd3, 0xe9, 0x1c, 0x4a, 0x94, 0x7b, 0xef, 0xbd, 0x97, 0x70, 0xfc, 0xe4, 0xe4, 0x64, 0x64, 0xe8, 0xf0, 0x63, 0xda, 0xb4, 0x69, 0xe4, 0x2d, 0x6, 0x55, 0x2a, 0x55, 0x6d, 0x6d, 0x2d, 0x39, 0xbb, 0x97, 0x95, 0x95, 0x71, 0x26, 0x4e, 0x18, 0xc, 0x86, 0x21, 0x2e, 0x2c, 0x46, 0xdb, 0xf, 0xbc, 0xb3, 0xb3, 0x93, 0xf3, 0xeb, 0x81, 0xc, 0x1d, 0x4, 0x98, 0xbf, 0xaa, 0xcf, 0x3e, 0xfb, 0x2c, 0x39, 0xb4, 0xdb, 0xed, 0xe6, 0x6c, 0x92, 0x12, 0x13, 0x13, 0x33, 0xf4, 0x3b, 0x4, 0x72, 0x19, 0xbd, 0x77, 0xef, 0x5e, 0xdc, 0x14, 0x86, 0x13, 0x9, 0x9, 0x9, 0x9c, 0x59, 0x75, 0x9c, 0xd0, 0xab, 0x56, 0xad, 0xe2, 0x7c, 0x97, 0x8e, 0x1f, 0x3f, 0xee, 0xd7, 0x6d, 0x68, 0x7c, 0x7c, 0x3c, 0x21, 0xca, 0xbc, 0x79, 0xf3, 0x20, 0x74, 0xd8, 0x50, 0x58, 0x58, 0xc8, 0x51, 0x6a, 0xdf, 0xbe, 0x7d, 0xe4, 0x91, 0x8d, 0xb, 0x17, 0x2e, 0x70, 0x42, 0xbf, 0xfd, 0xf6, 0xdb, 0xfe, 0x46, 0x24, 0x4f, 0x7b, 0x72, 0xb9, 0x5c, 0x10, 0x3a, 0xc, 0xe8, 0xec, 0xec, 0xe4, 0x28, 0xb5, 0x6a, 0xd5, 0x2a, 0x4e, 0xf4, 0x99, 0x33, 0x67, 0x92, 0x43, 0x6b, 0x34, 0x9a, 0x21, 0xde, 0xb, 0xfe, 0x1b, 0xf2, 0x2a, 0xc0, 0x1d, 0x1d, 0x1d, 0x10, 0x3a, 0xc, 
0x28, 0x2e, 0x2e, 0xe6, 0x8, 0xdd, 0xd9, 0xd9, 0x19, 0xac, 0xef, 0x52, 0x71, 0x71, 0x31, 0x21, 0x28, 0x79, 0x7d, 0x82, 0x6f, 0xbf, 0xfd, 0x16, 0x42, 0x87, 0x3a, 0x57, 0xae, 0x5c, 0xe1, 0x28, 0xe5, 0x5b, 0x47, 0x94, 0x86, 0xc7, 0xe3, 0xe1, 0xbc, 0x61, 0xa5, 0xd5, 0x6a, 0x3d, 0x1e, 0xf, 0xad, 0xd4, 0xd1, 0xe9, 0x74, 0x84, 0x88, 0xf, 0x3c, 0xf0, 0x0, 0x84, 0xe, 0x75, 0x38, 0xf3, 0xe8, 0x23, 0x23, 0x23, 0xed, 0x76, 0x3b, 0xb9, 0x7a, 0x5e, 0xbd, 0x7a, 0x35, 0xe7, 0xbb, 0x54, 0x55, 0x55, 0x45, 0xfe, 0xd4, 0x33, 0x66, 0xcc, 0xa0, 0x5, 0xed, 0xeb, 0xeb, 0x83, 0xd0, 0xa1, 0x4b, 0x53, 0x53, 0x13, 0x47, 0x29, 0xdf, 0x3b, 0xd5, 0x34, 0x7a, 0x7a, 0x7a, 0x38, 0xa1, 0x27, 0x4f, 0x9e, 0x4c, 0xe, 0x2d, 0x8a, 0x62, 0x55, 0x55, 0x15, 0x2d, 0xee, 0xce, 0x9d, 0x3b, 0x21, 0x74, 0x88, 0xe2, 0x74, 0x3a, 0x39, 0x43, 0x75, 0x19, 0x19, 0x19, 0x9c, 0x69, 0x1b, 0x59, 0x59, 0x59, 0xe4, 0xd0, 0x2a, 0x95, 0xaa, 0xa5, 0xa5, 0x85, 0x1c, 0xda, 0x7, 0xed, 0x99, 0xe8, 0xb8, 0x71, 0xe3, 0x14, 0x70, 0xe9, 0x95, 0xf9, 0xa4, 0x30, 0x3b, 0x3b, 0xfb, 0xea, 0xd5, 0xab, 0xe4, 0xe6, 0xbe, 0x1d, 0x2f, 0x69, 0xd3, 0x36, 0x4c, 0x26, 0x53, 0x43, 0x43, 0x3, 0x39, 0xf4, 0xdc, 0xb9, 0x73, 0xd3, 0xd3, 0xd3, 0x39, 0x93, 0xa8, 0x4, 0x41, 0xa0, 0xd5, 0x5a, 0xe4, 0xe7, 0x32, 0x78, 0x52, 0x28, 0x7b, 0x7a, 0x56, 0xab, 0xd5, 0xe4, 0xe, 0x61, 0x2e, 0x58, 0x41, 0x2e, 0x61, 0x7d, 0x98, 0xcd, 0x66, 0x7e, 0xf, 0x90, 0xb7, 0xa0, 0xb5, 0x58, 0x2c, 0xc8, 0xd0, 0x21, 0x47, 0x4e, 0x4e, 0x8e, 0xc7, 0xe3, 0x21, 0x37, 0xf7, 0x4d, 0x7a, 0x26, 0x3f, 0xd8, 0xe3, 0xbc, 0x3, 0x9b, 0x97, 0x97, 0x97, 0x98, 0x98, 0xc8, 0xef, 0x1, 0xf2, 0xb6, 0x18, 0xcc, 0xbd, 0xcd, 0x91, 0xa1, 0xa5, 0xa7, 0xae, 0xae, 0x8e, 0xd3, 0x1b, 0xbe, 0x49, 0xcf, 0xe4, 0x1b, 0xb2, 0x5b, 0x6f, 0xbd, 0x95, 0x1c, 0x7a, 0xf4, 0xe8, 0xd1, 0x12, 0xf6, 0x3, 0xad, 0x68, 0xc9, 0xcc, 0xcc, 0xc4, 0x4d, 0x61, 0x68, 0x91, 0x91, 0x91, 0x41, 0x56, 0x2a, 0x29, 0x29, 0x89, 0xb3, 0x3, 0x2c, 0xf9, 0x9d, 0x11, 0x1f, 0xad, 0xad, 0xad, 0x12, 0xf6, 0xc3, 0x53, 0x4f, 0x3d, 0x45, 0x38, 0x87, 0xe8, 
0xe8, 0x68, 0x8, 0x1d, 0x42, 0xec, 0xda, 0xb5, 0x8b, 0xa3, 0xd4, 0x99, 0x33, 0x67, 0xc8, 0xa1, 0x6d, 0x36, 0x1b, 0x27, 0xf4, 0x33, 0xcf, 0x3c, 0x23, 0x6d, 0x57, 0x9c, 0x3c, 0x79, 0x92, 0x76, 0x26, 0x83, 0xbf, 0xb9, 0x8, 0xa1, 0x3, 0x7, 0xf9, 0xcd, 0x67, 0x1f, 0x73, 0xe6, 0xcc, 0x9, 0x7c, 0x46, 0x1c, 0xa0, 0xa7, 0xa7, 0x47, 0xda, 0xde, 0x20, 0x7f, 0xc1, 0x56, 0xae, 0x5c, 0x9, 0xa1, 0x43, 0x82, 0xea, 0xea, 0x6a, 0x8e, 0x52, 0x8d, 0x8d, 0x8d, 0xe4, 0xd0, 0xcd, 0xcd, 0xcd, 0x9c, 0xd0, 0x4b, 0x96, 0x2c, 0x91, 0xbc, 0x37, 0x3c, 0x1e, 0xf, 0xed, 0x19, 0x78, 0x56, 0x56, 0x16, 0x84, 0xe, 0x3e, 0xcc, 0x85, 0x71, 0x99, 0xe9, 0x39, 0x36, 0x36, 0x96, 0x1c, 0x5a, 0xad, 0x56, 0x73, 0x9e, 0xb1, 0xf, 0xc2, 0xf3, 0xcf, 0x3f, 0x4f, 0x38, 0x9f, 0xa8, 0xa8, 0x28, 0xc, 0xdb, 0x5, 0x9f, 0x17, 0x5f, 0x7c, 0x91, 0xf3, 0x70, 0x6e, 0xd7, 0xae, 0x5d, 0xe4, 0xa1, 0xba, 0x6d, 0xdb, 0xb6, 0x91, 0x37, 0x49, 0x11, 0x4, 0x61, 0xfd, 0xfa, 0xf5, 0xd1, 0xd1, 0xd1, 0xcc, 0x27, 0x29, 0xd7, 0xa5, 0xa0, 0xa0, 0x80, 0xd0, 0xca, 0xe9, 0x74, 0xfe, 0xf2, 0xcb, 0x2f, 0x18, 0xb6, 0xb, 0x26, 0xcc, 0x2d, 0x70, 0xde, 0x78, 0xe3, 0xd, 0x72, 0x68, 0x97, 0xcb, 0x35, 0xc4, 0x17, 0xfe, 0x6e, 0x34, 0xae, 0x22, 0x5f, 0xb7, 0x58, 0xad, 0x56, 0xda, 0x59, 0xad, 0x59, 0xb3, 0x6, 0x25, 0x47, 0x30, 0xe1, 0xfc, 0xe2, 0xc7, 0xc7, 0xc7, 0x73, 0xa6, 0x6d, 0x2c, 0x5e, 0xbc, 0x98, 0xf3, 0x5d, 0x32, 0x99, 0x4c, 0x72, 0x14, 0x1b, 0x3, 0x64, 0x66, 0x66, 0x92, 0xa7, 0xb2, 0xa0, 0xe4, 0x8, 0x5a, 0xb1, 0xc1, 0xf9, 0xc5, 0xff, 0xf2, 0xcb, 0x2f, 0xc9, 0xd3, 0x36, 0x5a, 0x5a, 0x5a, 0x38, 0xab, 0x7b, 0x4d, 0x9c, 0x38, 0x71, 0xe6, 0xcc, 0x99, 0x72, 0x14, 0x1b, 0x3, 0xd0, 0x56, 0xd, 0x6e, 0x69, 0x69, 0x21, 0x67, 0x77, 0x94, 0x1c, 0xc1, 0x4c, 0xcf, 0x1a, 0x8d, 0x86, 0x13, 0x9a, 0xb9, 0x61, 0xe6, 0xef, 0xbf, 0xff, 0x2e, 0x77, 0xe7, 0x1c, 0x3c, 0x78, 0x90, 0x76, 0x6e, 0xd, 0xd, 0xd, 0xc8, 0xd0, 0x41, 0xe0, 0xb5, 0xd7, 0x5e, 0xe3, 0xa4, 0xe7, 0x23, 0x47, 0x8e, 0x90, 0xef, 0x5, 0xcf, 0x9d, 0x3b, 0xf7, 0xf9, 0xe7, 0x9f, 0x93, 0x43, 0x6f, 0xde, 0xbc, 
0x79, 0xd2, 0xa4, 0x49, 0x72, 0xf7, 0xf, 0xf9, 0x15, 0x7, 0xa3, 0xd1, 0x88, 0xc, 0x1d, 0x68, 0xda, 0xdb, 0xdb, 0x39, 0x1f, 0x7c, 0xdd, 0xba, 0x75, 0xe4, 0xd0, 0xa2, 0x28, 0x4e, 0x9d, 0x3a, 0x95, 0x1c, 0x3a, 0x2e, 0x2e, 0x8e, 0xb3, 0xc8, 0xb4, 0x5f, 0x8c, 0x1f, 0x3f, 0x9e, 0x70, 0x86, 0x69, 0x69, 0x69, 0xb8, 0x29, 0xc, 0x34, 0x9c, 0x5d, 0x2b, 0xa3, 0xa3, 0xa3, 0x39, 0x2f, 0xee, 0x1f, 0x3d, 0x7a, 0x94, 0xf3, 0x5d, 0xa, 0xe4, 0x32, 0xe3, 0xaf, 0xbc, 0xf2, 0xa, 0xed, 0x24, 0xad, 0x56, 0x2b, 0x84, 0xe, 0x1c, 0xe4, 0xb9, 0xa, 0x3e, 0xf6, 0xef, 0xdf, 0x4f, 0xe, 0xdd, 0xdd, 0xdd, 0xcd, 0xd9, 0x70, 0xe8, 0xfe, 0xfb, 0xef, 0xe7, 0x8c, 0xab, 0xf8, 0xfb, 0x4b, 0x42, 0x7e, 0x15, 0xed, 0xa7, 0x9f, 0x7e, 0x82, 0xd0, 0x1, 0xa2, 0xbf, 0xbf, 0x9f, 0xb3, 0xbe, 0xd6, 0xdd, 0x77, 0xdf, 0xcd, 0x51, 0x6a, 0xc2, 0x84, 0x9, 0x9c, 0xef, 0xd2, 0xf9, 0xf3, 0xe7, 0x3, 0xdc, 0x5d, 0xb4, 0x55, 0x49, 0x17, 0x2d, 0x5a, 0x14, 0x8e, 0x6e, 0xa8, 0x38, 0x2b, 0xcc, 0x2, 0x10, 0x6a, 0x44, 0xa0, 0xb, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0xca, 0xe7, 0x7f, 0x1, 0x0, 0x0, 0xff, 0xff, 0x1a, 0xd5, 0xb5, 0x9c, 0xcd, 0x97, 0x3e, 0x9f, 0x0, 0x0, 0x0, 0x0, 0x49, 0x45, 0x4e, 0x44, 0xae, 
0x42, 0x60, 0x82}
diff --git a/tools/debug/doberman/main.go b/tools/debug/doberman/main.go
deleted file mode 100644
index 70899f3c8..000000000
--- a/tools/debug/doberman/main.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-// doberman will tell you when there's something wrong with the system
-package main
-
-import (
- "bufio"
- "encoding/json"
- "flag"
- "fmt"
- "io"
- "os"
- "os/exec"
- "strings"
- "time"
-
- "github.com/gen2brain/beeep"
-)
-
-const mtbNotify = 1 * time.Second
-const retireIn = 100 * time.Millisecond
-
-var tfname string
-
-var testing = flag.Bool("test", false, "test doberman by sending a notification")
-var filename = flag.String("file", "", "name of log file")
-
-func notify(title string, body string) {
- err := beeep.Notify(title, body, tfname)
- if err != nil {
- panic(err)
- }
-}
-
-func main() {
- var err error
-
- flag.Parse()
- if !*testing && *filename == "" {
- fmt.Println("need log file name")
- return
- }
-
- // write logo
- tf, err := os.CreateTemp("", "algorand-logo.png")
- if err != nil {
- panic(err)
- }
- tfname = tf.Name()
- defer func() {
- time.Sleep(retireIn)
- os.Remove(tfname)
- }()
- _, err = tf.Write(logo)
- if err != nil {
- panic(err)
- }
-
- if *testing {
- notify("doberman: TESTING", "woof woof woof")
- return
- }
-
- var input io.ReadCloser
- cmd := exec.Command("tail", "-F", *filename)
- input, err = cmd.StdoutPipe()
- if err != nil {
- panic(err)
- }
- err = cmd.Start()
- if err != nil {
- panic(err)
- }
-
- scanner := bufio.NewScanner(input)
- for scanner.Scan() {
- var obj map[string]interface{}
- line := scanner.Text()
- dec := json.NewDecoder(strings.NewReader(line))
- err := dec.Decode(&obj)
- if err != nil {
- panic(err)
- }
-
- if !(obj["level"] == "warning" || obj["level"] == "error" || obj["level"] == "fatal") {
- continue
- }
-
- fmt.Println(line)
- str := fmt.Sprintf("doberman: %v", obj["level"])
- notify(str, obj["msg"].(string))
-
- time.Sleep(mtbNotify) // throttling
- }
-}
diff --git a/tools/network/resolver_test.go b/tools/network/resolver_test.go
index 9e2de0af9..a00f07ea3 100644
--- a/tools/network/resolver_test.go
+++ b/tools/network/resolver_test.go
@@ -32,10 +32,10 @@ func TestResolver(t *testing.T) {
// start with a resolver that has no specific DNS address defined.
// we want to make sure that it will go to the default DNS server ( 8.8.8.8 )
resolver := Resolver{}
- cname, addrs, err := resolver.LookupSRV(context.Background(), "jabber", "tcp", "gmail.com")
+ cname, addrs, err := resolver.LookupSRV(context.Background(), "telemetry", "tls", "devnet.algodev.network")
require.NoError(t, err)
- require.Equal(t, "_jabber._tcp.gmail.com.", cname)
- require.True(t, len(addrs) > 3)
+ require.Equal(t, "_telemetry._tls.devnet.algodev.network.", cname)
+ require.True(t, len(addrs) == 1)
require.Equal(t, defaultDNSAddress, resolver.EffectiveResolverDNS())
// specify a specific resolver to work with ( cloudflare DNS server is 1.1.1.1 )
@@ -43,10 +43,10 @@ func TestResolver(t *testing.T) {
resolver = Resolver{
dnsAddress: *cloudFlareIPAddr,
}
- cname, addrs, err = resolver.LookupSRV(context.Background(), "jabber", "tcp", "gmail.com")
+ cname, addrs, err = resolver.LookupSRV(context.Background(), "telemetry", "tls", "devnet.algodev.network")
require.NoError(t, err)
- require.Equal(t, "_jabber._tcp.gmail.com.", cname)
- require.True(t, len(addrs) > 3)
+ require.Equal(t, "_telemetry._tls.devnet.algodev.network.", cname)
+ require.True(t, len(addrs) == 1)
require.Equal(t, "1.1.1.1", resolver.EffectiveResolverDNS())
// specify an invalid dns resolver ip address and examine the fail case.
@@ -56,7 +56,7 @@ func TestResolver(t *testing.T) {
}
timingOutContext, timingOutContextFunc := context.WithTimeout(context.Background(), time.Duration(100)*time.Millisecond)
defer timingOutContextFunc()
- cname, addrs, err = resolver.LookupSRV(timingOutContext, "jabber", "tcp", "gmail.com")
+ cname, addrs, err = resolver.LookupSRV(timingOutContext, "telemetry", "tls", "devnet.algodev.network")
require.Error(t, err)
require.Equal(t, "", cname)
require.True(t, len(addrs) == 0)
diff --git a/util/db/dbutil.go b/util/db/dbutil.go
index 551a32f34..34a32320a 100644
--- a/util/db/dbutil.go
+++ b/util/db/dbutil.go
@@ -25,6 +25,7 @@ import (
"database/sql"
"errors"
"fmt"
+ "os"
"reflect"
"runtime"
"strings"
@@ -209,14 +210,15 @@ func (db *Accessor) IsSharedCacheConnection() bool {
// Atomic executes a piece of code with respect to the database atomically.
// For transactions where readOnly is false, sync determines whether or not to wait for the result.
// The return error of fn should be a native sqlite3.Error type or an error wrapping it.
-// DO NOT return a custom error - the internal logic of Atmoic expects an sqlite error and uses that value.
+// DO NOT return a custom error - the internal logic of Atomic expects an sqlite error and uses that value.
func (db *Accessor) Atomic(fn idemFn, extras ...interface{}) (err error) {
- return db.atomic(fn, extras...)
+ return db.AtomicContext(context.Background(), fn, extras...)
}
-// Atomic executes a piece of code with respect to the database atomically.
+// AtomicContext executes a piece of code with respect to the database atomically.
// For transactions where readOnly is false, sync determines whether or not to wait for the result.
-func (db *Accessor) atomic(fn idemFn, extras ...interface{}) (err error) {
+// Like for Atomic, the return error of fn should be a native sqlite3.Error type or an error wrapping it.
+func (db *Accessor) AtomicContext(ctx context.Context, fn idemFn, extras ...interface{}) (err error) {
atomicDeadline := time.Now().Add(time.Second)
// note that the sql library will drop panics inside an active transaction
@@ -228,6 +230,12 @@ func (db *Accessor) atomic(fn idemFn, extras ...interface{}) (err error) {
if !ok {
err = fmt.Errorf("%v", r)
}
+
+ buf := make([]byte, 16*1024)
+ stlen := runtime.Stack(buf, false)
+ errstr := string(buf[:stlen])
+ fmt.Fprintf(os.Stderr, "recovered panic in atomic: %s", errstr)
+
}
}()
@@ -237,7 +245,6 @@ func (db *Accessor) atomic(fn idemFn, extras ...interface{}) (err error) {
var tx *sql.Tx
var conn *sql.Conn
- ctx := context.Background()
for i := 0; (i == 0) || dbretry(err); i++ {
if i > 0 {
@@ -309,7 +316,7 @@ func (db *Accessor) atomic(fn idemFn, extras ...interface{}) (err error) {
return
}
-// ResetTransactionWarnDeadline allow the atomic function to extend it's warn deadline by setting a new deadline.
+// ResetTransactionWarnDeadline allow the atomic function to extend its warn deadline by setting a new deadline.
// The Accessor can be copied and therefore isn't suitable for multi-threading directly,
// however, the transaction context and transaction object can be used to uniquely associate the request
// with a particular deadline.
diff --git a/util/metrics/counter_test.go b/util/metrics/counter_test.go
index 75512b80e..ec253f150 100644
--- a/util/metrics/counter_test.go
+++ b/util/metrics/counter_test.go
@@ -39,7 +39,7 @@ func TestMetricCounter(t *testing.T) {
}
// create a http listener.
- port := test.createListener(":0")
+ port := test.createListener("127.0.0.1:0")
metricService := MakeMetricService(&ServiceConfig{
NodeExporterListenAddress: fmt.Sprintf("localhost:%d", port),
@@ -85,7 +85,7 @@ func TestMetricCounterFastInts(t *testing.T) {
}
// create a http listener.
- port := test.createListener(":0")
+ port := test.createListener("127.0.0.1:0")
metricService := MakeMetricService(&ServiceConfig{
NodeExporterListenAddress: fmt.Sprintf("localhost:%d", port),
@@ -132,7 +132,7 @@ func TestMetricCounterMixed(t *testing.T) {
}
// create a http listener.
- port := test.createListener(":0")
+ port := test.createListener("127.0.0.1:0")
metricService := MakeMetricService(&ServiceConfig{
NodeExporterListenAddress: fmt.Sprintf("localhost:%d", port),
diff --git a/util/metrics/gauge_test.go b/util/metrics/gauge_test.go
index 2268fbe6f..41a9edb92 100644
--- a/util/metrics/gauge_test.go
+++ b/util/metrics/gauge_test.go
@@ -38,7 +38,7 @@ func TestMetricGauge(t *testing.T) {
MetricTest: NewMetricTest(),
}
// create a http listener.
- port := test.createListener(":0")
+ port := test.createListener("127.0.0.1:0")
metricService := MakeMetricService(&ServiceConfig{
NodeExporterListenAddress: fmt.Sprintf("localhost:%d", port),
diff --git a/util/metrics/metrics.go b/util/metrics/metrics.go
index f9965d001..d21e8b4c7 100644
--- a/util/metrics/metrics.go
+++ b/util/metrics/metrics.go
@@ -91,4 +91,31 @@ var (
TransactionMessagesDroppedFromBacklog = MetricName{Name: "algod_transaction_messages_dropped_backlog", Description: "Number of transaction messages dropped from backlog"}
// TransactionMessagesDroppedFromPool "Number of transaction messages dropped from pool"
TransactionMessagesDroppedFromPool = MetricName{Name: "algod_transaction_messages_dropped_pool", Description: "Number of transaction messages dropped from pool"}
+ // TransactionMessagesAlreadyCommitted "Number of duplicate or error transaction messages before placing into a backlog"
+ TransactionMessagesAlreadyCommitted = MetricName{Name: "algod_transaction_messages_err_or_committed", Description: "Number of duplicate or error transaction messages after TX handler backlog"}
+ // TransactionMessagesTxGroupInvalidFee "Number of transaction messages with invalid txgroup fee"
+ TransactionMessagesTxGroupInvalidFee = MetricName{Name: "algod_transaction_messages_txgroup_invalid_fee", Description: "Number of transaction messages with invalid txgroup fee"}
+ // TransactionMessagesTxnNotWellFormed "Number of transaction messages not well formed"
+ TransactionMessagesTxnNotWellFormed = MetricName{Name: "algod_transaction_messages_txn_notwell_formed", Description: "Number of transaction messages not well formed"}
+ // TransactionMessagesTxnSigNotWellFormed "Number of transaction messages with bad formed signature"
+ TransactionMessagesTxnSigNotWellFormed = MetricName{Name: "algod_transaction_messages_sig_bad_formed", Description: "Number of transaction messages with bad formed signature"}
+ // TransactionMessagesTxnMsigNotWellFormed "Number of transaction messages with bad formed multisig"
+ TransactionMessagesTxnMsigNotWellFormed = MetricName{Name: "algod_transaction_messages_msig_bas_formed", Description: "Number of transaction messages with bad formed msig"}
+ // TransactionMessagesTxnLogicSig "Number of transaction messages with invalid logic sig"
+ TransactionMessagesTxnLogicSig = MetricName{Name: "algod_transaction_messages_logic_sig_failed", Description: "Number of transaction messages with invalid logic sig"}
+ // TransactionMessagesTxnSigVerificationFailed "Number of transaction messages with signature verification failed"
+ TransactionMessagesTxnSigVerificationFailed = MetricName{Name: "algod_transaction_messages_sig_verify_failed", Description: "Number of transaction messages with signature verification failed"}
+ // TransactionMessagesBacklogErr "Number of transaction messages with some validation error"
+ TransactionMessagesBacklogErr = MetricName{Name: "algod_transaction_messages_backlog_err", Description: "Number of transaction messages with some validation error"}
+ // TransactionMessagesRemember "Number of transaction messages remembered in TX handler"
+ TransactionMessagesRemember = MetricName{Name: "algod_transaction_messages_remember", Description: "Number of transaction messages remembered in TX handler"}
+ // TransactionMessagesBacklogSize "Number of transaction messages in the TX handler backlog queue"
+ TransactionMessagesBacklogSize = MetricName{Name: "algod_transaction_messages_backlog_size", Description: "Number of transaction messages in the TX handler backlog queue"}
+
+ // TransactionGroupTxSyncHandled "Number of transaction groups handled via txsync"
+ TransactionGroupTxSyncHandled = MetricName{Name: "algod_transaction_group_txsync_handled", Description: "Number of transaction groups handled via txsync"}
+ // TransactionGroupTxSyncRemember "Number of transaction groups remembered via txsync"
+ TransactionGroupTxSyncRemember = MetricName{Name: "algod_transaction_group_txsync_remember", Description: "Number of transaction groups remembered via txsync"}
+ // TransactionGroupTxSyncAlreadyCommitted "Number of duplicate or error transaction groups received via txsync"
+ TransactionGroupTxSyncAlreadyCommitted = MetricName{Name: "algod_transaction_group_txsync_err_or_committed", Description: "Number of duplicate or error transaction groups received via txsync"}
)
diff --git a/util/tcpinfo.go b/util/tcpinfo.go
new file mode 100644
index 000000000..2b4c69d29
--- /dev/null
+++ b/util/tcpinfo.go
@@ -0,0 +1,72 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package util
+
+import (
+ "errors"
+ "net"
+ "syscall"
+)
+
+// TCPInfo provides socket-level TCP information.
+type TCPInfo struct {
+ RTT uint32 `json:",omitempty"` // smoothed RTT
+ RTTVar uint32 `json:",omitempty"` // RTT variance
+ RTTMin uint32 `json:",omitempty"` // smallest observed RTT on the connection
+ SndMSS, RcvMSS uint32 `json:",omitempty"` // send and receive maximum segment size
+ SndCwnd uint32 `json:",omitempty"` // sender congestion window
+ SndWnd uint32 `json:",omitempty"` // send window advertised to receiver
+ RcvWnd uint32 `json:",omitempty"` // receive window advertised to sender
+ // tcpi_delivery_rate: The most recent goodput, as measured by
+ // tcp_rate_gen(). If the socket is limited by the sending
+ // application (e.g., no data to send), it reports the highest
+ // measurement instead of the most recent. The unit is bytes per
+ // second (like other rate fields in tcp_info).
+ Rate uint64 `json:",omitempty"`
+ // tcpi_delivery_rate_app_limited: A boolean indicating if the goodput
+ // was measured when the socket's throughput was limited by the
+ // sending application.
+ AppLimited bool `json:",omitempty"`
+}
+
+var (
+ // ErrNotSyscallConn is reported when GetConnTCPInfo is passed a connection that doesn't satisfy the syscall.Conn interface.
+ ErrNotSyscallConn = errors.New("conn doesn't satisfy syscall.Conn")
+ // ErrTCPInfoUnsupported is reported if TCP information is not available for this platform.
+ ErrTCPInfoUnsupported = errors.New("GetConnRTT not supported on this platform")
+ // ErrNoTCPInfo is reported if getsockopt returned no TCP info for some reason.
+ ErrNoTCPInfo = errors.New("getsockopt returned no TCP info")
+)
+
+// GetConnTCPInfo returns statistics for a TCP connection collected by the
+// underlying network implementation, using a system call on Linux and Mac
+// and returning an error for unsupported platforms.
+func GetConnTCPInfo(conn net.Conn) (*TCPInfo, error) {
+ if conn == nil {
+ return nil, ErrNotSyscallConn
+ }
+ sysconn, ok := conn.(syscall.Conn)
+ if sysconn == nil || !ok {
+ return nil, ErrNotSyscallConn
+ }
+ raw, err := sysconn.SyscallConn()
+ if err != nil {
+ return nil, err
+ }
+
+ return getConnTCPInfo(raw)
+}
diff --git a/util/tcpinfo_darwin.go b/util/tcpinfo_darwin.go
new file mode 100644
index 000000000..cae19d06d
--- /dev/null
+++ b/util/tcpinfo_darwin.go
@@ -0,0 +1,49 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package util
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+func getConnTCPInfo(raw syscall.RawConn) (*TCPInfo, error) {
+ var info *unix.TCPConnectionInfo
+ var getSockoptErr error
+ err := raw.Control(func(fd uintptr) {
+ info, getSockoptErr = unix.GetsockoptTCPConnectionInfo(int(fd), unix.IPPROTO_TCP, unix.TCP_CONNECTION_INFO)
+ })
+ if err != nil {
+ return nil, err
+ }
+ if getSockoptErr != nil {
+ return nil, getSockoptErr
+ }
+ if info == nil {
+ return nil, ErrNoTCPInfo
+ }
+ return &TCPInfo{
+ RTT: info.Srtt,
+ RTTVar: info.Rttvar,
+ SndMSS: info.Maxseg, // MSS is the same for snd/rcv according bsd/netinet/tcp_usrreq.c
+ RcvMSS: info.Maxseg,
+ SndCwnd: info.Snd_cwnd, // Send congestion window
+ SndWnd: info.Snd_wnd, // Advertised send window
+ RcvWnd: info.Rcv_wnd, // Advertised recv window
+ }, nil
+}
diff --git a/util/tcpinfo_linux.go b/util/tcpinfo_linux.go
new file mode 100644
index 000000000..3da707e1c
--- /dev/null
+++ b/util/tcpinfo_linux.go
@@ -0,0 +1,129 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package util
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+func getConnTCPInfo(raw syscall.RawConn) (*TCPInfo, error) {
+ var info linuxTCPInfo
+ size := unsafe.Sizeof(info)
+
+ var errno syscall.Errno
+ err := raw.Control(func(fd uintptr) {
+ _, _, errno = unix.Syscall6(unix.SYS_GETSOCKOPT, fd, unix.IPPROTO_TCP, unix.TCP_INFO,
+ uintptr(unsafe.Pointer(&info)), uintptr(unsafe.Pointer(&size)), 0)
+ })
+ if err != nil {
+ return nil, err
+ }
+ if errno != 0 {
+ return nil, errno
+ }
+ if info == (linuxTCPInfo{}) {
+ return nil, ErrNoTCPInfo
+ }
+ return &TCPInfo{
+ RTT: info.rtt,
+ RTTVar: info.rttvar,
+ RTTMin: info.min_rtt,
+ SndMSS: info.snd_mss,
+ RcvMSS: info.rcv_mss,
+ SndCwnd: info.snd_cwnd, // Send congestion window
+ RcvWnd: info.snd_wnd, // "tp->snd_wnd, the receive window that the receiver has advertised to the sender."
+ Rate: info.delivery_rate,
+ AppLimited: bool((info.app_limited >> 7) != 0), // get first bit
+ }, nil
+}
+
+// linuxTCPInfo is based on linux include/uapi/linux/tcp.h struct tcp_info
+//revive:disable:var-naming
+type linuxTCPInfo struct {
+ state uint8
+ ca_state uint8
+ retransmits uint8
+ probes uint8
+ backoff uint8
+ options uint8
+ wscale uint8 // __u8 tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4;
+ app_limited uint8 // __u8 tcpi_delivery_rate_app_limited:1, tcpi_fastopen_client_fail:2;
+
+ rto uint32
+ ato uint32
+ snd_mss uint32
+ rcv_mss uint32
+
+ unacked uint32
+ sacked uint32
+ lost uint32
+ retrans uint32
+ fackets uint32
+
+ last_data_sent uint32
+ last_ack_sent uint32
+ last_data_recv uint32
+ last_ack_recv uint32
+
+ pmtu uint32
+ rcv_ssthresh uint32
+ rtt uint32
+ rttvar uint32
+ snd_ssthresh uint32
+ snd_cwnd uint32
+ advmss uint32
+ reordering uint32
+
+ rcv_rtt uint32
+ rcv_space uint32
+
+ total_retrans uint32
+
+ // extended info beyond what's in syscall.TCPInfo
+ pacing_rate uint64
+ max_pacing_rate uint64
+ byte_acked uint64
+ bytes_received uint64
+ segs_out uint32
+ segs_in uint32
+
+ notsent_bytes uint32
+ min_rtt uint32
+ data_segs_in uint32
+ data_segs_out uint32
+
+ delivery_rate uint64
+
+ busy_time uint64
+ rwnd_limited uint64
+ sndbuf_limited uint64
+
+ delivered uint32
+ delivered_ce uint32
+
+ bytes_sent uint64
+ bytes_retrans uint64
+ dsack_dups uint32
+ reord_seen uint32
+
+ rcv_ooopack uint32
+
+ snd_wnd uint32
+}
diff --git a/ledger/internal/export_test.go b/util/tcpinfo_noop.go
index a772d53b9..a155ed929 100644
--- a/ledger/internal/export_test.go
+++ b/util/tcpinfo_noop.go
@@ -14,15 +14,13 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal
+//go:build !linux && !darwin
+// +build !linux,!darwin
-// Export for testing only. See
-// https://medium.com/@robiplus/golang-trick-export-for-test-aa16cbd7b8cd for a
-// nice explanation. tl;dr: Since some of our testing is in logic_test package,
-// we export some extra things to make testing easier there. But we do it in a
-// _test.go file, so they are only exported during testing.
+package util
-// In order to generate a block
-func (eval *BlockEvaluator) SetGenerate(g bool) {
- eval.generate = g
+import "syscall"
+
+func getConnTCPInfo(conn syscall.RawConn) (*TCPInfo, error) {
+ return nil, ErrTCPInfoUnsupported
}