author    John Lee <64482439+algojohnlee@users.noreply.github.com>  2021-12-07 10:29:26 -0500
committer GitHub <noreply@github.com>  2021-12-07 10:29:26 -0500
commit    b6cbbf34a8837b9f8161cd29b0b034ee0b4e4942 (patch)
tree      c5395e40afe20937e23bdb51ff433c9e19047a72
parent    b619b940e56165184c8e94bcbd13b656b773918c (diff)
parent    2baf39b72b6db24c9e0cbdecfe8325dcfffaa30c (diff)
Merge pull request #3279 from Algo-devops-service/relstable3.2.1 (tag: v3.2.1-stable)
go-algorand 3.2.1-stable
-rw-r--r--  .circleci/config.yml  936
-rw-r--r--  .travis.yml  222
-rw-r--r--  Makefile  2
-rw-r--r--  README.md  2
-rw-r--r--  agreement/abstractions.go  18
-rw-r--r--  agreement/agreementtest/keyManager.go  9
-rw-r--r--  agreement/agreementtest/simulate_test.go  8
-rw-r--r--  agreement/asyncVoteVerifier.go  16
-rw-r--r--  agreement/asyncVoteVerifier_test.go  50
-rw-r--r--  agreement/common_test.go  15
-rw-r--r--  agreement/cryptoVerifier.go  9
-rw-r--r--  agreement/cryptoVerifier_test.go  27
-rw-r--r--  agreement/demux_test.go  4
-rw-r--r--  agreement/fuzzer/fuzzer_test.go  3
-rw-r--r--  agreement/fuzzer/ledger_test.go  9
-rw-r--r--  agreement/keyManager_test.go  74
-rw-r--r--  agreement/msgp_gen.go  1283
-rw-r--r--  agreement/player_permutation_test.go  3
-rw-r--r--  agreement/proposal.go  2
-rw-r--r--  agreement/proposalStore_test.go  17
-rw-r--r--  agreement/proposal_test.go  7
-rw-r--r--  agreement/pseudonode.go  115
-rw-r--r--  agreement/pseudonode_test.go  106
-rw-r--r--  agreement/selector.go  4
-rw-r--r--  agreement/service_test.go  18
-rw-r--r--  catchup/peerSelector_test.go  5
-rw-r--r--  catchup/service.go  40
-rw-r--r--  catchup/service_test.go  10
-rw-r--r--  catchup/universalFetcher.go  9
-rw-r--r--  catchup/universalFetcher_test.go  4
-rw-r--r--  cmd/catchpointdump/file.go  2
-rw-r--r--  cmd/catchpointdump/net.go  3
-rw-r--r--  cmd/goal/account.go  175
-rw-r--r--  cmd/goal/application.go  464
-rw-r--r--  cmd/goal/clerk.go  86
-rw-r--r--  cmd/goal/commands.go  18
-rw-r--r--  cmd/goal/common.go  18
-rw-r--r--  cmd/goal/multisig.go  3
-rw-r--r--  cmd/pingpong/runCmd.go  2
-rw-r--r--  cmd/tealdbg/cdtState.go  6
-rw-r--r--  cmd/tealdbg/local.go  91
-rw-r--r--  cmd/tealdbg/localLedger.go  3
-rw-r--r--  cmd/tealdbg/local_test.go  332
-rw-r--r--  cmd/tealdbg/main.go  2
-rw-r--r--  cmd/updater/versionCmd.go  14
-rw-r--r--  compactcert/abstractions.go  4
-rw-r--r--  compactcert/worker.go  4
-rw-r--r--  compactcert/worker_test.go  37
-rw-r--r--  config/config.go  458
-rw-r--r--  config/consensus.go  14
-rw-r--r--  config/defaultsGenerator/defaultsGenerator.go  2
-rw-r--r--  config/localTemplate.go  500
-rw-r--r--  config/local_defaults.go  187
-rw-r--r--  config/version.go  2
-rw-r--r--  crypto/batchverifier.go  2
-rw-r--r--  crypto/compactcert/builder.go  4
-rw-r--r--  crypto/compactcert/builder_test.go  12
-rw-r--r--  crypto/compactcert/msgp_gen.go  160
-rw-r--r--  crypto/compactcert/msgp_gen_test.go  60
-rw-r--r--  crypto/compactcert/structs.go  32
-rw-r--r--  daemon/algod/api/algod.oas2.json  324
-rw-r--r--  daemon/algod/api/algod.oas3.yml  395
-rw-r--r--  daemon/algod/api/client/restClient.go  25
-rw-r--r--  daemon/algod/api/server/v1/handlers/handlers.go  3
-rw-r--r--  daemon/algod/api/server/v2/dryrun.go  3
-rw-r--r--  daemon/algod/api/server/v2/errors.go  1
-rw-r--r--  daemon/algod/api/server/v2/generated/private/routes.go  390
-rw-r--r--  daemon/algod/api/server/v2/generated/private/types.go  63
-rw-r--r--  daemon/algod/api/server/v2/generated/routes.go  339
-rw-r--r--  daemon/algod/api/server/v2/generated/types.go  47
-rw-r--r--  daemon/algod/api/server/v2/handlers.go  155
-rw-r--r--  daemon/algod/api/server/v2/test/handlers_test.go  3
-rw-r--r--  daemon/algod/api/server/v2/test/helpers.go  19
-rw-r--r--  data/abi/abi_encode.go  562
-rw-r--r--  data/abi/abi_encode_test.go  1003
-rw-r--r--  data/abi/abi_json.go  254
-rw-r--r--  data/abi/abi_json_test.go  123
-rw-r--r--  data/abi/abi_type.go  470
-rw-r--r--  data/abi/abi_type_test.go  613
-rw-r--r--  data/account/msgp_gen.go  238
-rw-r--r--  data/account/msgp_gen_test.go  73
-rw-r--r--  data/account/participation.go  44
-rw-r--r--  data/account/participationRegistry.go  953
-rw-r--r--  data/account/participationRegistry_test.go  769
-rw-r--r--  data/accountManager.go  96
-rw-r--r--  data/basics/ccertpart.go  50
-rw-r--r--  data/basics/fields_test.go  201
-rw-r--r--  data/basics/msgp_gen.go  160
-rw-r--r--  data/basics/msgp_gen_test.go  60
-rw-r--r--  data/basics/userBalance.go  52
-rw-r--r--  data/basics/userBalance_test.go  40
-rw-r--r--  data/bookkeeping/block.go  15
-rw-r--r--  data/bookkeeping/msgp_gen.go  922
-rw-r--r--  data/bookkeeping/msgp_gen_test.go  60
-rw-r--r--  data/committee/committee.go  2
-rw-r--r--  data/committee/common_test.go  2
-rw-r--r--  data/committee/credential_test.go  16
-rw-r--r--  data/datatest/impls.go  10
-rw-r--r--  data/ledger.go  10
-rw-r--r--  data/ledger_test.go  34
-rw-r--r--  data/pools/transactionPool.go  66
-rw-r--r--  data/pools/transactionPool_test.go  8
-rw-r--r--  data/transactions/logic/README.md  15
-rw-r--r--  data/transactions/logic/README_in.md  8
-rw-r--r--  data/transactions/logic/TEAL_opcodes.md  21
-rw-r--r--  data/transactions/logic/assembler.go  38
-rw-r--r--  data/transactions/logic/assembler_test.go  62
-rw-r--r--  data/transactions/logic/doc.go  14
-rw-r--r--  data/transactions/logic/eval.go  266
-rw-r--r--  data/transactions/logic/evalAppTxn_test.go  162
-rw-r--r--  data/transactions/logic/evalStateful_test.go  2
-rw-r--r--  data/transactions/logic/eval_test.go  69
-rw-r--r--  data/transactions/logic/fields.go  36
-rw-r--r--  data/transactions/logic/opcodes.go  3
-rw-r--r--  data/transactions/logictest/ledger.go  23
-rw-r--r--  data/transactions/teal.go  3
-rw-r--r--  data/txntest/txn.go  18
-rw-r--r--  go.mod  7
-rw-r--r--  go.sum  10
-rw-r--r--  installer/config.json.example  9
-rw-r--r--  ledger/README.md  5
-rw-r--r--  ledger/accountdb.go  76
-rw-r--r--  ledger/accountdb_test.go  361
-rw-r--r--  ledger/acctupdates.go  1651
-rw-r--r--  ledger/acctupdates_test.go  659
-rw-r--r--  ledger/applications_test.go  426
-rw-r--r--  ledger/apply/apply.go  23
-rw-r--r--  ledger/apptxn_test.go  561
-rw-r--r--  ledger/archival_test.go  22
-rw-r--r--  ledger/blockqueue_test.go  5
-rw-r--r--  ledger/bulletin.go  25
-rw-r--r--  ledger/catchpointtracker.go  901
-rw-r--r--  ledger/catchpointtracker_test.go  415
-rw-r--r--  ledger/catchpointwriter.go  2
-rw-r--r--  ledger/catchpointwriter_test.go  24
-rw-r--r--  ledger/catchupaccessor.go  10
-rw-r--r--  ledger/catchupaccessor_test.go  11
-rw-r--r--  ledger/eval_test.go  1831
-rw-r--r--  ledger/evalbench_test.go  440
-rw-r--r--  ledger/evalindexer.go  194
-rw-r--r--  ledger/evalindexer_test.go  307
-rw-r--r--  ledger/internal/appcow.go (renamed from ledger/appcow.go)  22
-rw-r--r--  ledger/internal/appcow_test.go (renamed from ledger/appcow_test.go)  45
-rw-r--r--  ledger/internal/applications.go (renamed from ledger/applications.go)  17
-rw-r--r--  ledger/internal/applications_test.go  353
-rw-r--r--  ledger/internal/assetcow.go (renamed from ledger/assetcow.go)  2
-rw-r--r--  ledger/internal/compactcert.go (renamed from ledger/compactcert.go)  2
-rw-r--r--  ledger/internal/compactcert_test.go (renamed from ledger/compactcert_test.go)  2
-rw-r--r--  ledger/internal/cow.go (renamed from ledger/cow.go)  46
-rw-r--r--  ledger/internal/cow_test.go (renamed from ledger/cow_test.go)  13
-rw-r--r--  ledger/internal/eval.go (renamed from ledger/eval.go)  512
-rw-r--r--  ledger/internal/eval_blackbox_test.go  1081
-rw-r--r--  ledger/internal/eval_test.go  1030
-rw-r--r--  ledger/internal/evalindexer.go  51
-rw-r--r--  ledger/ledger.go  203
-rw-r--r--  ledger/ledger_perf_test.go  20
-rw-r--r--  ledger/ledger_test.go  110
-rw-r--r--  ledger/ledgercore/error.go  25
-rw-r--r--  ledger/ledgercore/misc.go  51
-rw-r--r--  ledger/ledgercore/onlineacct.go (renamed from agreement/fuzzer/keyManager_test.go)  28
-rw-r--r--  ledger/ledgercore/statedelta.go  3
-rw-r--r--  ledger/ledgercore/validatedBlock.go  59
-rw-r--r--  ledger/ledgercore/votersForRound.go  164
-rw-r--r--  ledger/metrics.go  26
-rw-r--r--  ledger/msgp_gen.go  54
-rw-r--r--  ledger/notifier.go  26
-rw-r--r--  ledger/onlinetopheap.go (renamed from ledger/onlineacct.go)  23
-rw-r--r--  ledger/onlinetopheap_test.go (renamed from ledger/onlineacct_test.go)  11
-rw-r--r--  ledger/perf_test.go  52
-rw-r--r--  ledger/testing/accountsTotals.go  41
-rw-r--r--  ledger/testing/initState.go  111
-rw-r--r--  ledger/testing/randomAccounts.go  344
-rw-r--r--  ledger/testing/testGenesis.go  137
-rw-r--r--  ledger/time.go  67
-rw-r--r--  ledger/tracker.go  552
-rw-r--r--  ledger/trackerdb.go  365
-rw-r--r--  ledger/txtail.go  26
-rw-r--r--  ledger/txtail_test.go  8
-rw-r--r--  ledger/voters.go  161
-rw-r--r--  libgoal/libgoal.go  83
-rw-r--r--  libgoal/participation.go  15
-rw-r--r--  logging/testingLogger.go  2
-rw-r--r--  netdeploy/remote/deployedNetwork.go  5
-rw-r--r--  network/phonebook_test.go  34
-rw-r--r--  network/ping.go  70
-rw-r--r--  network/ping_test.go  74
-rw-r--r--  network/wsNetwork.go  88
-rw-r--r--  network/wsNetwork_test.go  6
-rw-r--r--  network/wsPeer.go  9
-rw-r--r--  node/assemble_test.go  1
-rw-r--r--  node/impls.go  4
-rw-r--r--  node/netprio.go  10
-rw-r--r--  node/node.go  195
-rw-r--r--  node/node_test.go  48
-rw-r--r--  protocol/hash.go  1
-rw-r--r--  rpcs/blockService.go  5
-rw-r--r--  rpcs/blockService_test.go  5
-rwxr-xr-x  scripts/archtype.sh  5
-rwxr-xr-x  scripts/install_linux_deps.sh  2
-rwxr-xr-x  scripts/ostype.sh  5
-rw-r--r--  scripts/release/mule/Makefile.mule  4
-rwxr-xr-x  scripts/travis/build.sh  7
-rwxr-xr-x  scripts/travis/codegen_verification.sh  2
-rwxr-xr-x  scripts/travis/deploy_packages.sh  4
-rw-r--r--  test/README.md  4
-rw-r--r--  test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp  15
-rw-r--r--  test/e2e-go/cli/goal/expect/goalExpectCommon.exp  12
-rw-r--r--  test/e2e-go/features/devmode/devmode_test.go  65
-rw-r--r--  test/e2e-go/features/participation/accountParticipationTransitions_test.go  164
-rw-r--r--  test/e2e-go/features/participation/participationExpiration_test.go  196
-rw-r--r--  test/e2e-go/features/partitionRecovery/partitionRecovery_test.go  12
-rw-r--r--  test/e2e-go/features/transactions/accountv2_test.go  45
-rw-r--r--  test/e2e-go/features/transactions/onlineStatusChange_test.go  34
-rw-r--r--  test/e2e-go/restAPI/restClient_test.go  126
-rw-r--r--  test/e2e-go/upgrades/rekey_support_test.go  32
-rw-r--r--  test/framework/fixtures/fixture.go  3
-rw-r--r--  test/framework/fixtures/libgoalFixture.go  1
-rw-r--r--  test/heapwatch/heapWatch.py  3
-rw-r--r--  test/heapwatch/metrics_delta.py  63
-rw-r--r--  test/muleCI/Jenkinsfile  2
-rw-r--r--  test/muleCI/mule.yaml  6
-rwxr-xr-x  test/scripts/e2e.sh  27
-rwxr-xr-x  test/scripts/e2e_subs/app-assets.sh  266
-rwxr-xr-x  test/scripts/e2e_subs/app-group.py  77
-rwxr-xr-x  test/scripts/e2e_subs/app-rekey.py  81
-rwxr-xr-x  test/scripts/e2e_subs/e2e-app-abi-arg.sh  37
-rwxr-xr-x  test/scripts/e2e_subs/e2e-app-abi-method.sh  79
-rwxr-xr-x  test/scripts/e2e_subs/e2e-logs.sh  63
-rwxr-xr-x  test/scripts/e2e_subs/example.py  127
-rwxr-xr-x  test/scripts/e2e_subs/goal-partkey-information.sh  45
-rw-r--r--  test/scripts/e2e_subs/goal/.gitignore  1
-rwxr-xr-x  test/scripts/e2e_subs/goal/__init__.py  1
-rwxr-xr-x  test/scripts/e2e_subs/goal/goal.py  374
-rwxr-xr-x  test/scripts/e2e_subs/rest-participation-key.sh  41
-rwxr-xr-x  test/scripts/e2e_subs/rest.sh  185
-rw-r--r--  test/scripts/e2e_subs/tealprogs/app-abi-arg.teal  73
-rw-r--r--  test/scripts/e2e_subs/tealprogs/app-abi-method-example.teal  176
-rw-r--r--  test/scripts/e2e_subs/tealprogs/assets-escrow.teal  328
-rw-r--r--  test/scripts/e2e_subs/tealprogs/logs.teal  19
-rw-r--r--  test/testdata/configs/config-v17.json  96
-rw-r--r--  test/testdata/configs/config-v18.json  96
-rw-r--r--  test/testdata/configs/config-v19.json  97
-rw-r--r--  test/testdata/deployednettemplates/hosttemplates/hosttemplates.json  24
-rw-r--r--  test/testdata/nettemplates/DevModeOneWallet.json  22
-rw-r--r--  test/testdata/nettemplates/TenNodesDistributedMultiWallet.json  2
-rw-r--r--  test/testdata/nettemplates/TwoNodesExpiredOfflineV29.json  36
-rw-r--r--  test/testdata/nettemplates/TwoNodesExpiredOfflineVFuture.json  36
-rw-r--r--  util/db/dbutil.go  2
-rw-r--r--  util/db/initialize.go  123
-rw-r--r--  util/db/initialize_test.go  246
-rw-r--r--  util/s3/s3Helper.go  11
251 files changed, 23777 insertions, 9436 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index a5d9637eb..27c616436 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -5,244 +5,192 @@ orbs:
go: circleci/go@1.7.0
slack: circleci/slack@4.4.2
+parameters:
+ ubuntu_image:
+ type: string
+ default: "ubuntu-2004:202104-01"
+ build_dir:
+ type: string
+ default: "/opt/cibuild"
+ result_path:
+ type: string
+ default: "/tmp/build_test_results"
+
+executors:
+ amd64_medium:
+ machine:
+ image: << pipeline.parameters.ubuntu_image >>
+ resource_class: medium
+ amd64_large:
+ machine:
+ image: << pipeline.parameters.ubuntu_image >>
+ resource_class: large
+ arm64_medium:
+ machine:
+ image: << pipeline.parameters.ubuntu_image >>
+ resource_class: arm.medium
+ arm64_large:
+ machine:
+ image: << pipeline.parameters.ubuntu_image >>
+ resource_class: arm.large
+ mac_amd64_medium:
+ macos:
+ xcode: 12.0.1
+ resource_class: medium
+ environment:
+ HOMEBREW_NO_AUTO_UPDATE: "true"
+ mac_amd64_large:
+ macos:
+ xcode: 12.0.1
+ resource_class: large
+ environment:
+ HOMEBREW_NO_AUTO_UPDATE: "true"
+
workflows:
version: 2
- build_pr:
+ "circleci_build_and_test":
jobs:
- codegen_verification
- - amd64_build
- - amd64_test:
- requires:
- - amd64_build
- filters:
- branches:
- ignore:
- - /rel\/.*/
- - /hotfix\/.*/
- - amd64_test_nightly:
- requires:
- - amd64_build
- filters:
- branches:
- only:
- - /rel\/.*/
- - /hotfix\/.*/
- context: slack-secrets
- - amd64_integration:
- requires:
- - amd64_build
- filters:
- branches:
- ignore:
- - /rel\/.*/
- - /hotfix\/.*/
- - amd64_integration_nightly:
- requires:
- - amd64_build
- filters:
- branches:
- only:
- - /rel\/.*/
- - /hotfix\/.*/
- context: slack-secrets
- - amd64_e2e_subs:
- requires:
- - amd64_build
- filters:
- branches:
- ignore:
- - /rel\/.*/
- - /hotfix\/.*/
- - amd64_e2e_subs_nightly:
- requires:
- - amd64_build
- filters:
- branches:
- only:
- - /rel\/.*/
- - /hotfix\/.*/
- context: slack-secrets
- - amd64_e2e_expect:
- requires:
- - amd64_build
- filters:
- branches:
- ignore:
- - /rel\/.*/
- - /hotfix\/.*/
- - amd64_e2e_expect_nightly:
- requires:
- - amd64_build
- filters:
- branches:
- only:
- - /rel\/.*/
- - /hotfix\/.*/
- context: slack-secrets
- - arm64_build
- - arm64_test:
- requires:
- - arm64_build
- filters:
- branches:
- ignore:
- - /rel\/.*/
- - /hotfix\/.*/
- - arm64_test_nightly:
- requires:
- - arm64_build
- filters:
- branches:
- only:
- - /rel\/.*/
- - /hotfix\/.*/
- context: slack-secrets
- - arm64_integration:
- requires:
- - arm64_build
- filters:
- branches:
- ignore:
- - /rel\/.*/
- - /hotfix\/.*/
- - arm64_integration_nightly:
- requires:
- - arm64_build
- filters:
- branches:
- only:
- - /rel\/.*/
- - /hotfix\/.*/
- context: slack-secrets
- - arm64_e2e_subs:
- requires:
- - arm64_build
- filters:
- branches:
- ignore:
- - /rel\/.*/
- - /hotfix\/.*/
- - arm64_e2e_subs_nightly:
- requires:
- - arm64_build
- filters:
- branches:
- only:
- - /rel\/.*/
- - /hotfix\/.*/
- context: slack-secrets
- - arm64_e2e_expect:
+
+ - build:
+ name: << matrix.platform >>_build
+ matrix: &matrix-default
+ parameters:
+ platform: ["amd64", "arm64", "mac_amd64"]
+
+ - test:
+ name: << matrix.platform >>_test
+ matrix:
+ <<: *matrix-default
requires:
- - arm64_build
- filters:
+ - << matrix.platform >>_build
+ filters: &filters-default
branches:
ignore:
- /rel\/.*/
- /hotfix\/.*/
- - arm64_e2e_expect_nightly:
+
+ - test_nightly:
+ name: << matrix.platform >>_test_nightly
+ matrix:
+ <<: *matrix-default
requires:
- - arm64_build
- filters:
+ - << matrix.platform >>_build
+ filters: &filters-nightly
branches:
only:
- /rel\/.*/
- /hotfix\/.*/
context: slack-secrets
- - mac_amd64_build
- - mac_amd64_test:
+
+ - integration:
+ name: << matrix.platform >>_integration
+ matrix:
+ <<: *matrix-default
requires:
- - mac_amd64_build
+ - << matrix.platform >>_build
filters:
- branches:
- ignore:
- - /rel\/.*/
- - /hotfix\/.*/
- - mac_amd64_test_nightly:
+ <<: *filters-default
+
+ - integration_nightly:
+ name: << matrix.platform >>_integration_nightly
+ matrix:
+ <<: *matrix-default
requires:
- - mac_amd64_build
+ - << matrix.platform >>_build
filters:
- branches:
- only:
- - /rel\/.*/
- - /hotfix\/.*/
+ <<: *filters-nightly
context: slack-secrets
- - mac_amd64_integration:
+
+ - e2e_expect:
+ name: << matrix.platform >>_e2e_expect
+ matrix:
+ <<: *matrix-default
requires:
- - mac_amd64_build
+ - << matrix.platform >>_build
filters:
- branches:
- ignore:
- - /rel\/.*/
- - /hotfix\/.*/
- - mac_amd64_integration_nightly:
+ <<: *filters-default
+
+ - e2e_expect_nightly:
+ name: << matrix.platform >>_e2e_expect_nightly
+ matrix:
+ <<: *matrix-default
requires:
- - mac_amd64_build
+ - << matrix.platform >>_build
filters:
- branches:
- only:
- - /rel\/.*/
- - /hotfix\/.*/
+ <<: *filters-nightly
context: slack-secrets
- - mac_amd64_e2e_subs:
+
+ - e2e_subs:
+ name: << matrix.platform >>_e2e_subs
+ matrix:
+ <<: *matrix-default
requires:
- - mac_amd64_build
+ - << matrix.platform >>_build
filters:
- branches:
- ignore:
- - /rel\/.*/
- - /hotfix\/.*/
- - mac_amd64_e2e_subs_nightly:
+ <<: *filters-default
+
+ - e2e_subs_nightly:
+ name: << matrix.platform >>_e2e_subs_nightly
+ matrix:
+ <<: *matrix-default
requires:
- - mac_amd64_build
+ - << matrix.platform >>_build
filters:
- branches:
- only:
- - /rel\/.*/
- - /hotfix\/.*/
+ <<: *filters-nightly
context: slack-secrets
- - mac_amd64_e2e_expect:
+
+ - tests_verification_job:
+ name: << matrix.platform >>_<< matrix.job_type >>_verification
+ matrix:
+ parameters:
+ platform: ["amd64", "arm64", "mac_amd64"]
+ job_type: ["test", "test_nightly", "integration", "integration_nightly", "e2e_expect", "e2e_expect_nightly"]
requires:
- - mac_amd64_build
- filters:
- branches:
- ignore:
- - /rel\/.*/
- - /hotfix\/.*/
- - mac_amd64_e2e_expect_nightly:
+ - << matrix.platform >>_<< matrix.job_type >>
+
+ - upload_binaries:
+ name: << matrix.platform >>_upload_binaries
+ matrix:
+ <<: *matrix-default
requires:
- - mac_amd64_build
+ - << matrix.platform >>_test_nightly_verification
+ - << matrix.platform >>_integration_nightly_verification
+ - << matrix.platform >>_e2e_expect_nightly_verification
+ - << matrix.platform >>_e2e_subs_nightly
+ - codegen_verification
filters:
branches:
only:
- /rel\/.*/
- - /hotfix\/.*/
- context: slack-secrets
- - tests_verification_job:
- name: << matrix.job_type >>_<< matrix.job_version >>_verification
- matrix:
- parameters:
- job_type: ["amd64", "arm64", "mac_amd64"]
- job_version: ["test", "test_nightly", "integration", "integration_nightly", "e2e_expect", "e2e_expect_nightly"]
- requires:
- - << matrix.job_type >>_<< matrix.job_version >>
+ context:
+ - slack-secrets
+ - aws-secrets
+
#- windows_x64_build
commands:
prepare_go:
description: Clean out existing Go so we can use our preferred version
+ steps:
+ - run: |
+ sudo rm -rf ${HOME}/.go_workspace /usr/local/go
+
+ prepare_build_dir:
+ description: Set up build directory
parameters:
- circleci_home:
+ build_dir:
type: string
- default: "/home/circleci"
+ default: << pipeline.parameters.build_dir >>
steps:
- - run: |
- sudo rm -rf << parameters.circleci_home >>/.go_workspace /usr/local/go
+ - run:
+ working_directory: /tmp
+ command: |
+ sudo mkdir -p << parameters.build_dir >>
+ sudo chown -R $USER:$GROUP << parameters.build_dir >>
+
prepare_windows:
description: Prepare windows image
- parameters:
- circleci_home:
- type: string
- default: "/home/circleci"
steps:
- run:
name: install deps
@@ -261,35 +209,38 @@ commands:
generic_build:
description: Run basic build and store in workspace for re-use by different architectures
parameters:
- circleci_home:
+ build_dir:
type: string
- default: "/home/circleci"
+ default: << pipeline.parameters.build_dir >>
steps:
- restore_libsodium
- restore_cache:
keys:
- - 'go-mod-1-14-7-v1-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}'
+ - 'go-mod-1-14-7-v2-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}'
- restore_cache:
keys:
- - 'go-cache-{{ .Environment.CIRCLE_STAGE }}-'
+ - 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-'
- run:
name: scripts/travis/build.sh --make_debug
command: |
- export PATH=$(echo "$PATH" | sed -e 's|:<< parameters.circleci_home >>/\.go_workspace/bin||g' | sed -e 's|:/usr/local/go/bin||g')
- export GOPATH="<< parameters.circleci_home >>/go"
+ export PATH=$(echo "$PATH" | sed -e "s|:${HOME}/\.go_workspace/bin||g" | sed -e 's|:/usr/local/go/bin||g')
+ export GOPATH="<< parameters.build_dir >>/go"
export ALGORAND_DEADLOCK=enable
+ export GIMME_INSTALL_DIR=<< parameters.build_dir >>
+ export GIMME_ENV_PREFIX=<< parameters.build_dir >>/.gimme/envs
+ export GIMME_VERSION_PREFIX=<< parameters.build_dir >>/.gimme/versions
scripts/travis/build.sh --make_debug
- cache_libsodium
- save_cache:
- key: 'go-mod-1-14-7-v1-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}'
+ key: 'go-mod-1-14-7-v2-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}'
paths:
- - << parameters.circleci_home >>/go/pkg/mod
+ - << parameters.build_dir >>/go/pkg/mod
- save_cache:
- key: 'go-cache-{{ .Environment.CIRCLE_STAGE }}-{{ .Environment.CIRCLE_BUILD_NUM }}'
+ key: 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-{{ .Environment.CIRCLE_BUILD_NUM }}'
paths:
- tmp/go-cache
- persist_to_workspace:
- root: << parameters.circleci_home >>
+ root: << parameters.build_dir >>
paths:
- project
- go
@@ -305,7 +256,7 @@ commands:
mkdir -p tmp
find crypto/libsodium-fork -type f -exec openssl md5 "{}" + > tmp/libsodium.md5
- save_cache:
- key: 'libsodium-fork-{{ .Environment.CIRCLE_STAGE }}-{{ checksum "tmp/libsodium.md5" }}'
+ key: 'libsodium-fork-v2-{{ .Environment.CIRCLE_STAGE }}-{{ checksum "tmp/libsodium.md5" }}'
paths:
- crypto/libs
@@ -319,14 +270,16 @@ commands:
find crypto/libsodium-fork -type f -exec openssl md5 "{}" + > tmp/libsodium.md5
- restore_cache:
keys:
- - 'libsodium-fork-{{ .Environment.CIRCLE_STAGE }}-{{ checksum "tmp/libsodium.md5" }}'
+ - 'libsodium-fork-v2-{{ .Environment.CIRCLE_STAGE }}-{{ checksum "tmp/libsodium.md5" }}'
- generic_buildtest:
+ generic_test:
description: Run build tests from build workspace, for re-use by different architectures
parameters:
- circleci_home:
+ platform:
+ type: string
+ build_dir:
type: string
- default: "/home/circleci"
+ default: << pipeline.parameters.build_dir >>
result_subdir:
type: string
no_output_timeout:
@@ -337,33 +290,38 @@ commands:
default: ""
result_path:
type: string
- default: "/tmp/results"
+ default: << pipeline.parameters.result_path >>
steps:
- attach_workspace:
- at: << parameters.circleci_home >>
- - run: mkdir -p << parameters.result_path >>/<< parameters.result_subdir >>/$CIRCLE_NODE_INDEX
+ at: << parameters.build_dir >>
+ - run: |
+ mkdir -p << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}
+ touch << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/results.xml
+ touch << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/testresults.json
- restore_cache:
keys:
- - 'go-cache-{{ .Environment.CIRCLE_STAGE }}-'
+ - 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-'
- run:
name: Run build tests
no_output_timeout: << parameters.no_output_timeout >>
command: |
set -e
set -x
- export PATH=$(echo "$PATH" | sed -e 's|:<< parameters.circleci_home >>/\.go_workspace/bin||g' | sed -e 's|:/usr/local/go/bin||g')
+ export PATH=$(echo "$PATH" | sed -e "s|:${HOME}/\.go_workspace/bin||g" | sed -e 's|:/usr/local/go/bin||g')
export KMD_NOUSB=True
- export GOPATH="<< parameters.circleci_home >>/go"
+ export GOPATH="<< parameters.build_dir >>/go"
export PATH="${PATH}:${GOPATH}/bin"
export ALGORAND_DEADLOCK=enable
+ export GIMME_ENV_PREFIX=<< parameters.build_dir >>/.gimme/envs
+ export GIMME_VERSION_PREFIX=<< parameters.build_dir >>/.gimme/versions
GOLANG_VERSION=$(./scripts/get_golang_version.sh)
- eval "$(~/gimme "${GOLANG_VERSION}")"
+ eval "$(<< parameters.build_dir >>/gimme "${GOLANG_VERSION}")"
scripts/configure_dev.sh
scripts/buildtools/install_buildtools.sh -o "gotest.tools/gotestsum"
PACKAGES="$(go list ./... | grep -v /go-algorand/test/)"
export PACKAGE_NAMES=$(echo $PACKAGES | tr -d '\n')
- export PARTITION_TOTAL=$CIRCLE_NODE_TOTAL
- export PARTITION_ID=$CIRCLE_NODE_INDEX
+ export PARTITION_TOTAL=${CIRCLE_NODE_TOTAL}
+ export PARTITION_ID=${CIRCLE_NODE_INDEX}
export PARALLEL_FLAG="-p 1"
gotestsum --format testname --junitfile << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/results.xml --jsonfile << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/testresults.json -- --tags "sqlite_unlock_notify sqlite_omit_load_extension" << parameters.short_test_flag >> -race -timeout 1h -coverprofile=coverage.txt -covermode=atomic -p 1 $PACKAGE_NAMES
- store_artifacts:
@@ -376,9 +334,12 @@ commands:
paths:
- << parameters.result_subdir >>
- save_cache:
- key: 'go-cache-{{ .Environment.CIRCLE_STAGE }}-{{ .Environment.CIRCLE_BUILD_NUM }}'
+ key: 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-{{ .Environment.CIRCLE_BUILD_NUM }}'
paths:
- tmp/go-cache
+ - upload_to_buildpulse:
+ platform: << parameters.platform >>
+ path: << parameters.result_path >>/<< parameters.result_subdir>>
upload_coverage:
description: Collect coverage reports and upload them
@@ -389,12 +350,48 @@ commands:
command: |
scripts/travis/upload_coverage.sh || true
+ upload_to_buildpulse:
+ description: Collect build reports and upload them
+ parameters:
+ platform:
+ type: string
+ path:
+ type: string
+ steps:
+ - run:
+ name: Send test results to BuildPulse
+ when: always
+ command: |
+ set -e
+ if ! ls << parameters.path >>/*/*.xml &> /dev/null; then exit 0; fi
+ sed -i"" -e 's/classname="/classname="<< parameters.platform >>-/' << parameters.path >>/*/*.xml
+ case "<< parameters.platform >>" in
+ arm64)
+ URL=https://github.com/buildpulse/test-reporter/releases/download/v0.21.0-pre/test-reporter-linux-arm64
+ SUM=53f94c29ad162c2b9ebb1f4a2f967f5262c0459ee4a0c34332977d8c89aafc18
+ ;;
+ amd64)
+ URL=https://github.com/buildpulse/test-reporter/releases/download/v0.21.0-pre/test-reporter-linux-amd64
+ SUM=4655e54d756580c0de0112cab488e6e08d0af75e9fc8caea2d63f9e13be8beb5
+ ;;
+ mac_amd64)
+ URL=https://github.com/buildpulse/test-reporter/releases/download/v0.21.0-pre/test-reporter-darwin-amd64
+ SUM=2f9e20a6f683c80f35d04e36bc57ecfe605bb48fee5a1b8d8f7c45094028eea3
+ ;;
+ esac
+ curl -fsSL --retry 3 --retry-connrefused $URL > ./buildpulse-test-reporter
+ echo "$SUM *buildpulse-test-reporter" | shasum -a 256 -c --status
+ chmod +x ./buildpulse-test-reporter
+ ./buildpulse-test-reporter submit << parameters.path >> --account-id 23182699 --repository-id 191266671 || true
+
generic_integration:
description: Run integration tests from build workspace, for re-use by different architectures
parameters:
- circleci_home:
+ platform:
type: string
- default: "/home/circleci"
+ build_dir:
+ type: string
+ default: << pipeline.parameters.build_dir >>
result_subdir:
type: string
no_output_timeout:
@@ -405,31 +402,36 @@ commands:
default: ""
result_path:
type: string
- default: "/tmp/results"
+ default: << pipeline.parameters.result_path >>
steps:
- attach_workspace:
- at: << parameters.circleci_home >>
- - run: mkdir -p << parameters.result_path >>/<< parameters.result_subdir >>/$CIRCLE_NODE_INDEX
+ at: << parameters.build_dir >>
+ - run: |
+ mkdir -p << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}
+ touch << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/results.xml
+ touch << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/testresults.json
- run:
name: Run integration tests
no_output_timeout: << parameters.no_output_timeout >>
command: |
set -x
- export PATH=$(echo "$PATH" | sed -e 's|:<< parameters.circleci_home >>/\.go_workspace/bin||g' | sed -e 's|:/usr/local/go/bin||g')
+ export PATH=$(echo "$PATH" | sed -e "s|:${HOME}/\.go_workspace/bin||g" | sed -e 's|:/usr/local/go/bin||g')
export KMD_NOUSB=True
- export GOPATH="<< parameters.circleci_home >>/go"
+ export GOPATH="<< parameters.build_dir >>/go"
export PATH="${PATH}:${GOPATH}/bin"
export ALGORAND_DEADLOCK=enable
export BUILD_TYPE=integration
+ export GIMME_ENV_PREFIX=<< parameters.build_dir >>/.gimme/envs
+ export GIMME_VERSION_PREFIX=<< parameters.build_dir >>/.gimme/versions
GOLANG_VERSION=$(./scripts/get_golang_version.sh)
- eval "$(~/gimme "${GOLANG_VERSION}")"
+ eval "$(<< parameters.build_dir >>/gimme "${GOLANG_VERSION}")"
scripts/configure_dev.sh
scripts/buildtools/install_buildtools.sh -o "gotest.tools/gotestsum"
export ALGOTEST=1
export SHORTTEST=<< parameters.short_test_flag >>
- export TEST_RESULTS=<< parameters.result_path >>/<< parameters.result_subdir >>/$CIRCLE_NODE_INDEX
- export PARTITION_TOTAL=$CIRCLE_NODE_TOTAL
- export PARTITION_ID=$CIRCLE_NODE_INDEX
+ export TEST_RESULTS=<< parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}
+ export PARTITION_TOTAL=${CIRCLE_NODE_TOTAL}
+ export PARTITION_ID=${CIRCLE_NODE_INDEX}
test/scripts/run_integration_tests.sh
- store_artifacts:
path: << parameters.result_path >>
@@ -440,13 +442,16 @@ commands:
root: << parameters.result_path >>
paths:
- << parameters.result_subdir >>
+ - upload_to_buildpulse:
+ platform: << parameters.platform >>
+ path: << parameters.result_path >>/<< parameters.result_subdir>>
tests_verification_command:
description: Check if all tests were run at least once and only once across all parallel runs
parameters:
result_path:
type: string
- default: "/tmp/results"
+ default: << pipeline.parameters.result_path >>
result_subdir:
type: string
steps:
@@ -461,11 +466,38 @@ commands:
path: << parameters.result_path >>/<< parameters.result_subdir >>
destination: << parameters.result_subdir >>/combined-test-results
+ upload_binaries_command:
+    description: Save build artifacts for potential deployments
+ parameters:
+ platform:
+ type: string
+ build_dir:
+ type: string
+ default: << pipeline.parameters.build_dir >>
+ steps:
+ - attach_workspace:
+ at: << parameters.build_dir >>
+ - run:
+ name: Upload binaries << parameters.platform >>
+ command: |
+ export PATH=$(echo "$PATH" | sed -e "s|:${HOME}/\.go_workspace/bin||g" | sed -e 's|:/usr/local/go/bin||g')
+ export GOPATH="<< parameters.build_dir >>/go"
+ export NO_BUILD=true
+ export TRAVIS_BRANCH=${CIRCLE_BRANCH}
+ scripts/travis/deploy_packages.sh
+ - when:
+ condition:
+ equal: [ "amd64", << parameters.platform >> ]
+ steps:
+ - run:
+ name: test_release.sh
+ command: |
+ export TRAVIS_BRANCH=${CIRCLE_BRANCH}
+ scripts/travis/test_release.sh
+
jobs:
codegen_verification:
- machine:
- image: ubuntu-2004:202104-01
- resource_class: medium
+ executor: amd64_medium
steps:
- checkout
- prepare_go
@@ -474,406 +506,158 @@ jobs:
export GOPATH="/home/circleci/go"
scripts/travis/codegen_verification.sh
- amd64_build:
- machine:
- image: ubuntu-2004:202104-01
- resource_class: medium
+ build:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_medium
+ working_directory: << pipeline.parameters.build_dir >>/project
steps:
+ - prepare_build_dir
- checkout
- prepare_go
- generic_build
- amd64_test:
- machine:
- image: ubuntu-2004:202104-01
- resource_class: medium
+ test:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_medium
+ working_directory: << pipeline.parameters.build_dir >>/project
parallelism: 4
steps:
+ - prepare_build_dir
- prepare_go
- - generic_buildtest:
- result_subdir: amd64_test
+ - generic_test:
+ platform: << parameters.platform >>
+ result_subdir: << parameters.platform >>_test
short_test_flag: "-short"
- upload_coverage
- amd64_test_nightly:
- machine:
- image: ubuntu-2004:202104-01
- resource_class: medium
+ test_nightly:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_medium
+ working_directory: << pipeline.parameters.build_dir >>/project
parallelism: 4
steps:
+ - prepare_build_dir
- prepare_go
- - generic_buildtest:
- result_subdir: amd64_test_nightly
+ - generic_test:
+ platform: << parameters.platform >>
+ result_subdir: << parameters.platform >>_test_nightly
no_output_timeout: 45m
- upload_coverage
- - slack/notify:
+ - slack/notify: &slack-fail-event
event: fail
template: basic_fail_1
- amd64_integration:
- machine:
- image: ubuntu-2004:202104-01
- resource_class: medium
- parallelism: 4
- environment:
- E2E_TEST_FILTER: "GO"
- steps:
- - prepare_go
- - generic_integration:
- result_subdir: amd64_integration
- short_test_flag: "-short"
-
- amd64_integration_nightly:
- machine:
- image: ubuntu-2004:202104-01
- resource_class: medium
- parallelism: 4
- environment:
- E2E_TEST_FILTER: "GO"
- steps:
- - prepare_go
- - generic_integration:
- result_subdir: amd64_integration_nightly
- no_output_timeout: 45m
- - slack/notify:
- event: fail
- template: basic_fail_1
-
- amd64_e2e_subs:
- machine:
- image: ubuntu-2004:202104-01
- resource_class: large
- environment:
- E2E_TEST_FILTER: "SCRIPTS"
- steps:
- - prepare_go
- - generic_integration:
- result_subdir: amd64_e2e_subs
- short_test_flag: "-short"
-
- amd64_e2e_subs_nightly:
- machine:
- image: ubuntu-2004:202104-01
- resource_class: large
- environment:
- E2E_TEST_FILTER: "SCRIPTS"
- steps:
- - prepare_go
- - generic_integration:
- result_subdir: amd64_e2e_subs_nightly
- no_output_timeout: 45m
- - slack/notify:
- event: fail
- template: basic_fail_1
-
- amd64_e2e_expect:
- machine:
- image: ubuntu-2004:202104-01
- resource_class: medium
- parallelism: 2
- environment:
- E2E_TEST_FILTER: "EXPECT"
- steps:
- - prepare_go
- - generic_integration:
- result_subdir: amd64_e2e_expect
- short_test_flag: "-short"
-
- amd64_e2e_expect_nightly:
- machine:
- image: ubuntu-2004:202104-01
- resource_class: medium
+ integration:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_large
+ working_directory: << pipeline.parameters.build_dir >>/project
parallelism: 2
environment:
- E2E_TEST_FILTER: "EXPECT"
- steps:
- - prepare_go
- - generic_integration:
- result_subdir: amd64_e2e_expect_nightly
- no_output_timeout: 45m
- - slack/notify:
- event: fail
- template: basic_fail_1
-
- arm64_build:
- machine:
- image: ubuntu-2004:202101-01
- resource_class: arm.medium
- steps:
- - checkout
- - prepare_go
- - generic_build
-
- arm64_test:
- machine:
- image: ubuntu-2004:202101-01
- resource_class: arm.medium
- parallelism: 4
- steps:
- - checkout
- - prepare_go
- - generic_buildtest:
- result_subdir: arm64_test
- short_test_flag: "-short"
- - upload_coverage
-
- arm64_test_nightly:
- machine:
- image: ubuntu-2004:202101-01
- resource_class: arm.medium
- parallelism: 4
- steps:
- - checkout
- - prepare_go
- - generic_buildtest:
- result_subdir: arm64_test_nightly
- no_output_timeout: 45m
- - upload_coverage
- - slack/notify:
- event: fail
- template: basic_fail_1
-
- arm64_integration:
- machine:
- image: ubuntu-2004:202101-01
- resource_class: arm.medium
- parallelism: 4
- environment:
E2E_TEST_FILTER: "GO"
steps:
- - checkout
+ - prepare_build_dir
- prepare_go
- generic_integration:
- result_subdir: arm64_integration
+ platform: << parameters.platform >>
+ result_subdir: << parameters.platform >>_integration
short_test_flag: "-short"
- arm64_integration_nightly:
- machine:
- image: ubuntu-2004:202101-01
- resource_class: arm.medium
+ integration_nightly:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_large
+ working_directory: << pipeline.parameters.build_dir >>/project
parallelism: 4
environment:
E2E_TEST_FILTER: "GO"
steps:
- - checkout
- - prepare_go
- - generic_integration:
- result_subdir: arm64_integration_nightly
- no_output_timeout: 45m
- - slack/notify:
- event: fail
- template: basic_fail_1
-
- arm64_e2e_subs:
- machine:
- image: ubuntu-2004:202101-01
- resource_class: arm.large
- environment:
- E2E_TEST_FILTER: "SCRIPTS"
- steps:
- - checkout
+ - prepare_build_dir
- prepare_go
- generic_integration:
- result_subdir: arm64_e2e_subs
- short_test_flag: "-short"
-
- arm64_e2e_subs_nightly:
- machine:
- image: ubuntu-2004:202101-01
- resource_class: arm.large
- environment:
- E2E_TEST_FILTER: "SCRIPTS"
- steps:
- - checkout
- - prepare_go
- - generic_integration:
- result_subdir: arm64_e2e_subs_nightly
+ platform: << parameters.platform >>
+ result_subdir: << parameters.platform >>_integration_nightly
no_output_timeout: 45m
- slack/notify:
- event: fail
- template: basic_fail_1
+ <<: *slack-fail-event
- arm64_e2e_expect:
- machine:
- image: ubuntu-2004:202101-01
- resource_class: arm.medium
+ e2e_expect:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_medium
+ working_directory: << pipeline.parameters.build_dir >>/project
parallelism: 2
environment:
E2E_TEST_FILTER: "EXPECT"
steps:
- - checkout
+ - prepare_build_dir
- prepare_go
- generic_integration:
- result_subdir: arm64_e2e_expect
+ platform: << parameters.platform >>
+ result_subdir: << parameters.platform >>_e2e_expect
short_test_flag: "-short"
- arm64_e2e_expect_nightly:
- machine:
- image: ubuntu-2004:202101-01
- resource_class: arm.medium
+ e2e_expect_nightly:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_medium
+ working_directory: << pipeline.parameters.build_dir >>/project
parallelism: 2
environment:
E2E_TEST_FILTER: "EXPECT"
steps:
- - checkout
+ - prepare_build_dir
- prepare_go
- generic_integration:
- result_subdir: arm64_e2e_expect_nightly
- no_output_timeout: 45m
- - slack/notify:
- event: fail
- template: basic_fail_1
-
- mac_amd64_build:
- macos:
- xcode: 12.0.1
- resource_class: medium
- environment:
- HOMEBREW_NO_AUTO_UPDATE: "true"
- steps:
- #- run: git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow
- - checkout
- - prepare_go
- - generic_build:
- circleci_home: /Users/distiller
-
- mac_amd64_test:
- macos:
- xcode: 12.0.1
- resource_class: medium
- environment:
- HOMEBREW_NO_AUTO_UPDATE: "true"
- parallelism: 4
- steps:
- #- run: git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow
- - prepare_go
- - generic_buildtest:
- result_subdir: mac_amd64_test
- circleci_home: /Users/distiller
- short_test_flag: "-short"
- - upload_coverage
-
- mac_amd64_test_nightly:
- macos:
- xcode: 12.0.1
- resource_class: medium
- environment:
- HOMEBREW_NO_AUTO_UPDATE: "true"
- parallelism: 4
- steps:
- #- run: git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow
- - prepare_go
- - generic_buildtest:
- result_subdir: mac_amd64_test_nightly
- circleci_home: /Users/distiller
+ platform: << parameters.platform >>
+ result_subdir: << parameters.platform>>_e2e_expect_nightly
no_output_timeout: 45m
- - upload_coverage
- slack/notify:
- event: fail
- template: basic_fail_1
+ <<: *slack-fail-event
- mac_amd64_integration:
- macos:
- xcode: 12.0.1
- resource_class: medium
- parallelism: 4
- environment:
- E2E_TEST_FILTER: "GO"
- HOMEBREW_NO_AUTO_UPDATE: "true"
- steps:
- #- run: git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow
- - prepare_go
- - generic_integration:
- result_subdir: mac_amd64_integration
- circleci_home: /Users/distiller
- short_test_flag: "-short"
-
- mac_amd64_integration_nightly:
- macos:
- xcode: 12.0.1
- resource_class: medium
- parallelism: 4
- environment:
- E2E_TEST_FILTER: "GO"
- HOMEBREW_NO_AUTO_UPDATE: "true"
- steps:
- #- run: git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow
- - prepare_go
- - generic_integration:
- result_subdir: mac_amd64_integration_nightly
- circleci_home: /Users/distiller
- no_output_timeout: 45m
- - slack/notify:
- event: fail
- template: basic_fail_1
-
- mac_amd64_e2e_subs:
- macos:
- xcode: 12.0.1
- resource_class: large
+ e2e_subs:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_large
+ working_directory: << pipeline.parameters.build_dir >>/project
environment:
E2E_TEST_FILTER: "SCRIPTS"
- HOMEBREW_NO_AUTO_UPDATE: "true"
steps:
- #- run: git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow
+ - prepare_build_dir
- prepare_go
- generic_integration:
- result_subdir: mac_amd64_e2e_subs
- circleci_home: /Users/distiller
+ platform: << parameters.platform >>
+ result_subdir: << parameters.platform >>_e2e_subs
short_test_flag: "-short"
- mac_amd64_e2e_subs_nightly:
- macos:
- xcode: 12.0.1
- resource_class: large
+ e2e_subs_nightly:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_large
+ working_directory: << pipeline.parameters.build_dir >>/project
environment:
E2E_TEST_FILTER: "SCRIPTS"
- HOMEBREW_NO_AUTO_UPDATE: "true"
steps:
- #- run: git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow
+ - prepare_build_dir
- prepare_go
- generic_integration:
- result_subdir: mac_amd64_e2e_subs_nightly
- circleci_home: /Users/distiller
+ platform: << parameters.platform >>
+ result_subdir: << parameters.platform >>_e2e_subs_nightly
no_output_timeout: 45m
- slack/notify:
- event: fail
- template: basic_fail_1
-
- mac_amd64_e2e_expect:
- macos:
- xcode: 12.0.1
- resource_class: medium
- parallelism: 2
- environment:
- E2E_TEST_FILTER: "EXPECT"
- HOMEBREW_NO_AUTO_UPDATE: "true"
- steps:
- #- run: git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow
- - prepare_go
- - generic_integration:
- result_subdir: mac_amd64_e2e_expect
- circleci_home: /Users/distiller
- short_test_flag: "-short"
-
- mac_amd64_e2e_expect_nightly:
- macos:
- xcode: 12.0.1
- resource_class: medium
- parallelism: 2
- environment:
- E2E_TEST_FILTER: "EXPECT"
- HOMEBREW_NO_AUTO_UPDATE: "true"
- steps:
- #- run: git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow
- - prepare_go
- - generic_integration:
- result_subdir: mac_amd64_e2e_expect_nightly
- circleci_home: /Users/distiller
- no_output_timeout: 45m
- - slack/notify:
- event: fail
- template: basic_fail_1
+ <<: *slack-fail-event
windows_x64_build:
executor:
@@ -893,16 +677,32 @@ jobs:
export MAKE=mingw32-make
$msys2 scripts/travis/build_test.sh
shell: bash.exe
+
tests_verification_job:
docker:
- image: python:3.9.6-alpine
resource_class: small
+ working_directory: << pipeline.parameters.build_dir >>/project
parameters:
- job_type: # job_type: ["amd64", "arm64", "mac_amd64"]
+ platform: # platform: ["amd64", "arm64", "mac_amd64"]
type: string
- job_version: # job_version: ["test", "test_nightly", "integration", "integration_nightly", "e2e_expect", "e2e_expect_nightly"]
+ job_type: # job_type: ["test", "test_nightly", "integration", "integration_nightly", "e2e_expect", "e2e_expect_nightly"]
type: string
steps:
- checkout
- tests_verification_command:
- result_subdir: << parameters.job_type >>_<< parameters.job_version >>
+ result_subdir: << parameters.platform >>_<< parameters.job_type >>
+
+ upload_binaries:
+ working_directory: << pipeline.parameters.build_dir >>/project
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_medium
+ steps:
+ - prepare_build_dir
+ - prepare_go
+ - upload_binaries_command:
+ platform: << parameters.platform >>
+ - slack/notify:
+ <<: *slack-fail-event
diff --git a/.travis.yml b/.travis.yml
index bae9d60a9..fa0457051 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,3 +1,4 @@
+name: "Travis Windows build"
os: linux
dist: bionic
language: minimal
@@ -6,165 +7,32 @@ language: minimal
if: tag IS blank
stages:
- - name: build_commit
- if: NOT (branch =~ /^hotfix\//) AND NOT (branch =~ /^rel\//) AND type != pull_request
- - name: build_pr
+ - name: build_windows_pr
if: type = pull_request
- - name: build_release
+ - name: build_windows_release
if: (branch =~ /^hotfix\// OR branch =~ /^rel\//) AND type != pull_request
- - name: deploy
- if: branch =~ /^rel\// AND type != pull_request
- - name: post_deploy
- if: branch =~ /^rel\// AND type != pull_request
jobs:
- allow_failures:
- - name: External ARM64 Deploy
- - name: External ARM64 Integration Test
- - name: External ARM Build
- - name: External ARM Deploy
- - name: Test Release Builds
include:
- - stage: build_commit
- os: linux
+ - stage: build_windows_pr
+ os: windows
+ name: Windows x64 PR Build
+ cache:
+ directories:
+ - $HOME/AppData/Local/Temp/chocolatey
+ - /C/tools/msys64
script:
- - scripts/travis/build_test.sh
+ - $mingw64 scripts/travis/build_test.sh
- - stage: build_pr
- os: linux
- name: Ubuntu AMD64 Build
+ - stage: build_windows_release
+ os: windows
+ name: Windows x64 Release Build
+ cache:
+ directories:
+ - $HOME/AppData/Local/Temp/chocolatey
+ - /C/tools/msys64
script:
- - scripts/travis/build_test.sh
- - # same stage, parallel job
- os: linux
- name: Ubuntu AMD64 Integration Test
- script:
- - ./scripts/travis/integration_test.sh
- - # same stage, parallel job
- name: External ARM64 Build
- os: linux
- env:
- - BUILD_TYPE: "external_build"
- - TARGET_PLATFORM: "linux-arm64"
- addons:
- apt:
- packages:
- - awscli
- script:
- - scripts/travis/external_build.sh ./scripts/travis/build_test.sh
- - # same stage, parallel job
- name: External ARM64 Integration Test
- os: linux
- env:
- - BUILD_TYPE: "external_build"
- - TARGET_PLATFORM: "linux-arm64"
- addons:
- apt:
- packages:
- - awscli
- script:
- - scripts/travis/external_build.sh ./scripts/travis/integration_test.sh
- - # same stage, parallel job
- os: osx
- osx_image: xcode11
- name: MacOS AMD64 Build
- script:
- - scripts/travis/build_test.sh
- - # same stage, parallel job
- os: osx
- osx_image: xcode11
- name: MacOS AMD64 Integration Test
- script:
- - ./scripts/travis/integration_test.sh
- - # same stage, parallel job
- os: windows
- name: Windows x64 Build
- cache:
- directories:
- - $HOME/AppData/Local/Temp/chocolatey
- - /C/tools/msys64
- script:
- - $mingw64 scripts/travis/build_test.sh
-
- - stage: build_release
- os: linux
- name: Ubuntu AMD64 Build
- script:
- - ./scripts/travis/build_test.sh
- - # same stage, parallel job
- os: linux
- name: Ubuntu AMD64 Integration Test
- script:
- - ./scripts/travis/integration_test.sh
- - # same stage, parallel job
- name: External ARM64 Build
- os: linux
- env:
- - BUILD_TYPE: "external_build"
- - TARGET_PLATFORM: "linux-arm64"
- addons:
- apt:
- packages:
- - awscli
- script:
- - scripts/travis/external_build.sh ./scripts/travis/build_test.sh
- - # same stage, parallel job
- name: External ARM64 Integration Test
- os: linux
- env:
- - BUILD_TYPE: "external_build"
- - TARGET_PLATFORM: "linux-arm64"
- addons:
- apt:
- packages:
- - awscli
- script:
- - scripts/travis/external_build.sh ./scripts/travis/integration_test.sh
- - # same stage, parallel job
- os: osx
- osx_image: xcode11
- name: MacOS AMD64 Build
- script:
- - scripts/travis/build_test.sh
- - # same stage, parallel job
- os: osx
- osx_image: xcode11
- name: MacOS AMD64 Integration Test
- script:
- - ./scripts/travis/integration_test.sh
- - # same stage, parallel job
- os: windows
- name: Windows x64 Build
- cache:
- directories:
- - $HOME/AppData/Local/Temp/chocolatey
- - /C/tools/msys64
- script:
- - $mingw64 scripts/travis/build_test.sh
-
- - stage: deploy
- name: Ubuntu Deploy
- os: linux
- script:
- - scripts/travis/deploy_packages.sh
- - scripts/travis/test_release.sh
- - # same stage, parallel job
- name: MacOS Deploy
- os: osx
- osx_image: xcode11
- script: scripts/travis/deploy_packages.sh
- - # same stage, parallel job
- name: External ARM64 Deploy
- os: linux
- env:
- - BUILD_TYPE: "external_build"
- - TARGET_PLATFORM: "linux-arm64"
- addons:
- apt:
- packages:
- - awscli
- script:
- - scripts/travis/external_build.sh ./scripts/travis/deploy_packages.sh
+ - $mingw64 scripts/travis/build_test.sh
# Don't rebuild libsodium every time
cache:
@@ -174,38 +42,26 @@ cache:
before_install:
- |-
- case $TRAVIS_OS_NAME in
- linux)
- # Disable sometimes-broken sources.list in Travis base images
- sudo rm -vf /etc/apt/sources.list.d/*
- ;;
- windows)
- [[ ! -f C:/tools/msys64/msys2_shell.cmd ]] && rm -rf C:/tools/msys64
- choco uninstall -y mingw
- choco upgrade --no-progress -y msys2
- export msys2='cmd //C RefreshEnv.cmd '
- export msys2+='& set MSYS=winsymlinks:nativestrict '
- export msys2+='& C:\\tools\\msys64\\msys2_shell.cmd -defterm -no-start'
- export mingw64="$msys2 -mingw64 -full-path -here -c "\"\$@"\" --"
- export msys2+=" -msys2 -c "\"\$@"\" --"
- $msys2 pacman --sync --noconfirm --needed mingw-w64-x86_64-toolchain unzip
- ## Install more MSYS2 packages from https://packages.msys2.org/base here
- taskkill //IM gpg-agent.exe //F # https://travis-ci.community/t/4967
- export PATH=/C/tools/msys64/mingw64/bin:$PATH
- export MAKE=mingw32-make # so that Autotools can find it
- ;;
- esac
- docker load -i $HOME/docker_cache/images.tar || true
+ [[ ! -f C:/tools/msys64/msys2_shell.cmd ]] && rm -rf C:/tools/msys64
+ choco uninstall -y mingw
+ choco upgrade --no-progress -y msys2
+ export msys2='cmd //C RefreshEnv.cmd '
+ export msys2+='& set MSYS=winsymlinks:nativestrict '
+ export msys2+='& C:\\tools\\msys64\\msys2_shell.cmd -defterm -no-start'
+ export mingw64="$msys2 -mingw64 -full-path -here -c "\"\$@"\" --"
+ export msys2+=" -msys2 -c "\"\$@"\" --"
+ $msys2 pacman --sync --noconfirm --needed mingw-w64-x86_64-toolchain unzip
+ ## Install more MSYS2 packages from https://packages.msys2.org/base here
+ taskkill //IM gpg-agent.exe //F # https://travis-ci.community/t/4967
+ export PATH=/C/tools/msys64/mingw64/bin:$PATH
+ export MAKE=mingw32-make # so that Autotools can find it
+ docker load -i $HOME/docker_cache/images.tar || true
before_cache:
- |-
- case $TRAVIS_OS_NAME in
- windows)
- # https://unix.stackexchange.com/a/137322/107554
- $msys2 pacman --sync --clean --noconfirm
- ;;
- esac
- docker save -o $HOME/docker_cache/images.tar $(docker images -a -q)
+ # https://unix.stackexchange.com/a/137322/107554
+ $msys2 pacman --sync --clean --noconfirm
+ docker save -o $HOME/docker_cache/images.tar $(docker images -a -q)
addons:
apt:
@@ -225,8 +81,4 @@ notifications:
slack:
if: branch = "rel/nightly"
rooms:
- - secure: jx2gllL8A8QXkV/wVpfKBRfK7cMQkuHOxxFmN2eYEGR8AR6bAGfo5HVmb6VKBRpwVzOgXwhvjbxNdYlmRkKXeJ5o0DDDUG6YSKIrNkA38JulNKzdG3K3d6DoGJV3Iturp6O9W/IdweRSz2rjJsHP1RpYG3w39giSNTMR/Q4ent68CNoYHC3XEsUHmwGVA8bQpfu2AKfr98twgTzmTcvmx93B/ZHlR8GoLh+Vq2eAwiYuyYnoNYww3ekttqeqAr21X5xTjQyAwntnk1gfRKbdiEXcxlnNKCQn3yaD7qNorJZm9U/fhGiA1eLHOxtLLJD8HSEzLvNj9gShgw/YPaYjZJLZJe0kDpR4oFwVd+lVzBManVGOwfHJIsug8dnVxY1O/PLHPV4iM4tyGwcZfxSLOXsutN3PCJhYjKUrKaJ75+L5d1tXe3DoXtGH5e/GV2kGxWr25woAZrKCeBOPFZdhAc30XLOzTk3FN3hzRKVbL9O4zxFgJllOvQTWDQ9bmXxa3bk6cDwxaF8IJk+hP1k3VaOdaxmd28pnyR/X7xRF0l1W8S6SZfDFXAFBvVyH/Q1nfvXAez//cK0He959reubjUGM8HaSWw73n9Bj5Ri4ub92xfJqSAZItI5dS1hjfdh7moS50dYbBwSozgOgWdFXHdzGNnCv+/8v0LJcnSPRdT8=
-
-env:
- global:
- secure: YMLx+QaBPgg97tVn2efZJSqwPRxybX9oR6Xe9oXUD4fEiVbOIv2BAsLP+MzSJGaFC+VXKha83kyDbAZnatDXTJYsBPOUgmIZDObQnktueR1v3TCKn3aw14G+/w6S4eQL+c7uW3idPE+KP3VWixjrBpWTHEfnZdnH9+qWeIWzy8A8UEAYPY/HXyDmwt5ceCJoITtdsuiuZNO2o7NGN8q+NP5A3ii8f5UyDa9/krxuA+x1e193wSGRdGcqxexM7zIq0uMhBF+2nOk4RAsDsqU33NpQw+dB1VJMta1XF+P6A7m16UKUjllTyOfUPGD068jKobVyWzbvZ79G2FZXGZ7HGE0711JZco3YenwtxXP8oGLgaIds6McoJnn3rbxe1i+PjRX09IXNQM/dNp53uxrtyi1y8ZEjzHjbb6z3rSOZrzaBhdVxBFR9Oc1Ek7i8MS1tlRlH58+U+Z1WPTvbcIQtFA5HmmTcDdBRkZtPC8bpVLHOZTGUcple9k39VtOGuSCvAAfHKENhieCaw/bBXX4bxAHAfiI0NEmdDrULDJLApWjtRCrUIqIqkdsJmtiZhE+iX1FKR9CP5lBXCAwDlPB3XOe0bSmiiGQ1/sf/1CovqOBg/vLwCpjQE0IZPofCiDZZdZbv4n4aCOjiAqhS8MjJ/LfLyUuAtPmQG5g47FzS1K4=
+ - secure: FvM+GHkfVQyJqounsRO8kcQT2fYljdBp/fODIlgNE1pcd08WKqHa14cpjQN2T/zC7/eeJAwDuk4ADUqgLdb85vSd6KgcYwBaPMLj8b1La/JY17ULpVuD1uke3uDb+OhC084Uo7cYUAcmpPOtX7ujRVXbeRFoj5R6xqyCYEBGeRNaQIJh36Jd1jkKjQGZgcQIMy0JZXTnVwxP1o3ed6mYtVMt0qlBCzHKXTr7CUTFp+TwFoZ99jxvjNEA6cpn1UE82uq08CiQ4ANY51SCMqq6L9y76SRREznjtR3CGGhZYlmewVnlsco6IB0Xe5k4j8GlIlZfV+bJiBaE4GqNKOpt0DcB/hxZtDbe7LcSfdtwU/NG8L1rw4Ktj+ALcEJDuprRhYaafEaL6iOVsOy6YwW2Tbj6xcJzeorTs/p0URkSJ/M1Us7zdOTzLkVeUKAcHIIl4SClHq3svzSwoSyjlwuq18Wp2VWNfQFY3qFd7eZo/meM8UuhRrTai18eX5ZJ9leAdwvwfma1uxS2ZlG8aDupx2Ww19AOZZ8txQtDTn+rzP8U1UnlaOMJStOmdEKfExGy348f1yQcJtOv4armZuEuC6eoIkeznNS8wO/ZkRSWuxDcc5Q1DsQADXfGSMEL36Aa6aDU5Hy7els00qYDImPVL+7rd2pAfSUzFCfA+7klefg=
diff --git a/Makefile b/Makefile
index 50613c6da..b17010bfe 100644
--- a/Makefile
+++ b/Makefile
@@ -76,7 +76,7 @@ GOLDFLAGS := $(GOLDFLAGS_BASE) \
UNIT_TEST_SOURCES := $(sort $(shell GOPATH=$(GOPATH) && GO111MODULE=off && go list ./... | grep -v /go-algorand/test/ ))
ALGOD_API_PACKAGES := $(sort $(shell GOPATH=$(GOPATH) && GO111MODULE=off && cd daemon/algod/api; go list ./... ))
-MSGP_GENERATE := ./protocol ./protocol/test ./crypto ./crypto/compactcert ./data/basics ./data/transactions ./data/committee ./data/bookkeeping ./data/hashable ./agreement ./rpcs ./node ./ledger ./ledger/ledgercore ./compactcert
+MSGP_GENERATE := ./protocol ./protocol/test ./crypto ./crypto/compactcert ./data/basics ./data/transactions ./data/committee ./data/bookkeeping ./data/hashable ./agreement ./rpcs ./node ./ledger ./ledger/ledgercore ./compactcert ./data/account
default: build
diff --git a/README.md b/README.md
index 8c6fa230a..1f9045adf 100644
--- a/README.md
+++ b/README.md
@@ -105,7 +105,7 @@ daemons, as well as other tools and commands:
about spending keys, protocols keys, one-time-use signing keys, and how they
relate to each other.
- `config` holds configuration parameters. These include parameters used
- locally by the node as well as parameters which must be agreed upon by the
+ locally by the node as well as parameters that must be agreed upon by the
protocol.
- `data` defines various types used throughout the codebase.
- `basics` hold basic types such as MicroAlgos, account data, and
diff --git a/agreement/abstractions.go b/agreement/abstractions.go
index 99bd45d98..390b32dfd 100644
--- a/agreement/abstractions.go
+++ b/agreement/abstractions.go
@@ -19,7 +19,6 @@ package agreement
import (
"context"
"errors"
- "time"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
@@ -74,10 +73,7 @@ var ErrAssembleBlockRoundStale = errors.New("requested round for AssembleBlock i
// Round.
type BlockFactory interface {
// AssembleBlock produces a new ValidatedBlock which is suitable for proposal
- // at a given Round. The time argument specifies a target deadline by
- // which the block should be produced. Specifically, the deadline can
- // cause the factory to add fewer transactions to the block in question
- // than might otherwise be possible.
+ // at a given Round.
//
// AssembleBlock should produce a ValidatedBlock for which the corresponding
// BlockValidator validates (i.e. for which BlockValidator.Validate
@@ -88,7 +84,7 @@ type BlockFactory interface {
// produce a ValidatedBlock for the given round. If an insufficient number of
// nodes on the network can assemble entries, the agreement protocol may
// lose liveness.
- AssembleBlock(basics.Round, time.Time) (ValidatedBlock, error)
+ AssembleBlock(basics.Round) (ValidatedBlock, error)
}
// A Ledger represents the sequence of Entries agreed upon by the protocol.
@@ -128,14 +124,14 @@ type LedgerReader interface {
// protocol may lose liveness.
Seed(basics.Round) (committee.Seed, error)
- // Lookup returns the AccountData associated with some Address
- // at the conclusion of a given round.
+ // LookupAgreement returns the AccountData associated with some Address
+ // needed by agreement at the conclusion of a given round.
//
// This method returns an error if the given Round has not yet been
// confirmed. It may also return an error if the given Round is
// unavailable by the storage device. In that case, the agreement
// protocol may lose liveness.
- Lookup(basics.Round, basics.Address) (basics.AccountData, error)
+ LookupAgreement(basics.Round, basics.Address) (basics.OnlineAccountData, error)
// Circulation returns the total amount of money in circulation at the
// conclusion of a given round.
@@ -229,6 +225,10 @@ type KeyManager interface {
// valid for the provided votingRound, and were available at
// keysRound.
VotingKeys(votingRound, keysRound basics.Round) []account.Participation
+
+ // Record indicates that the given participation action has been taken.
+ // The operation needs to be asynchronous to avoid impacting agreement.
+ Record(account basics.Address, round basics.Round, participationType account.ParticipationAction)
}
// MessageHandle is an ID referring to a specific message.
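The hunk above drops the time.Time deadline from AssembleBlock, so factories are now driven by the round alone. As a point of reference, here is a minimal sketch of a factory satisfying the new signature, modeled on the testBlockFactory/testValidatedBlock pattern this same commit updates in agreement/agreementtest/simulate_test.go; stubFactory and stubValidatedBlock are illustrative names, not code from the commit.

package example

import (
	"github.com/algorand/go-algorand/agreement"
	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/data/bookkeeping"
	"github.com/algorand/go-algorand/data/committee"
)

// stubValidatedBlock wraps a bookkeeping.Block so it can be returned
// as an agreement.ValidatedBlock.
type stubValidatedBlock struct {
	inside bookkeeping.Block
}

func (b stubValidatedBlock) Block() bookkeeping.Block { return b.inside }

func (b stubValidatedBlock) WithSeed(s committee.Seed) agreement.ValidatedBlock {
	b.inside.BlockHeader.Seed = s
	return b
}

// stubFactory satisfies the updated BlockFactory: no deadline argument,
// only the round for which a proposal is being assembled.
type stubFactory struct{}

func (stubFactory) AssembleBlock(r basics.Round) (agreement.ValidatedBlock, error) {
	return stubValidatedBlock{inside: bookkeeping.Block{
		BlockHeader: bookkeeping.BlockHeader{Round: r},
	}}, nil
}
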
diff --git a/agreement/agreementtest/keyManager.go b/agreement/agreementtest/keyManager.go
index 384fba8cd..340c8f40e 100644
--- a/agreement/agreementtest/keyManager.go
+++ b/agreement/agreementtest/keyManager.go
@@ -21,7 +21,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
)
-// SimpleKeyManager provides a simple implementation of a KeyManager.
+// SimpleKeyManager provides a simple implementation of a KeyManager for unit tests.
type SimpleKeyManager []account.Participation
// VotingKeys implements KeyManager.VotingKeys.
@@ -37,7 +37,8 @@ func (m SimpleKeyManager) VotingKeys(votingRound, _ basics.Round) []account.Part
// DeleteOldKeys implements KeyManager.DeleteOldKeys.
func (m SimpleKeyManager) DeleteOldKeys(r basics.Round) {
- // for _, acc := range m {
- // acc.DeleteOldKeys(r)
- // }
+}
+
+// Record implements KeyManager.Record.
+func (m SimpleKeyManager) Record(account basics.Address, round basics.Round, action account.ParticipationAction) {
}
diff --git a/agreement/agreementtest/simulate_test.go b/agreement/agreementtest/simulate_test.go
index 086349b82..c2dcbe233 100644
--- a/agreement/agreementtest/simulate_test.go
+++ b/agreement/agreementtest/simulate_test.go
@@ -92,7 +92,7 @@ type testBlockFactory struct {
Owner int
}
-func (f testBlockFactory) AssembleBlock(r basics.Round, deadline time.Time) (agreement.ValidatedBlock, error) {
+func (f testBlockFactory) AssembleBlock(r basics.Round) (agreement.ValidatedBlock, error) {
return testValidatedBlock{Inside: bookkeeping.Block{BlockHeader: bookkeeping.BlockHeader{Round: r}}}, nil
}
@@ -203,7 +203,7 @@ func (l *testLedger) LookupDigest(r basics.Round) (crypto.Digest, error) {
return l.entries[r].Digest(), nil
}
-func (l *testLedger) Lookup(r basics.Round, a basics.Address) (basics.AccountData, error) {
+func (l *testLedger) LookupAgreement(r basics.Round, a basics.Address) (basics.OnlineAccountData, error) {
l.mu.Lock()
defer l.mu.Unlock()
@@ -211,7 +211,7 @@ func (l *testLedger) Lookup(r basics.Round, a basics.Address) (basics.AccountDat
err := fmt.Errorf("Lookup called on future round: %v > %v! (this is probably a bug)", r, l.nextRound)
panic(err)
}
- return l.state[a], nil
+ return l.state[a].OnlineAccountData(), nil
}
func (l *testLedger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
@@ -226,7 +226,7 @@ func (l *testLedger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
var sum basics.MicroAlgos
var overflowed bool
for _, rec := range l.state {
- sum, overflowed = basics.OAddA(sum, rec.VotingStake())
+ sum, overflowed = basics.OAddA(sum, rec.OnlineAccountData().VotingStake())
if overflowed {
panic("circulation computation overflowed")
}
diff --git a/agreement/asyncVoteVerifier.go b/agreement/asyncVoteVerifier.go
index 59bb90b09..072fb2f15 100644
--- a/agreement/asyncVoteVerifier.go
+++ b/agreement/asyncVoteVerifier.go
@@ -100,7 +100,7 @@ func (avv *AsyncVoteVerifier) executeVoteVerification(task interface{}) interfac
select {
case <-req.ctx.Done():
// request cancelled, return an error response on the channel
- return &asyncVerifyVoteResponse{err: req.ctx.Err(), cancelled: true, req: &req}
+ return &asyncVerifyVoteResponse{err: req.ctx.Err(), cancelled: true, req: &req, index: req.index}
default:
// request was not cancelled, so we verify it here and return the result on the channel
v, err := req.uv.verify(req.l)
@@ -119,7 +119,7 @@ func (avv *AsyncVoteVerifier) executeEqVoteVerification(task interface{}) interf
select {
case <-req.ctx.Done():
// request cancelled, return an error response on the channel
- return &asyncVerifyVoteResponse{err: req.ctx.Err(), cancelled: true, req: &req}
+ return &asyncVerifyVoteResponse{err: req.ctx.Err(), cancelled: true, req: &req, index: req.index}
default:
// request was not cancelled, so we verify it here and return the result on the channel
ev, err := req.uev.verify(req.l)
@@ -131,7 +131,7 @@ func (avv *AsyncVoteVerifier) executeEqVoteVerification(task interface{}) interf
}
}
-func (avv *AsyncVoteVerifier) verifyVote(verctx context.Context, l LedgerReader, uv unauthenticatedVote, index int, message message, out chan<- asyncVerifyVoteResponse) {
+func (avv *AsyncVoteVerifier) verifyVote(verctx context.Context, l LedgerReader, uv unauthenticatedVote, index int, message message, out chan<- asyncVerifyVoteResponse) error {
select {
case <-avv.ctx.Done(): // if we're quitting, don't enqueue the request
// case <-verctx.Done(): DO NOT DO THIS! otherwise we will lose the vote (and forget to clean up)!
@@ -140,16 +140,18 @@ func (avv *AsyncVoteVerifier) verifyVote(verctx context.Context, l LedgerReader,
// if we're done while waiting for room in the requests channel, don't queue the request
req := asyncVerifyVoteRequest{ctx: verctx, l: l, uv: &uv, index: index, message: message, out: out}
avv.wg.Add(1)
- if avv.backlogExecPool.EnqueueBacklog(avv.ctx, avv.executeVoteVerification, req, avv.execpoolOut) != nil {
+ if err := avv.backlogExecPool.EnqueueBacklog(avv.ctx, avv.executeVoteVerification, req, avv.execpoolOut); err != nil {
// we want to call "wg.Done()" here to "fix" the accounting of the number of pending tasks.
// if we got a non-nil, it means that our context has expired, which means that we won't see this task
// getting to the verification function.
avv.wg.Done()
+ return err
}
}
+ return nil
}
-func (avv *AsyncVoteVerifier) verifyEqVote(verctx context.Context, l LedgerReader, uev unauthenticatedEquivocationVote, index int, message message, out chan<- asyncVerifyVoteResponse) {
+func (avv *AsyncVoteVerifier) verifyEqVote(verctx context.Context, l LedgerReader, uev unauthenticatedEquivocationVote, index int, message message, out chan<- asyncVerifyVoteResponse) error {
select {
case <-avv.ctx.Done(): // if we're quitting, don't enqueue the request
// case <-verctx.Done(): DO NOT DO THIS! otherwise we will lose the vote (and forget to clean up)!
@@ -158,13 +160,15 @@ func (avv *AsyncVoteVerifier) verifyEqVote(verctx context.Context, l LedgerReade
// if we're done while waiting for room in the requests channel, don't queue the request
req := asyncVerifyVoteRequest{ctx: verctx, l: l, uev: &uev, index: index, message: message, out: out}
avv.wg.Add(1)
- if avv.backlogExecPool.EnqueueBacklog(avv.ctx, avv.executeEqVoteVerification, req, avv.execpoolOut) != nil {
+ if err := avv.backlogExecPool.EnqueueBacklog(avv.ctx, avv.executeEqVoteVerification, req, avv.execpoolOut); err != nil {
// we want to call "wg.Done()" here to "fix" the accounting of the number of pending tasks.
// if we got a non-nil, it means that our context has expired, which means that we won't see this task
// getting to the verification function.
avv.wg.Done()
+ return err
}
}
+ return nil
}
// Quit tells the AsyncVoteVerifier to shutdown and waits until all workers terminate.
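Because verifyVote and verifyEqVote now return the enqueue error instead of swallowing it, a caller that fans responses out over a channel can report the failure without blocking. A sketch of that caller-side pattern, assuming it lives in the agreement package (the helper name is illustrative; the voteFillWorker hunk below applies the same idea inline):

// reportEnqueueFailure forwards a failed enqueue as a cancelled
// response, dropping it rather than blocking if the channel is full.
func reportEnqueueFailure(out chan<- asyncVerifyVoteResponse, index int, err error) {
	if err == nil || out == nil {
		return
	}
	select {
	case out <- asyncVerifyVoteResponse{index: index, err: err, cancelled: true}:
	default:
		// channel full; the real worker logs this case instead
	}
}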
diff --git a/agreement/asyncVoteVerifier_test.go b/agreement/asyncVoteVerifier_test.go
new file mode 100644
index 000000000..6cfadedd8
--- /dev/null
+++ b/agreement/asyncVoteVerifier_test.go
@@ -0,0 +1,50 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package agreement
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/execpool"
+)
+
+type expiredExecPool struct {
+ execpool.ExecutionPool
+}
+
+func (fp *expiredExecPool) EnqueueBacklog(enqueueCtx context.Context, t execpool.ExecFunc, arg interface{}, out chan interface{}) error {
+ // return an error to check that verifyVote() correctly reports enqueue failures to its caller.
+ return context.Canceled
+}
+
+// Test async vote verifier against a full execution pool.
+func TestVerificationAgainstFullExecutionPool(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ mainPool := execpool.MakePool(t)
+ defer mainPool.Shutdown()
+
+ voteVerifier := MakeAsyncVoteVerifier(&expiredExecPool{mainPool})
+ defer voteVerifier.Quit()
+ verifyErr := voteVerifier.verifyVote(context.Background(), nil, unauthenticatedVote{}, 0, message{}, make(chan<- asyncVerifyVoteResponse, 1))
+ require.Equal(t, context.Canceled, verifyErr)
+ verifyEqVoteErr := voteVerifier.verifyEqVote(context.Background(), nil, unauthenticatedEquivocationVote{}, 0, message{}, make(chan<- asyncVerifyVoteResponse, 1))
+ require.Equal(t, context.Canceled, verifyEqVoteErr)
+}
diff --git a/agreement/common_test.go b/agreement/common_test.go
index ff453c1f6..9ecf5b4b9 100644
--- a/agreement/common_test.go
+++ b/agreement/common_test.go
@@ -21,7 +21,6 @@ import (
"fmt"
"math/rand"
"testing"
- "time"
"github.com/algorand/go-deadlock"
"github.com/stretchr/testify/require"
@@ -180,7 +179,7 @@ type testBlockFactory struct {
Owner int
}
-func (f testBlockFactory) AssembleBlock(r basics.Round, deadline time.Time) (ValidatedBlock, error) {
+func (f testBlockFactory) AssembleBlock(r basics.Round) (ValidatedBlock, error) {
return testValidatedBlock{Inside: bookkeeping.Block{BlockHeader: bookkeeping.BlockHeader{Round: r}}}, nil
}
@@ -320,7 +319,7 @@ func (l *testLedger) LookupDigest(r basics.Round) (crypto.Digest, error) {
return l.entries[r].Digest(), nil
}
-func (l *testLedger) Lookup(r basics.Round, a basics.Address) (basics.AccountData, error) {
+func (l *testLedger) LookupAgreement(r basics.Round, a basics.Address) (basics.OnlineAccountData, error) {
l.mu.Lock()
defer l.mu.Unlock()
@@ -330,10 +329,10 @@ func (l *testLedger) Lookup(r basics.Round, a basics.Address) (basics.AccountDat
}
if l.maxNumBlocks != 0 && r+round(l.maxNumBlocks) < l.nextRound {
- return basics.AccountData{}, &LedgerDroppedRoundError{}
+ return basics.OnlineAccountData{}, &LedgerDroppedRoundError{}
}
- return l.state[a], nil
+ return l.state[a].OnlineAccountData(), nil
}
func (l *testLedger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
@@ -348,7 +347,7 @@ func (l *testLedger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
var sum basics.MicroAlgos
var overflowed bool
for _, rec := range l.state {
- sum, overflowed = basics.OAddA(sum, rec.VotingStake())
+ sum, overflowed = basics.OAddA(sum, rec.OnlineAccountData().VotingStake())
if overflowed {
panic("circulation computation overflowed")
}
@@ -422,7 +421,7 @@ type testAccountData struct {
}
func makeProposalsTesting(accs testAccountData, round basics.Round, period period, factory BlockFactory, ledger Ledger) (ps []proposal, vs []vote) {
- ve, err := factory.AssembleBlock(round, time.Now().Add(time.Minute))
+ ve, err := factory.AssembleBlock(round)
if err != nil {
logging.Base().Errorf("Could not generate a proposal for round %d: %v", round, err)
return nil, nil
@@ -534,7 +533,7 @@ func (v *voteMakerHelper) MakeRandomProposalValue() *proposalValue {
func (v *voteMakerHelper) MakeRandomProposalPayload(t *testing.T, r round) (*proposal, *proposalValue) {
f := testBlockFactory{Owner: 1}
- ve, err := f.AssembleBlock(r, time.Now().Add(time.Minute))
+ ve, err := f.AssembleBlock(r)
require.NoError(t, err)
var payload unauthenticatedProposal
diff --git a/agreement/cryptoVerifier.go b/agreement/cryptoVerifier.go
index 0c79496de..4f84ca3ba 100644
--- a/agreement/cryptoVerifier.go
+++ b/agreement/cryptoVerifier.go
@@ -205,7 +205,14 @@ func (c *poolCryptoVerifier) voteFillWorker(toBundleWait chan<- bundleFuture) {
}
uv := votereq.message.UnauthenticatedVote
- c.voteVerifier.verifyVote(votereq.ctx, c.ledger, uv, votereq.TaskIndex, votereq.message, c.votes.out)
+ err := c.voteVerifier.verifyVote(votereq.ctx, c.ledger, uv, votereq.TaskIndex, votereq.message, c.votes.out)
+ if err != nil && c.votes.out != nil {
+ select {
+ case c.votes.out <- asyncVerifyVoteResponse{index: votereq.TaskIndex, err: err, cancelled: true}:
+ default:
+ c.log.Infof("poolCryptoVerifier.voteFillWorker unable to write failed enqueue response to output channel")
+ }
+ }
case bundlereq, ok := <-bundlesin:
if !ok {
bundlesin = nil
diff --git a/agreement/cryptoVerifier_test.go b/agreement/cryptoVerifier_test.go
index bced9c9f7..78e6f6488 100644
--- a/agreement/cryptoVerifier_test.go
+++ b/agreement/cryptoVerifier_test.go
@@ -26,6 +26,7 @@ import (
"github.com/algorand/go-deadlock"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
@@ -35,6 +36,7 @@ import (
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/execpool"
)
var _ = fmt.Printf
@@ -314,7 +316,7 @@ func BenchmarkCryptoVerifierProposalVertification(b *testing.B) {
pn := &asyncPseudonode{
factory: testBlockFactory{Owner: 0},
validator: testBlockValidator{},
- keys: simpleKeyManager(participations),
+ keys: makeRecordingKeyManager(participations),
ledger: ledger,
log: serviceLogger{logging.Base()},
}
@@ -385,3 +387,26 @@ func BenchmarkCryptoVerifierBundleVertification(b *testing.B) {
<-c
}
}
+
+// TestCryptoVerifierVerificationFailures verifies that cryptoVerifier.VerifyVote returns an error in the vote
+// response when it is unable to enqueue a vote.
+func TestCryptoVerifierVerificationFailures(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ mainPool := execpool.MakePool(t)
+ defer mainPool.Shutdown()
+
+ voteVerifier := MakeAsyncVoteVerifier(&expiredExecPool{mainPool})
+ defer voteVerifier.Quit()
+
+ cryptoVerifier := makeCryptoVerifier(nil, nil, voteVerifier, logging.TestingLog(t))
+ defer cryptoVerifier.Quit()
+
+ cryptoVerifier.VerifyVote(context.Background(), cryptoVoteRequest{message: message{Tag: protocol.AgreementVoteTag}, Round: basics.Round(8), TaskIndex: 14})
+ // read the failed response from VerifiedVotes:
+ votesout := cryptoVerifier.VerifiedVotes()
+ voteResponse := <-votesout
+ require.Equal(t, context.Canceled, voteResponse.err)
+ require.True(t, voteResponse.cancelled)
+ require.Equal(t, 14, voteResponse.index)
+}
diff --git a/agreement/demux_test.go b/agreement/demux_test.go
index f099d79f8..e351b9a79 100644
--- a/agreement/demux_test.go
+++ b/agreement/demux_test.go
@@ -484,9 +484,9 @@ func (t *demuxTester) LookupDigest(basics.Round) (crypto.Digest, error) {
}
// implement Ledger
-func (t *demuxTester) Lookup(basics.Round, basics.Address) (basics.AccountData, error) {
+func (t *demuxTester) LookupAgreement(basics.Round, basics.Address) (basics.OnlineAccountData, error) {
// we don't care about this function in this test.
- return basics.AccountData{}, nil
+ return basics.OnlineAccountData{}, nil
}
// implement Ledger
diff --git a/agreement/fuzzer/fuzzer_test.go b/agreement/fuzzer/fuzzer_test.go
index 8c526b13d..c6cc91be7 100644
--- a/agreement/fuzzer/fuzzer_test.go
+++ b/agreement/fuzzer/fuzzer_test.go
@@ -29,6 +29,7 @@ import (
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/agreement/agreementtest"
"github.com/algorand/go-algorand/agreement/gossip"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
@@ -132,7 +133,7 @@ func (n *Fuzzer) initAgreementNode(nodeID int, filters ...NetworkFilterFactory)
Logger: logger,
Ledger: n.ledgers[nodeID],
Network: gossip.WrapNetwork(n.facades[nodeID], logger),
- KeyManager: simpleKeyManager(n.accounts[nodeID : nodeID+1]),
+ KeyManager: agreementtest.SimpleKeyManager(n.accounts[nodeID : nodeID+1]),
BlockValidator: n.blockValidator,
BlockFactory: testBlockFactory{Owner: nodeID},
Clock: n.clocks[nodeID],
diff --git a/agreement/fuzzer/ledger_test.go b/agreement/fuzzer/ledger_test.go
index 15a3fbebe..9866f53c4 100644
--- a/agreement/fuzzer/ledger_test.go
+++ b/agreement/fuzzer/ledger_test.go
@@ -20,7 +20,6 @@ import (
"context"
"fmt"
"math/rand"
- "time"
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
@@ -109,7 +108,7 @@ type testBlockFactory struct {
Owner int
}
-func (f testBlockFactory) AssembleBlock(r basics.Round, deadline time.Time) (agreement.ValidatedBlock, error) {
+func (f testBlockFactory) AssembleBlock(r basics.Round) (agreement.ValidatedBlock, error) {
return testValidatedBlock{Inside: bookkeeping.Block{BlockHeader: bookkeeping.BlockHeader{Round: r}}}, nil
}
@@ -226,7 +225,7 @@ func (l *testLedger) LookupDigest(r basics.Round) (crypto.Digest, error) {
return l.entries[r].Digest(), nil
}
-func (l *testLedger) Lookup(r basics.Round, a basics.Address) (basics.AccountData, error) {
+func (l *testLedger) LookupAgreement(r basics.Round, a basics.Address) (basics.OnlineAccountData, error) {
l.mu.Lock()
defer l.mu.Unlock()
@@ -234,7 +233,7 @@ func (l *testLedger) Lookup(r basics.Round, a basics.Address) (basics.AccountDat
err := fmt.Errorf("Lookup called on future round: %d >= %d! (this is probably a bug)", r, l.nextRound)
panic(err)
}
- return l.state[a], nil
+ return l.state[a].OnlineAccountData(), nil
}
func (l *testLedger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
@@ -249,7 +248,7 @@ func (l *testLedger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
var sum basics.MicroAlgos
var overflowed bool
for _, rec := range l.state {
- sum, overflowed = basics.OAddA(sum, rec.VotingStake())
+ sum, overflowed = basics.OAddA(sum, rec.OnlineAccountData().VotingStake())
if overflowed {
panic("circulation computation overflowed")
}
diff --git a/agreement/keyManager_test.go b/agreement/keyManager_test.go
new file mode 100644
index 000000000..f992e01f0
--- /dev/null
+++ b/agreement/keyManager_test.go
@@ -0,0 +1,74 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package agreement
+
+import (
+ "testing"
+
+ "github.com/algorand/go-deadlock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/data/basics"
+)
+
+func makeRecordingKeyManager(accounts []account.Participation) *recordingKeyManager {
+ return &recordingKeyManager{
+ keys: accounts,
+ recording: make(map[basics.Address]map[account.ParticipationAction]basics.Round),
+ }
+}
+
+// recordingKeyManager provides a simple implementation of a KeyManager for unit tests.
+type recordingKeyManager struct {
+ keys []account.Participation
+ recording map[basics.Address]map[account.ParticipationAction]basics.Round
+ mutex deadlock.Mutex
+}
+
+// VotingKeys implements KeyManager.VotingKeys.
+func (m *recordingKeyManager) VotingKeys(votingRound, _ basics.Round) []account.Participation {
+ var km []account.Participation
+ for _, acc := range m.keys {
+ if acc.OverlapsInterval(votingRound, votingRound) {
+ km = append(km, acc)
+ }
+ }
+ return km
+}
+
+// DeleteOldKeys implements KeyManager.DeleteOldKeys.
+func (m *recordingKeyManager) DeleteOldKeys(r basics.Round) {
+}
+
+// Record implements KeyManager.Record.
+func (m *recordingKeyManager) Record(acct basics.Address, round basics.Round, action account.ParticipationAction) {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ if _, ok := m.recording[acct]; !ok {
+ m.recording[acct] = make(map[account.ParticipationAction]basics.Round)
+ }
+ m.recording[acct][action] = round
+}
+
+// ValidateVoteRound asserts that the given address voted and proposed at the given round.
+func (m *recordingKeyManager) ValidateVoteRound(t *testing.T, address basics.Address, round basics.Round) {
+ m.mutex.Lock()
+ require.Equal(t, round, m.recording[address][account.Vote])
+ require.Equal(t, round, m.recording[address][account.BlockProposal])
+ m.mutex.Unlock()
+}
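A recording manager slots in anywhere a KeyManager is accepted, and its recorded rounds can be asserted once the run completes. A hypothetical usage sketch (the fixture setup is elided):

func TestParticipationIsRecorded(t *testing.T) {
	var accounts []account.Participation // from the usual test fixtures
	km := makeRecordingKeyManager(accounts)

	// ... drive the agreement service with km as its KeyManager ...

	// Each participating address should have voted and proposed at the
	// expected round.
	for _, acc := range accounts {
		km.ValidateVoteRound(t, acc.Address(), basics.Round(1))
	}
}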
diff --git a/agreement/msgp_gen.go b/agreement/msgp_gen.go
index 61db86db6..4eb67496e 100644
--- a/agreement/msgp_gen.go
+++ b/agreement/msgp_gen.go
@@ -6,6 +6,7 @@ import (
"sort"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/msgp/msgp"
@@ -1354,120 +1355,124 @@ func (z period) MsgIsZero() bool {
func (z *proposal) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0003Len := uint32(27)
- var zb0003Mask uint32 /* 32 bits */
+ zb0004Len := uint32(28)
+ var zb0004Mask uint64 /* 34 bits */
if len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert) == 0 {
- zb0003Len--
- zb0003Mask |= 0x10
+ zb0004Len--
+ zb0004Mask |= 0x20
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0 {
- zb0003Len--
- zb0003Mask |= 0x20
+ zb0004Len--
+ zb0004Mask |= 0x40
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x40
+ zb0004Len--
+ zb0004Mask |= 0x80
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0 {
- zb0003Len--
- zb0003Mask |= 0x80
+ zb0004Len--
+ zb0004Mask |= 0x100
}
if (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "" {
- zb0003Len--
- zb0003Mask |= 0x100
+ zb0004Len--
+ zb0004Mask |= 0x200
}
if (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x200
+ zb0004Len--
+ zb0004Mask |= 0x400
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400
+ zb0004Len--
+ zb0004Mask |= 0x800
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800
+ zb0004Len--
+ zb0004Mask |= 0x1000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x1000
+ zb0004Len--
+ zb0004Mask |= 0x2000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0 {
- zb0003Len--
- zb0003Mask |= 0x2000
+ zb0004Len--
+ zb0004Mask |= 0x4000
}
if (*z).unauthenticatedProposal.OriginalPeriod == 0 {
- zb0003Len--
- zb0003Mask |= 0x4000
+ zb0004Len--
+ zb0004Mask |= 0x8000
}
if (*z).unauthenticatedProposal.OriginalProposer.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x8000
+ zb0004Len--
+ zb0004Mask |= 0x10000
+ }
+ if len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x20000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x10000
+ zb0004Len--
+ zb0004Mask |= 0x40000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20000
+ zb0004Len--
+ zb0004Mask |= 0x80000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0 {
- zb0003Len--
- zb0003Mask |= 0x40000
+ zb0004Len--
+ zb0004Mask |= 0x100000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x80000
+ zb0004Len--
+ zb0004Mask |= 0x200000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x100000
+ zb0004Len--
+ zb0004Mask |= 0x400000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x200000
+ zb0004Len--
+ zb0004Mask |= 0x800000
}
if (*z).unauthenticatedProposal.SeedProof.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400000
+ zb0004Len--
+ zb0004Mask |= 0x1000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800000
+ zb0004Len--
+ zb0004Mask |= 0x2000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0 {
- zb0003Len--
- zb0003Mask |= 0x1000000
+ zb0004Len--
+ zb0004Mask |= 0x4000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0 {
- zb0003Len--
- zb0003Mask |= 0x2000000
+ zb0004Len--
+ zb0004Mask |= 0x8000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x4000000
+ zb0004Len--
+ zb0004Mask |= 0x10000000
}
if (*z).unauthenticatedProposal.Block.Payset.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x8000000
+ zb0004Len--
+ zb0004Mask |= 0x20000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x10000000
+ zb0004Len--
+ zb0004Mask |= 0x40000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20000000
+ zb0004Len--
+ zb0004Mask |= 0x80000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false {
- zb0003Len--
- zb0003Mask |= 0x40000000
+ zb0004Len--
+ zb0004Mask |= 0x100000000
}
- // variable map header, size zb0003Len
- o = msgp.AppendMapHeader(o, zb0003Len)
- if zb0003Len != 0 {
- if (zb0003Mask & 0x10) == 0 { // if not empty
+ // variable map header, size zb0004Len
+ o = msgp.AppendMapHeader(o, zb0004Len)
+ if zb0004Len != 0 {
+ if (zb0004Mask & 0x20) == 0 { // if not empty
// string "cc"
o = append(o, 0xa2, 0x63, 0x63)
if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
@@ -1487,132 +1492,144 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) {
o = zb0002.MarshalMsg(o)
}
}
- if (zb0003Mask & 0x20) == 0 { // if not empty
+ if (zb0004Mask & 0x40) == 0 { // if not empty
// string "earn"
o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel)
}
- if (zb0003Mask & 0x40) == 0 { // if not empty
+ if (zb0004Mask & 0x80) == 0 { // if not empty
// string "fees"
o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MarshalMsg(o)
}
- if (zb0003Mask & 0x80) == 0 { // if not empty
+ if (zb0004Mask & 0x100) == 0 { // if not empty
// string "frac"
o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue)
}
- if (zb0003Mask & 0x100) == 0 { // if not empty
+ if (zb0004Mask & 0x200) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID)
}
- if (zb0003Mask & 0x200) == 0 { // if not empty
+ if (zb0004Mask & 0x400) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MarshalMsg(o)
}
- if (zb0003Mask & 0x400) == 0 { // if not empty
+ if (zb0004Mask & 0x800) == 0 { // if not empty
// string "nextbefore"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MarshalMsg(o)
}
- if (zb0003Mask & 0x800) == 0 { // if not empty
+ if (zb0004Mask & 0x1000) == 0 { // if not empty
// string "nextproto"
o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000) == 0 { // if not empty
// string "nextswitch"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MarshalMsg(o)
}
- if (zb0003Mask & 0x2000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000) == 0 { // if not empty
// string "nextyes"
o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals)
}
- if (zb0003Mask & 0x4000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000) == 0 { // if not empty
// string "oper"
o = append(o, 0xa4, 0x6f, 0x70, 0x65, 0x72)
o = msgp.AppendUint64(o, uint64((*z).unauthenticatedProposal.OriginalPeriod))
}
- if (zb0003Mask & 0x8000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000) == 0 { // if not empty
// string "oprop"
o = append(o, 0xa5, 0x6f, 0x70, 0x72, 0x6f, 0x70)
o = (*z).unauthenticatedProposal.OriginalProposer.MarshalMsg(o)
}
- if (zb0003Mask & 0x10000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000) == 0 { // if not empty
+ // string "partupdrmv"
+ o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
+ if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)))
+ }
+ for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ o = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].MarshalMsg(o)
+ }
+ }
+ if (zb0004Mask & 0x40000) == 0 { // if not empty
// string "prev"
o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Branch.MarshalMsg(o)
}
- if (zb0003Mask & 0x20000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000) == 0 { // if not empty
// string "proto"
o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x40000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000) == 0 { // if not empty
// string "rate"
o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate)
}
- if (zb0003Mask & 0x80000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000) == 0 { // if not empty
// string "rnd"
o = append(o, 0xa3, 0x72, 0x6e, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Round.MarshalMsg(o)
}
- if (zb0003Mask & 0x100000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000) == 0 { // if not empty
// string "rwcalr"
o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o)
}
- if (zb0003Mask & 0x200000) == 0 { // if not empty
+ if (zb0004Mask & 0x800000) == 0 { // if not empty
// string "rwd"
o = append(o, 0xa3, 0x72, 0x77, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MarshalMsg(o)
}
- if (zb0003Mask & 0x400000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000000) == 0 { // if not empty
// string "sdpf"
o = append(o, 0xa4, 0x73, 0x64, 0x70, 0x66)
o = (*z).unauthenticatedProposal.SeedProof.MarshalMsg(o)
}
- if (zb0003Mask & 0x800000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000000) == 0 { // if not empty
// string "seed"
o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000000) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter)
}
- if (zb0003Mask & 0x2000000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000000) == 0 { // if not empty
// string "ts"
o = append(o, 0xa2, 0x74, 0x73)
o = msgp.AppendInt64(o, (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp)
}
- if (zb0003Mask & 0x4000000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000000) == 0 { // if not empty
// string "txn"
o = append(o, 0xa3, 0x74, 0x78, 0x6e)
o = (*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.MarshalMsg(o)
}
- if (zb0003Mask & 0x8000000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000000) == 0 { // if not empty
// string "txns"
o = append(o, 0xa4, 0x74, 0x78, 0x6e, 0x73)
o = (*z).unauthenticatedProposal.Block.Payset.MarshalMsg(o)
}
- if (zb0003Mask & 0x10000000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000000) == 0 { // if not empty
// string "upgradedelay"
o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MarshalMsg(o)
}
- if (zb0003Mask & 0x20000000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000000) == 0 { // if not empty
// string "upgradeprop"
o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MarshalMsg(o)
}
- if (zb0003Mask & 0x40000000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000000) == 0 { // if not empty
// string "upgradeyes"
o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73)
o = msgp.AppendBool(o, (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove)
@@ -1630,214 +1647,214 @@ func (_ *proposal) CanMarshalMsg(z interface{}) bool {
func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Round.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Round")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Branch.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Branch")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Seed")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnRoot")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TimeStamp")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisHash")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "FeeSink")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsPool")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsLevel")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRate")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsResidue")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRecalculationRound")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CurrentProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolApprovals")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolVoteBefore")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolSwitchOn")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradePropose")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeDelay")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeApprove")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnCounter")
return
}
}
- if zb0003 > 0 {
- zb0003--
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if zb0004 > 0 {
+ zb0004--
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0005 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(protocol.NumCompactCertTypes))
+ if zb0006 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0006 {
+ if zb0007 {
(*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = nil
} else if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0005)
+ (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0006)
}
- for zb0005 > 0 {
+ for zb0006 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 bookkeeping.CompactCertState
- zb0005--
+ zb0006--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
@@ -1851,44 +1868,73 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).unauthenticatedProposal.Block.BlockHeader.CompactCert[zb0001] = zb0002
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0008 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0009 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0008 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0008]
+ } else {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0008)
+ }
+ for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.Payset.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Payset")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.SeedProof.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "SeedProof")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
{
- var zb0007 uint64
- zb0007, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0010 uint64
+ zb0010, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OriginalPeriod")
return
}
- (*z).unauthenticatedProposal.OriginalPeriod = period(zb0007)
+ (*z).unauthenticatedProposal.OriginalPeriod = period(zb0010)
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.OriginalProposer.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OriginalProposer")
return
}
}
- if zb0003 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0003)
+ if zb0004 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0004)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -1899,11 +1945,11 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err)
return
}
- if zb0004 {
+ if zb0005 {
(*z) = proposal{}
}
- for zb0003 > 0 {
- zb0003--
+ for zb0004 > 0 {
+ zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -2043,27 +2089,27 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "cc":
- var zb0008 int
- var zb0009 bool
- zb0008, zb0009, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0008 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(protocol.NumCompactCertTypes))
+ if zb0011 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0009 {
+ if zb0012 {
(*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = nil
} else if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0008)
+ (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0011)
}
- for zb0008 > 0 {
+ for zb0011 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 bookkeeping.CompactCertState
- zb0008--
+ zb0011--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
@@ -2076,6 +2122,33 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
(*z).unauthenticatedProposal.Block.BlockHeader.CompactCert[zb0001] = zb0002
}
+ case "partupdrmv":
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0013 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0013), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0014 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0013 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0013]
+ } else {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0013)
+ }
+ for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
case "txns":
bts, err = (*z).unauthenticatedProposal.Block.Payset.UnmarshalMsg(bts)
if err != nil {
@@ -2090,13 +2163,13 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
case "oper":
{
- var zb0010 uint64
- zb0010, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0015 uint64
+ zb0015, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OriginalPeriod")
return
}
- (*z).unauthenticatedProposal.OriginalPeriod = period(zb0010)
+ (*z).unauthenticatedProposal.OriginalPeriod = period(zb0015)
}
case "oprop":
bts, err = (*z).unauthenticatedProposal.OriginalProposer.UnmarshalMsg(bts)
@@ -2132,13 +2205,17 @@ func (z *proposal) Msgsize() (s int) {
s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
}
}
+ s += 11 + msgp.ArrayHeaderSize
+ for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ s += (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].Msgsize()
+ }
s += 5 + (*z).unauthenticatedProposal.Block.Payset.Msgsize() + 5 + (*z).unauthenticatedProposal.SeedProof.Msgsize() + 5 + msgp.Uint64Size + 6 + (*z).unauthenticatedProposal.OriginalProposer.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
func (z *proposal) MsgIsZero() bool {
- return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero())
+ return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
@@ -3017,124 +3094,128 @@ func (z step) MsgIsZero() bool {
func (z *transmittedPayload) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0003Len := uint32(28)
- var zb0003Mask uint64 /* 33 bits */
+ zb0004Len := uint32(29)
+ var zb0004Mask uint64 /* 35 bits */
if len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert) == 0 {
- zb0003Len--
- zb0003Mask |= 0x20
+ zb0004Len--
+ zb0004Mask |= 0x40
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0 {
- zb0003Len--
- zb0003Mask |= 0x40
+ zb0004Len--
+ zb0004Mask |= 0x80
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x80
+ zb0004Len--
+ zb0004Mask |= 0x100
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0 {
- zb0003Len--
- zb0003Mask |= 0x100
+ zb0004Len--
+ zb0004Mask |= 0x200
}
if (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "" {
- zb0003Len--
- zb0003Mask |= 0x200
+ zb0004Len--
+ zb0004Mask |= 0x400
}
if (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400
+ zb0004Len--
+ zb0004Mask |= 0x800
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800
+ zb0004Len--
+ zb0004Mask |= 0x1000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x1000
+ zb0004Len--
+ zb0004Mask |= 0x2000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x2000
+ zb0004Len--
+ zb0004Mask |= 0x4000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0 {
- zb0003Len--
- zb0003Mask |= 0x4000
+ zb0004Len--
+ zb0004Mask |= 0x8000
}
if (*z).unauthenticatedProposal.OriginalPeriod == 0 {
- zb0003Len--
- zb0003Mask |= 0x8000
+ zb0004Len--
+ zb0004Mask |= 0x10000
}
if (*z).unauthenticatedProposal.OriginalProposer.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x10000
+ zb0004Len--
+ zb0004Mask |= 0x20000
+ }
+ if len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x40000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20000
+ zb0004Len--
+ zb0004Mask |= 0x80000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x40000
+ zb0004Len--
+ zb0004Mask |= 0x100000
}
if (*z).PriorVote.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x80000
+ zb0004Len--
+ zb0004Mask |= 0x200000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0 {
- zb0003Len--
- zb0003Mask |= 0x100000
+ zb0004Len--
+ zb0004Mask |= 0x400000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x200000
+ zb0004Len--
+ zb0004Mask |= 0x800000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400000
+ zb0004Len--
+ zb0004Mask |= 0x1000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800000
+ zb0004Len--
+ zb0004Mask |= 0x2000000
}
if (*z).unauthenticatedProposal.SeedProof.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x1000000
+ zb0004Len--
+ zb0004Mask |= 0x4000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x2000000
+ zb0004Len--
+ zb0004Mask |= 0x8000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0 {
- zb0003Len--
- zb0003Mask |= 0x4000000
+ zb0004Len--
+ zb0004Mask |= 0x10000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0 {
- zb0003Len--
- zb0003Mask |= 0x8000000
+ zb0004Len--
+ zb0004Mask |= 0x20000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x10000000
+ zb0004Len--
+ zb0004Mask |= 0x40000000
}
if (*z).unauthenticatedProposal.Block.Payset.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20000000
+ zb0004Len--
+ zb0004Mask |= 0x80000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x40000000
+ zb0004Len--
+ zb0004Mask |= 0x100000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x80000000
+ zb0004Len--
+ zb0004Mask |= 0x200000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false {
- zb0003Len--
- zb0003Mask |= 0x100000000
+ zb0004Len--
+ zb0004Mask |= 0x400000000
}
- // variable map header, size zb0003Len
- o = msgp.AppendMapHeader(o, zb0003Len)
- if zb0003Len != 0 {
- if (zb0003Mask & 0x20) == 0 { // if not empty
+ // variable map header, size zb0004Len
+ o = msgp.AppendMapHeader(o, zb0004Len)
+ if zb0004Len != 0 {
+ if (zb0004Mask & 0x40) == 0 { // if not empty
// string "cc"
o = append(o, 0xa2, 0x63, 0x63)
if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
@@ -3154,137 +3235,149 @@ func (z *transmittedPayload) MarshalMsg(b []byte) (o []byte) {
o = zb0002.MarshalMsg(o)
}
}
- if (zb0003Mask & 0x40) == 0 { // if not empty
+ if (zb0004Mask & 0x80) == 0 { // if not empty
// string "earn"
o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel)
}
- if (zb0003Mask & 0x80) == 0 { // if not empty
+ if (zb0004Mask & 0x100) == 0 { // if not empty
// string "fees"
o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MarshalMsg(o)
}
- if (zb0003Mask & 0x100) == 0 { // if not empty
+ if (zb0004Mask & 0x200) == 0 { // if not empty
// string "frac"
o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue)
}
- if (zb0003Mask & 0x200) == 0 { // if not empty
+ if (zb0004Mask & 0x400) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID)
}
- if (zb0003Mask & 0x400) == 0 { // if not empty
+ if (zb0004Mask & 0x800) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MarshalMsg(o)
}
- if (zb0003Mask & 0x800) == 0 { // if not empty
+ if (zb0004Mask & 0x1000) == 0 { // if not empty
// string "nextbefore"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000) == 0 { // if not empty
// string "nextproto"
o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x2000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000) == 0 { // if not empty
// string "nextswitch"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MarshalMsg(o)
}
- if (zb0003Mask & 0x4000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000) == 0 { // if not empty
// string "nextyes"
o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals)
}
- if (zb0003Mask & 0x8000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000) == 0 { // if not empty
// string "oper"
o = append(o, 0xa4, 0x6f, 0x70, 0x65, 0x72)
o = msgp.AppendUint64(o, uint64((*z).unauthenticatedProposal.OriginalPeriod))
}
- if (zb0003Mask & 0x10000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000) == 0 { // if not empty
// string "oprop"
o = append(o, 0xa5, 0x6f, 0x70, 0x72, 0x6f, 0x70)
o = (*z).unauthenticatedProposal.OriginalProposer.MarshalMsg(o)
}
- if (zb0003Mask & 0x20000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000) == 0 { // if not empty
+ // string "partupdrmv"
+ o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
+ if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)))
+ }
+ for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ o = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].MarshalMsg(o)
+ }
+ }
+ if (zb0004Mask & 0x80000) == 0 { // if not empty
// string "prev"
o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Branch.MarshalMsg(o)
}
- if (zb0003Mask & 0x40000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000) == 0 { // if not empty
// string "proto"
o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x80000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000) == 0 { // if not empty
// string "pv"
o = append(o, 0xa2, 0x70, 0x76)
o = (*z).PriorVote.MarshalMsg(o)
}
- if (zb0003Mask & 0x100000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000) == 0 { // if not empty
// string "rate"
o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate)
}
- if (zb0003Mask & 0x200000) == 0 { // if not empty
+ if (zb0004Mask & 0x800000) == 0 { // if not empty
// string "rnd"
o = append(o, 0xa3, 0x72, 0x6e, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Round.MarshalMsg(o)
}
- if (zb0003Mask & 0x400000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000000) == 0 { // if not empty
// string "rwcalr"
o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o)
}
- if (zb0003Mask & 0x800000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000000) == 0 { // if not empty
// string "rwd"
o = append(o, 0xa3, 0x72, 0x77, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000000) == 0 { // if not empty
// string "sdpf"
o = append(o, 0xa4, 0x73, 0x64, 0x70, 0x66)
o = (*z).unauthenticatedProposal.SeedProof.MarshalMsg(o)
}
- if (zb0003Mask & 0x2000000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000000) == 0 { // if not empty
// string "seed"
o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MarshalMsg(o)
}
- if (zb0003Mask & 0x4000000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000000) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter)
}
- if (zb0003Mask & 0x8000000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000000) == 0 { // if not empty
// string "ts"
o = append(o, 0xa2, 0x74, 0x73)
o = msgp.AppendInt64(o, (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp)
}
- if (zb0003Mask & 0x10000000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000000) == 0 { // if not empty
// string "txn"
o = append(o, 0xa3, 0x74, 0x78, 0x6e)
o = (*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.MarshalMsg(o)
}
- if (zb0003Mask & 0x20000000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000000) == 0 { // if not empty
// string "txns"
o = append(o, 0xa4, 0x74, 0x78, 0x6e, 0x73)
o = (*z).unauthenticatedProposal.Block.Payset.MarshalMsg(o)
}
- if (zb0003Mask & 0x40000000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000000) == 0 { // if not empty
// string "upgradedelay"
o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MarshalMsg(o)
}
- if (zb0003Mask & 0x80000000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000000) == 0 { // if not empty
// string "upgradeprop"
o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MarshalMsg(o)
}
- if (zb0003Mask & 0x100000000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000000) == 0 { // if not empty
// string "upgradeyes"
o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73)
o = msgp.AppendBool(o, (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove)
@@ -3302,214 +3395,214 @@ func (_ *transmittedPayload) CanMarshalMsg(z interface{}) bool {
func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Round.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Round")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Branch.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Branch")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Seed")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnRoot")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TimeStamp")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisHash")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "FeeSink")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsPool")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsLevel")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRate")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsResidue")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRecalculationRound")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CurrentProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolApprovals")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolVoteBefore")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolSwitchOn")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradePropose")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeDelay")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeApprove")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnCounter")
return
}
}
- if zb0003 > 0 {
- zb0003--
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if zb0004 > 0 {
+ zb0004--
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0005 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(protocol.NumCompactCertTypes))
+ if zb0006 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0006 {
+ if zb0007 {
(*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = nil
} else if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0005)
+ (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0006)
}
- for zb0005 > 0 {
+ for zb0006 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 bookkeeping.CompactCertState
- zb0005--
+ zb0006--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
@@ -3523,52 +3616,81 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).unauthenticatedProposal.Block.BlockHeader.CompactCert[zb0001] = zb0002
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0008 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0009 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0008 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0008]
+ } else {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0008)
+ }
+ for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.Payset.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Payset")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.SeedProof.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "SeedProof")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
{
- var zb0007 uint64
- zb0007, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0010 uint64
+ zb0010, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OriginalPeriod")
return
}
- (*z).unauthenticatedProposal.OriginalPeriod = period(zb0007)
+ (*z).unauthenticatedProposal.OriginalPeriod = period(zb0010)
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.OriginalProposer.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OriginalProposer")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).PriorVote.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "PriorVote")
return
}
}
- if zb0003 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0003)
+ if zb0004 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0004)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -3579,11 +3701,11 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err)
return
}
- if zb0004 {
+ if zb0005 {
(*z) = transmittedPayload{}
}
- for zb0003 > 0 {
- zb0003--
+ for zb0004 > 0 {
+ zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -3723,27 +3845,27 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "cc":
- var zb0008 int
- var zb0009 bool
- zb0008, zb0009, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0008 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(protocol.NumCompactCertTypes))
+ if zb0011 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0009 {
+ if zb0012 {
(*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = nil
} else if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0008)
+ (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0011)
}
- for zb0008 > 0 {
+ for zb0011 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 bookkeeping.CompactCertState
- zb0008--
+ zb0011--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
@@ -3756,6 +3878,33 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
(*z).unauthenticatedProposal.Block.BlockHeader.CompactCert[zb0001] = zb0002
}
+ case "partupdrmv":
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0013 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0013), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0014 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0013 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0013]
+ } else {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0013)
+ }
+ for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
case "txns":
bts, err = (*z).unauthenticatedProposal.Block.Payset.UnmarshalMsg(bts)
if err != nil {
@@ -3770,13 +3919,13 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
case "oper":
{
- var zb0010 uint64
- zb0010, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0015 uint64
+ zb0015, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OriginalPeriod")
return
}
- (*z).unauthenticatedProposal.OriginalPeriod = period(zb0010)
+ (*z).unauthenticatedProposal.OriginalPeriod = period(zb0015)
}
case "oprop":
bts, err = (*z).unauthenticatedProposal.OriginalProposer.UnmarshalMsg(bts)
@@ -3818,13 +3967,17 @@ func (z *transmittedPayload) Msgsize() (s int) {
s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
}
}
+ s += 11 + msgp.ArrayHeaderSize
+ for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ s += (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].Msgsize()
+ }
s += 5 + (*z).unauthenticatedProposal.Block.Payset.Msgsize() + 5 + (*z).unauthenticatedProposal.SeedProof.Msgsize() + 5 + msgp.Uint64Size + 6 + (*z).unauthenticatedProposal.OriginalProposer.Msgsize() + 3 + (*z).PriorVote.Msgsize()
return
}
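
The `s += 11 + msgp.ArrayHeaderSize` added to Msgsize above is the cost of the new map key: msgpack encodes the 10-byte string "partupdrmv" as a 0xaa fixstr header plus the key bytes, 11 bytes total, followed by the array header; each address then contributes its own Msgsize inside the loop. A quick check of that key cost (using the algorand msgp fork that this generated file imports):

package main

import (
	"fmt"

	"github.com/algorand/msgp/msgp" // the fork imported by msgp_gen.go
)

func main() {
	// "partupdrmv" is 10 bytes, so msgpack encodes it as a fixstr:
	// one 0xaa header byte plus the key itself, 11 bytes in total.
	key := msgp.AppendString(nil, "partupdrmv")
	fmt.Println(len(key), key[0] == 0xaa) // 11 true
}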
// MsgIsZero returns whether this is a zero value
func (z *transmittedPayload) MsgIsZero() bool {
- return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero()) && ((*z).PriorVote.MsgIsZero())
+ return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero()) && ((*z).PriorVote.MsgIsZero())
}
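
The wholesale rename from zb0003* to zb0004* in MarshalMsg and UnmarshalMsg is a side effect of how the generator numbers its temporaries: the new ExpiredParticipationAccounts loop claims zb0003, pushing the length/mask pair and every later scratch variable up one slot (and the mask widens from uint32 to uint64, which the generator annotates as "33 bits"). The omitempty pattern itself is unchanged: start from the full field count, decrement the count and set a mask bit per zero-valued field, then emit only fields whose bit is clear. A minimal hand-rolled sketch of that pattern, using an invented two-field struct rather than the real proposal types:

package main

import (
	"fmt"

	"github.com/algorand/msgp/msgp" // the fork imported by msgp_gen.go
)

// pair is an invented example struct; the field names and msgpack keys
// below are illustrative only, not part of the generated agreement types.
type pair struct {
	Round uint64
	Memo  string
}

// marshal mirrors the generated omitempty pattern: count down zbLen and
// set a mask bit for every zero-valued field, then skip masked fields.
func (z *pair) marshal(b []byte) (o []byte) {
	o = b
	zbLen := uint32(2)
	var zbMask uint32
	if z.Round == 0 {
		zbLen--
		zbMask |= 0x1
	}
	if z.Memo == "" {
		zbLen--
		zbMask |= 0x2
	}
	// variable map header, size zbLen
	o = msgp.AppendMapHeader(o, zbLen)
	if (zbMask & 0x1) == 0 { // if not empty
		o = msgp.AppendString(o, "rnd")
		o = msgp.AppendUint64(o, z.Round)
	}
	if (zbMask & 0x2) == 0 { // if not empty
		o = msgp.AppendString(o, "memo")
		o = msgp.AppendString(o, z.Memo)
	}
	return o
}

func main() {
	fmt.Printf("% x\n", (&pair{Round: 7}).marshal(nil)) // one-entry map: {"rnd": 7}
}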
// MarshalMsg implements msgp.Marshaler
@@ -4494,120 +4647,124 @@ func (z *unauthenticatedEquivocationVote) MsgIsZero() bool {
func (z *unauthenticatedProposal) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0003Len := uint32(27)
- var zb0003Mask uint32 /* 31 bits */
+ zb0004Len := uint32(28)
+ var zb0004Mask uint64 /* 33 bits */
if len((*z).Block.BlockHeader.CompactCert) == 0 {
- zb0003Len--
- zb0003Mask |= 0x10
+ zb0004Len--
+ zb0004Mask |= 0x20
}
if (*z).Block.BlockHeader.RewardsState.RewardsLevel == 0 {
- zb0003Len--
- zb0003Mask |= 0x20
+ zb0004Len--
+ zb0004Mask |= 0x40
}
if (*z).Block.BlockHeader.RewardsState.FeeSink.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x40
+ zb0004Len--
+ zb0004Mask |= 0x80
}
if (*z).Block.BlockHeader.RewardsState.RewardsResidue == 0 {
- zb0003Len--
- zb0003Mask |= 0x80
+ zb0004Len--
+ zb0004Mask |= 0x100
}
if (*z).Block.BlockHeader.GenesisID == "" {
- zb0003Len--
- zb0003Mask |= 0x100
+ zb0004Len--
+ zb0004Mask |= 0x200
}
if (*z).Block.BlockHeader.GenesisHash.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x200
+ zb0004Len--
+ zb0004Mask |= 0x400
}
if (*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400
+ zb0004Len--
+ zb0004Mask |= 0x800
}
if (*z).Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800
+ zb0004Len--
+ zb0004Mask |= 0x1000
}
if (*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x1000
+ zb0004Len--
+ zb0004Mask |= 0x2000
}
if (*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0 {
- zb0003Len--
- zb0003Mask |= 0x2000
+ zb0004Len--
+ zb0004Mask |= 0x4000
}
if (*z).OriginalPeriod == 0 {
- zb0003Len--
- zb0003Mask |= 0x4000
+ zb0004Len--
+ zb0004Mask |= 0x8000
}
if (*z).OriginalProposer.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x8000
+ zb0004Len--
+ zb0004Mask |= 0x10000
+ }
+ if len((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x20000
}
if (*z).Block.BlockHeader.Branch.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x10000
+ zb0004Len--
+ zb0004Mask |= 0x40000
}
if (*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20000
+ zb0004Len--
+ zb0004Mask |= 0x80000
}
if (*z).Block.BlockHeader.RewardsState.RewardsRate == 0 {
- zb0003Len--
- zb0003Mask |= 0x40000
+ zb0004Len--
+ zb0004Mask |= 0x100000
}
if (*z).Block.BlockHeader.Round.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x80000
+ zb0004Len--
+ zb0004Mask |= 0x200000
}
if (*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x100000
+ zb0004Len--
+ zb0004Mask |= 0x400000
}
if (*z).Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x200000
+ zb0004Len--
+ zb0004Mask |= 0x800000
}
if (*z).SeedProof.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400000
+ zb0004Len--
+ zb0004Mask |= 0x1000000
}
if (*z).Block.BlockHeader.Seed.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800000
+ zb0004Len--
+ zb0004Mask |= 0x2000000
}
if (*z).Block.BlockHeader.TxnCounter == 0 {
- zb0003Len--
- zb0003Mask |= 0x1000000
+ zb0004Len--
+ zb0004Mask |= 0x4000000
}
if (*z).Block.BlockHeader.TimeStamp == 0 {
- zb0003Len--
- zb0003Mask |= 0x2000000
+ zb0004Len--
+ zb0004Mask |= 0x8000000
}
if (*z).Block.BlockHeader.TxnRoot.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x4000000
+ zb0004Len--
+ zb0004Mask |= 0x10000000
}
if (*z).Block.Payset.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x8000000
+ zb0004Len--
+ zb0004Mask |= 0x20000000
}
if (*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x10000000
+ zb0004Len--
+ zb0004Mask |= 0x40000000
}
if (*z).Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20000000
+ zb0004Len--
+ zb0004Mask |= 0x80000000
}
if (*z).Block.BlockHeader.UpgradeVote.UpgradeApprove == false {
- zb0003Len--
- zb0003Mask |= 0x40000000
+ zb0004Len--
+ zb0004Mask |= 0x100000000
}
- // variable map header, size zb0003Len
- o = msgp.AppendMapHeader(o, zb0003Len)
- if zb0003Len != 0 {
- if (zb0003Mask & 0x10) == 0 { // if not empty
+ // variable map header, size zb0004Len
+ o = msgp.AppendMapHeader(o, zb0004Len)
+ if zb0004Len != 0 {
+ if (zb0004Mask & 0x20) == 0 { // if not empty
// string "cc"
o = append(o, 0xa2, 0x63, 0x63)
if (*z).Block.BlockHeader.CompactCert == nil {
@@ -4627,132 +4784,144 @@ func (z *unauthenticatedProposal) MarshalMsg(b []byte) (o []byte) {
o = zb0002.MarshalMsg(o)
}
}
- if (zb0003Mask & 0x20) == 0 { // if not empty
+ if (zb0004Mask & 0x40) == 0 { // if not empty
// string "earn"
o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e)
o = msgp.AppendUint64(o, (*z).Block.BlockHeader.RewardsState.RewardsLevel)
}
- if (zb0003Mask & 0x40) == 0 { // if not empty
+ if (zb0004Mask & 0x80) == 0 { // if not empty
// string "fees"
o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73)
o = (*z).Block.BlockHeader.RewardsState.FeeSink.MarshalMsg(o)
}
- if (zb0003Mask & 0x80) == 0 { // if not empty
+ if (zb0004Mask & 0x100) == 0 { // if not empty
// string "frac"
o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63)
o = msgp.AppendUint64(o, (*z).Block.BlockHeader.RewardsState.RewardsResidue)
}
- if (zb0003Mask & 0x100) == 0 { // if not empty
+ if (zb0004Mask & 0x200) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).Block.BlockHeader.GenesisID)
}
- if (zb0003Mask & 0x200) == 0 { // if not empty
+ if (zb0004Mask & 0x400) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).Block.BlockHeader.GenesisHash.MarshalMsg(o)
}
- if (zb0003Mask & 0x400) == 0 { // if not empty
+ if (zb0004Mask & 0x800) == 0 { // if not empty
// string "nextbefore"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
o = (*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MarshalMsg(o)
}
- if (zb0003Mask & 0x800) == 0 { // if not empty
+ if (zb0004Mask & 0x1000) == 0 { // if not empty
// string "nextproto"
o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).Block.BlockHeader.UpgradeState.NextProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000) == 0 { // if not empty
// string "nextswitch"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68)
o = (*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MarshalMsg(o)
}
- if (zb0003Mask & 0x2000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000) == 0 { // if not empty
// string "nextyes"
o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73)
o = msgp.AppendUint64(o, (*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals)
}
- if (zb0003Mask & 0x4000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000) == 0 { // if not empty
// string "oper"
o = append(o, 0xa4, 0x6f, 0x70, 0x65, 0x72)
o = msgp.AppendUint64(o, uint64((*z).OriginalPeriod))
}
- if (zb0003Mask & 0x8000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000) == 0 { // if not empty
// string "oprop"
o = append(o, 0xa5, 0x6f, 0x70, 0x72, 0x6f, 0x70)
o = (*z).OriginalProposer.MarshalMsg(o)
}
- if (zb0003Mask & 0x10000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000) == 0 { // if not empty
+ // string "partupdrmv"
+ o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
+ if (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)))
+ }
+ for zb0003 := range (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ o = (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].MarshalMsg(o)
+ }
+ }
+ if (zb0004Mask & 0x40000) == 0 { // if not empty
// string "prev"
o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76)
o = (*z).Block.BlockHeader.Branch.MarshalMsg(o)
}
- if (zb0003Mask & 0x20000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000) == 0 { // if not empty
// string "proto"
o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x40000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000) == 0 { // if not empty
// string "rate"
o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65)
o = msgp.AppendUint64(o, (*z).Block.BlockHeader.RewardsState.RewardsRate)
}
- if (zb0003Mask & 0x80000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000) == 0 { // if not empty
// string "rnd"
o = append(o, 0xa3, 0x72, 0x6e, 0x64)
o = (*z).Block.BlockHeader.Round.MarshalMsg(o)
}
- if (zb0003Mask & 0x100000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000) == 0 { // if not empty
// string "rwcalr"
o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72)
o = (*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o)
}
- if (zb0003Mask & 0x200000) == 0 { // if not empty
+ if (zb0004Mask & 0x800000) == 0 { // if not empty
// string "rwd"
o = append(o, 0xa3, 0x72, 0x77, 0x64)
o = (*z).Block.BlockHeader.RewardsState.RewardsPool.MarshalMsg(o)
}
- if (zb0003Mask & 0x400000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000000) == 0 { // if not empty
// string "sdpf"
o = append(o, 0xa4, 0x73, 0x64, 0x70, 0x66)
o = (*z).SeedProof.MarshalMsg(o)
}
- if (zb0003Mask & 0x800000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000000) == 0 { // if not empty
// string "seed"
o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64)
o = (*z).Block.BlockHeader.Seed.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000000) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
o = msgp.AppendUint64(o, (*z).Block.BlockHeader.TxnCounter)
}
- if (zb0003Mask & 0x2000000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000000) == 0 { // if not empty
// string "ts"
o = append(o, 0xa2, 0x74, 0x73)
o = msgp.AppendInt64(o, (*z).Block.BlockHeader.TimeStamp)
}
- if (zb0003Mask & 0x4000000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000000) == 0 { // if not empty
// string "txn"
o = append(o, 0xa3, 0x74, 0x78, 0x6e)
o = (*z).Block.BlockHeader.TxnRoot.MarshalMsg(o)
}
- if (zb0003Mask & 0x8000000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000000) == 0 { // if not empty
// string "txns"
o = append(o, 0xa4, 0x74, 0x78, 0x6e, 0x73)
o = (*z).Block.Payset.MarshalMsg(o)
}
- if (zb0003Mask & 0x10000000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000000) == 0 { // if not empty
// string "upgradedelay"
o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79)
o = (*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.MarshalMsg(o)
}
- if (zb0003Mask & 0x20000000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000000) == 0 { // if not empty
// string "upgradeprop"
o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70)
o = (*z).Block.BlockHeader.UpgradeVote.UpgradePropose.MarshalMsg(o)
}
- if (zb0003Mask & 0x40000000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000000) == 0 { // if not empty
// string "upgradeyes"
o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73)
o = msgp.AppendBool(o, (*z).Block.BlockHeader.UpgradeVote.UpgradeApprove)
@@ -4770,214 +4939,214 @@ func (_ *unauthenticatedProposal) CanMarshalMsg(z interface{}) bool {
func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.Round.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Round")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.Branch.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Branch")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.Seed.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Seed")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.TxnRoot.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnRoot")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).Block.BlockHeader.TimeStamp, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TimeStamp")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.GenesisHash.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisHash")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.RewardsState.FeeSink.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "FeeSink")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.RewardsState.RewardsPool.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsPool")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).Block.BlockHeader.RewardsState.RewardsLevel, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsLevel")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).Block.BlockHeader.RewardsState.RewardsRate, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRate")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).Block.BlockHeader.RewardsState.RewardsResidue, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsResidue")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRecalculationRound")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.UpgradeState.CurrentProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CurrentProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.UpgradeState.NextProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolApprovals")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolVoteBefore")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolSwitchOn")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.UpgradeVote.UpgradePropose.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradePropose")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeDelay")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).Block.BlockHeader.UpgradeVote.UpgradeApprove, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeApprove")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).Block.BlockHeader.TxnCounter, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnCounter")
return
}
}
- if zb0003 > 0 {
- zb0003--
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if zb0004 > 0 {
+ zb0004--
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0005 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(protocol.NumCompactCertTypes))
+ if zb0006 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0006 {
+ if zb0007 {
(*z).Block.BlockHeader.CompactCert = nil
} else if (*z).Block.BlockHeader.CompactCert == nil {
- (*z).Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0005)
+ (*z).Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0006)
}
- for zb0005 > 0 {
+ for zb0006 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 bookkeeping.CompactCertState
- zb0005--
+ zb0006--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
@@ -4991,44 +5160,73 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
(*z).Block.BlockHeader.CompactCert[zb0001] = zb0002
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0008 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0009 {
+ (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0008 {
+ (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0008]
+ } else {
+ (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0008)
+ }
+ for zb0003 := range (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.Payset.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Payset")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).SeedProof.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "SeedProof")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
{
- var zb0007 uint64
- zb0007, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0010 uint64
+ zb0010, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OriginalPeriod")
return
}
- (*z).OriginalPeriod = period(zb0007)
+ (*z).OriginalPeriod = period(zb0010)
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).OriginalProposer.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OriginalProposer")
return
}
}
- if zb0003 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0003)
+ if zb0004 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0004)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -5039,11 +5237,11 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
err = msgp.WrapError(err)
return
}
- if zb0004 {
+ if zb0005 {
(*z) = unauthenticatedProposal{}
}
- for zb0003 > 0 {
- zb0003--
+ for zb0004 > 0 {
+ zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -5183,27 +5381,27 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
return
}
case "cc":
- var zb0008 int
- var zb0009 bool
- zb0008, zb0009, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0008 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(protocol.NumCompactCertTypes))
+ if zb0011 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0009 {
+ if zb0012 {
(*z).Block.BlockHeader.CompactCert = nil
} else if (*z).Block.BlockHeader.CompactCert == nil {
- (*z).Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0008)
+ (*z).Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0011)
}
- for zb0008 > 0 {
+ for zb0011 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 bookkeeping.CompactCertState
- zb0008--
+ zb0011--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
@@ -5216,6 +5414,33 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
}
(*z).Block.BlockHeader.CompactCert[zb0001] = zb0002
}
+ case "partupdrmv":
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0013 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0013), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0014 {
+ (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0013 {
+ (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0013]
+ } else {
+ (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0013)
+ }
+ for zb0003 := range (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
case "txns":
bts, err = (*z).Block.Payset.UnmarshalMsg(bts)
if err != nil {
@@ -5230,13 +5455,13 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
}
case "oper":
{
- var zb0010 uint64
- zb0010, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0015 uint64
+ zb0015, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OriginalPeriod")
return
}
- (*z).OriginalPeriod = period(zb0010)
+ (*z).OriginalPeriod = period(zb0015)
}
case "oprop":
bts, err = (*z).OriginalProposer.UnmarshalMsg(bts)
@@ -5272,13 +5497,17 @@ func (z *unauthenticatedProposal) Msgsize() (s int) {
s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
}
}
+ s += 11 + msgp.ArrayHeaderSize
+ for zb0003 := range (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ s += (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].Msgsize()
+ }
s += 5 + (*z).Block.Payset.Msgsize() + 5 + (*z).SeedProof.Msgsize() + 5 + msgp.Uint64Size + 6 + (*z).OriginalProposer.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
func (z *unauthenticatedProposal) MsgIsZero() bool {
- return ((*z).Block.BlockHeader.Round.MsgIsZero()) && ((*z).Block.BlockHeader.Branch.MsgIsZero()) && ((*z).Block.BlockHeader.Seed.MsgIsZero()) && ((*z).Block.BlockHeader.TxnRoot.MsgIsZero()) && ((*z).Block.BlockHeader.TimeStamp == 0) && ((*z).Block.BlockHeader.GenesisID == "") && ((*z).Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).Block.BlockHeader.TxnCounter == 0) && (len((*z).Block.BlockHeader.CompactCert) == 0) && ((*z).Block.Payset.MsgIsZero()) && ((*z).SeedProof.MsgIsZero()) && ((*z).OriginalPeriod == 0) && ((*z).OriginalProposer.MsgIsZero())
+ return ((*z).Block.BlockHeader.Round.MsgIsZero()) && ((*z).Block.BlockHeader.Branch.MsgIsZero()) && ((*z).Block.BlockHeader.Seed.MsgIsZero()) && ((*z).Block.BlockHeader.TxnRoot.MsgIsZero()) && ((*z).Block.BlockHeader.TimeStamp == 0) && ((*z).Block.BlockHeader.GenesisID == "") && ((*z).Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).Block.BlockHeader.TxnCounter == 0) && (len((*z).Block.BlockHeader.CompactCert) == 0) && (len((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).Block.Payset.MsgIsZero()) && ((*z).SeedProof.MsgIsZero()) && ((*z).OriginalPeriod == 0) && ((*z).OriginalProposer.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
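
Both proposal types gain the same header field: the key "partupdrmv" carries BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts, a []basics.Address that the decoder caps at config.MaxProposedExpiredOnlineAccounts. Note the nil handling on both sides: a nil slice is marshalled as msgpack nil rather than a zero-length array, and the unmarshaller reuses the destination slice's backing array whenever its capacity suffices. A rough standalone sketch of that decode path, where addr and maxExpired are stand-ins for basics.Address and the config constant:

package main

import (
	"fmt"

	"github.com/algorand/msgp/msgp" // the fork imported by msgp_gen.go
)

const maxExpired = 32 // stand-in for config.MaxProposedExpiredOnlineAccounts

type addr [32]byte // stand-in for basics.Address

// decodeAddrs follows the generated pattern: read the array header (the
// fork's variant also reports msgpack nil), bound-check the count, reuse
// slice capacity where possible, then decode elements in place.
func decodeAddrs(dst []addr, bts []byte) ([]addr, []byte, error) {
	n, isNil, bts, err := msgp.ReadArrayHeaderBytes(bts)
	if err != nil {
		return dst, bts, err
	}
	if int(n) > maxExpired {
		return dst, bts, msgp.ErrOverflow(uint64(n), uint64(maxExpired))
	}
	switch {
	case isNil:
		dst = nil // keep the nil vs empty distinction
	case dst != nil && cap(dst) >= int(n):
		dst = dst[:n] // reuse the existing backing array
	default:
		dst = make([]addr, n)
	}
	for i := range dst {
		var raw []byte
		raw, bts, err = msgp.ReadBytesZC(bts) // real code calls Address.UnmarshalMsg
		if err != nil {
			return dst, bts, err
		}
		copy(dst[i][:], raw)
	}
	return dst, bts, nil
}

func main() {
	// nil slice round-trip: AppendNil on encode, isNil on decode.
	got, _, err := decodeAddrs(nil, msgp.AppendNil(nil))
	fmt.Println(got == nil, err) // true <nil>
}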
diff --git a/agreement/player_permutation_test.go b/agreement/player_permutation_test.go
index 2b832c63b..960669483 100644
--- a/agreement/player_permutation_test.go
+++ b/agreement/player_permutation_test.go
@@ -19,7 +19,6 @@ package agreement
import (
"fmt"
"testing"
- "time"
"github.com/stretchr/testify/require"
@@ -32,7 +31,7 @@ import (
func makeRandomProposalPayload(r round) *proposal {
f := testBlockFactory{Owner: 1}
- ve, _ := f.AssembleBlock(r, time.Time{})
+ ve, _ := f.AssembleBlock(r)
var payload unauthenticatedProposal
payload.Block = ve.Block()
diff --git a/agreement/proposal.go b/agreement/proposal.go
index e823ce0ce..f5256decb 100644
--- a/agreement/proposal.go
+++ b/agreement/proposal.go
@@ -184,7 +184,7 @@ func verifyNewSeed(p unauthenticatedProposal, ledger LedgerReader) error {
}
balanceRound := balanceRound(rnd, cparams)
- proposerRecord, err := ledger.Lookup(balanceRound, value.OriginalProposer)
+ proposerRecord, err := ledger.LookupAgreement(balanceRound, value.OriginalProposer)
if err != nil {
return fmt.Errorf("failed to obtain balance record for address %v in round %d: %v", value.OriginalProposer, balanceRound, err)
}
diff --git a/agreement/proposalStore_test.go b/agreement/proposalStore_test.go
index 93f2d15b4..2333b0344 100644
--- a/agreement/proposalStore_test.go
+++ b/agreement/proposalStore_test.go
@@ -20,7 +20,6 @@ import (
"os"
"reflect"
"testing"
- "time"
"github.com/stretchr/testify/require"
@@ -65,7 +64,7 @@ func TestBlockAssemblerPipeline(t *testing.T) {
round := player.Round
period := player.Period
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", round, err)
accountIndex := 0
@@ -133,7 +132,7 @@ func TestBlockAssemblerBind(t *testing.T) {
player, _, accounts, factory, ledger := testSetup(0)
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
@@ -201,7 +200,7 @@ func TestBlockAssemblerAuthenticator(t *testing.T) {
player, _, accounts, factory, ledger := testSetup(0)
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
proposalPayload, _, _ := proposalForBlock(accounts.addresses[accountIndex], accounts.vrfs[accountIndex], testBlockFactory, player.Period, ledger)
@@ -267,7 +266,7 @@ func TestBlockAssemblerTrim(t *testing.T) {
player, _, accounts, factory, ledger := testSetup(0)
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
proposalPayload, _, _ := proposalForBlock(accounts.addresses[accountIndex], accounts.vrfs[accountIndex], testBlockFactory, player.Period, ledger)
@@ -340,7 +339,7 @@ func TestProposalStoreT(t *testing.T) {
player, _, accounts, factory, ledger := testSetup(0)
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
proposalPayload, proposalV, _ := proposalForBlock(accounts.addresses[accountIndex], accounts.vrfs[accountIndex], testBlockFactory, player.Period, ledger)
@@ -414,7 +413,7 @@ func TestProposalStoreUnderlying(t *testing.T) {
player, _, accounts, factory, ledger := testSetup(0)
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
proposalPayload, proposalV, _ := proposalForBlock(accounts.addresses[accountIndex], accounts.vrfs[accountIndex], testBlockFactory, player.Period, ledger)
@@ -478,7 +477,7 @@ func TestProposalStoreHandle(t *testing.T) {
proposalVoteEventBatch, proposalPayloadEventBatch, _ := generateProposalEvents(t, player, accounts, factory, ledger)
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
_, proposalV0, _ := proposalForBlock(accounts.addresses[accountIndex], accounts.vrfs[accountIndex], testBlockFactory, player.Period, ledger)
@@ -662,7 +661,7 @@ func TestProposalStoreGetPinnedValue(t *testing.T) {
// create proposal Store
player, router, accounts, factory, ledger := testPlayerSetup()
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
// create a route handler for the proposal store
diff --git a/agreement/proposal_test.go b/agreement/proposal_test.go
index 49481ae84..24ddfbfd1 100644
--- a/agreement/proposal_test.go
+++ b/agreement/proposal_test.go
@@ -20,7 +20,6 @@ import (
"context"
"os"
"testing"
- "time"
"github.com/stretchr/testify/require"
@@ -47,7 +46,7 @@ func testSetup(periodCount uint64) (player, rootRouter, testAccountData, testBlo
}
func createProposalsTesting(accs testAccountData, round basics.Round, period period, factory BlockFactory, ledger Ledger) (ps []proposal, vs []vote) {
- ve, err := factory.AssembleBlock(round, time.Now().Add(time.Minute))
+ ve, err := factory.AssembleBlock(round)
if err != nil {
logging.Base().Errorf("Could not generate a proposal for round %d: %v", round, err)
return nil, nil
@@ -123,7 +122,7 @@ func TestProposalFunctions(t *testing.T) {
player, _, accs, factory, ledger := testSetup(0)
round := player.Round
period := player.Period
- ve, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ ve, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", round, err)
validator := testBlockValidator{}
@@ -163,7 +162,7 @@ func TestProposalUnauthenticated(t *testing.T) {
round := player.Round
period := player.Period
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", round, err)
validator := testBlockValidator{}
diff --git a/agreement/pseudonode.go b/agreement/pseudonode.go
index e2809fb13..2589028cb 100644
--- a/agreement/pseudonode.go
+++ b/agreement/pseudonode.go
@@ -18,11 +18,11 @@ package agreement
import (
"context"
+ "errors"
"fmt"
"sync"
"time"
- "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/logging"
@@ -33,13 +33,14 @@ import (
// TODO put these in config
const (
- pseudonodeVerificationBacklog = 32
+ pseudonodeVerificationBacklog = 32
+ maxPseudonodeOutputWaitDuration = 2 * time.Second
)
var errPseudonodeBacklogFull = fmt.Errorf("pseudonode input channel is full")
-var errPseudonodeVerifierClosedChannel = fmt.Errorf("crypto verifier closed the output channel prematurely")
-var errPseudonodeNoVotes = fmt.Errorf("no valid participation keys to generate votes for given round")
-var errPseudonodeNoProposals = fmt.Errorf("no valid participation keys to generate proposals for given round")
+var errPseudonodeVerifierClosedChannel = errors.New("crypto verifier closed the output channel prematurely")
+var errPseudonodeNoVotes = errors.New("no valid participation keys to generate votes for given round")
+var errPseudonodeNoProposals = errors.New("no valid participation keys to generate proposals for given round")
// A pseudonode creates proposals and votes with a KeyManager which holds participation keys.
//
@@ -174,7 +175,7 @@ func (n asyncPseudonode) MakeProposals(ctx context.Context, r round, p period) (
return proposalTask.outputChannel(), nil
default:
proposalTask.close()
- return nil, errPseudonodeBacklogFull
+ return nil, fmt.Errorf("unable to make proposal for (%d, %d): %w", r, p, errPseudonodeBacklogFull)
}
}
@@ -191,7 +192,7 @@ func (n asyncPseudonode) MakeVotes(ctx context.Context, r round, p period, s ste
return proposalTask.outputChannel(), nil
default:
proposalTask.close()
- return nil, errPseudonodeBacklogFull
+ return nil, fmt.Errorf("unable to make vote for (%d, %d, %d): %w", r, p, s, errPseudonodeBacklogFull)
}
}
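[editor's note] MakeProposals and MakeVotes now wrap the errPseudonodeBacklogFull sentinel with %w, so callers can keep detecting it with errors.Is while the message gains round/period context (the test added below relies on exactly this). A self-contained sketch of the pattern, with simplified names:

package main

import (
	"errors"
	"fmt"
)

var errBacklogFull = errors.New("pseudonode input channel is full")

// makeProposals simulates the failure path above: the sentinel is wrapped
// with %w so context is added without breaking errors.Is.
func makeProposals(r, p uint64) error {
	return fmt.Errorf("unable to make proposal for (%d, %d): %w", r, p, errBacklogFull)
}

func main() {
	err := makeProposals(100, 0)
	fmt.Println(errors.Is(err, errBacklogFull)) // true
	fmt.Println(err)                            // message includes round and period
}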
@@ -267,8 +268,7 @@ func (n asyncPseudonode) makePseudonodeVerifier(voteVerifier *AsyncVoteVerifier)
// makeProposals creates a slice of block proposals for the given round and period.
func (n asyncPseudonode) makeProposals(round basics.Round, period period, accounts []account.Participation) ([]proposal, []unauthenticatedVote) {
- deadline := time.Now().Add(config.ProposalAssemblyTime)
- ve, err := n.factory.AssembleBlock(round, deadline)
+ ve, err := n.factory.AssembleBlock(round)
if err != nil {
if err != ErrAssembleBlockRoundStale {
n.log.Errorf("pseudonode.makeProposals: could not generate a proposal for round %d: %v", round, err)
@@ -367,13 +367,20 @@ func (t pseudonodeVotesTask) execute(verifier *AsyncVoteVerifier, quit chan stru
unverifiedVotes := t.node.makeVotes(t.round, t.period, t.step, t.prop, t.participation)
t.node.log.Infof("pseudonode: made %v votes", len(unverifiedVotes))
results := make(chan asyncVerifyVoteResponse, len(unverifiedVotes))
+ orderedResults := make([]asyncVerifyVoteResponse, len(unverifiedVotes))
+ asyncVerifyingVotes := len(unverifiedVotes)
for i, uv := range unverifiedVotes {
msg := message{Tag: protocol.AgreementVoteTag, UnauthenticatedVote: uv}
- verifier.verifyVote(context.TODO(), t.node.ledger, uv, i, msg, results)
+ err := verifier.verifyVote(context.TODO(), t.node.ledger, uv, i, msg, results)
+ if err != nil {
+ orderedResults[i].err = err
+ t.node.log.Infof("pseudonode.makeVotes: failed to enqueue vote verification for (%d, %d): %v", t.round, t.period, err)
+ asyncVerifyingVotes--
+ continue
+ }
}
- orderedResults := make([]asyncVerifyVoteResponse, len(unverifiedVotes))
- for i := 0; i < len(unverifiedVotes); i++ {
+ for i := 0; i < asyncVerifyingVotes; i++ {
resp := <-results
orderedResults[resp.index] = resp
}
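[editor's note] The hunk above fixes a potential deadlock: when verifier.verifyVote fails to enqueue, no response will ever arrive on the results channel, so the receive loop must wait only for the verifications that were actually enqueued. A standalone sketch of that accounting, with a stubbed enqueue standing in for the verifier:

package main

import "fmt"

type response struct {
	index int
	err   error
}

// enqueue stands in for verifier.verifyVote: it fails synchronously for
// odd indexes and otherwise responds asynchronously.
func enqueue(i int, out chan<- response) error {
	if i%2 == 1 {
		return fmt.Errorf("backlog full")
	}
	go func() { out <- response{index: i} }()
	return nil
}

func main() {
	tasks := 5
	results := make(chan response, tasks)
	ordered := make([]response, tasks)

	pending := tasks
	for i := 0; i < tasks; i++ {
		if err := enqueue(i, results); err != nil {
			ordered[i].err = err // record the failure in order
			pending--            // one fewer async response to wait for
		}
	}
	for i := 0; i < pending; i++ { // never blocks on responses that cannot come
		resp := <-results
		ordered[resp.index] = resp
	}
	fmt.Println(len(ordered))
}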
@@ -440,15 +447,26 @@ func (t pseudonodeVotesTask) execute(verifier *AsyncVoteVerifier, quit chan stru
}
t.node.monitor.dec(pseudonodeCoserviceType)
+ outputTimeout := time.After(maxPseudonodeOutputWaitDuration)
+
// push results into channel.
+verifiedVotesLoop:
for _, r := range verifiedResults {
- select {
- case t.out <- messageEvent{T: voteVerified, Input: r.message, Err: makeSerErr(r.err)}:
- case <-quit:
- return
- case <-t.context.Done():
- // we done care about the output anymore; just exit.
- return
+ for {
+ select {
+ case t.out <- messageEvent{T: voteVerified, Input: r.message, Err: makeSerErr(r.err)}:
+ t.node.keys.Record(r.v.R.Sender, r.v.R.Round, account.Vote)
+ continue verifiedVotesLoop
+ case <-quit:
+ return
+ case <-t.context.Done():
+ // we don't care about the output anymore; just exit.
+ return
+ case <-outputTimeout:
+ // we've been waiting too long for this vote to be written to the output.
+ t.node.log.Warnf("pseudonode.makeVotes: unable to write vote to output channel for round %d, period %d", t.round, t.period)
+ outputTimeout = nil
+ }
}
}
}
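[editor's note] The rewritten delivery loop bounds how long a single write may stall: when outputTimeout fires, a warning is logged once and the channel variable is set to nil, and since receiving from a nil channel never proceeds, the select keeps retrying the send until it succeeds or quit/context cancellation wins. A minimal sketch of that one-shot timeout:

package main

import (
	"fmt"
	"time"
)

// push retries each send until it is accepted; the timeout arm fires at
// most once because the channel is nilled out after the first warning.
func push(out chan<- int, quit <-chan struct{}, values []int) {
	timeout := time.After(100 * time.Millisecond)
loop:
	for _, v := range values {
		for {
			select {
			case out <- v:
				continue loop // delivered; move on to the next value
			case <-quit:
				return
			case <-timeout:
				fmt.Println("warning: output channel is slow")
				timeout = nil // a nil channel never fires again
			}
		}
	}
}

func main() {
	out := make(chan int, 3)
	push(out, make(chan struct{}), []int{1, 2, 3})
	fmt.Println(<-out, <-out, <-out)
}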
@@ -477,13 +495,20 @@ func (t pseudonodeProposalsTask) execute(verifier *AsyncVoteVerifier, quit chan
// For now, don't log at all, and revisit when the metric becomes more important.
results := make(chan asyncVerifyVoteResponse, len(votes))
+ cryptoOutputs := make([]asyncVerifyVoteResponse, len(votes))
+ asyncVerifyingVotes := len(votes)
for i, uv := range votes {
msg := message{Tag: protocol.AgreementVoteTag, UnauthenticatedVote: uv}
- verifier.verifyVote(context.TODO(), t.node.ledger, uv, i, msg, results)
+ err := verifier.verifyVote(context.TODO(), t.node.ledger, uv, i, msg, results)
+ if err != nil {
+ cryptoOutputs[i].err = err
+ t.node.log.Infof("pseudonode.makeProposals: failed to enqueue vote verification for (%d, %d): %v", t.round, t.period, err)
+ asyncVerifyingVotes--
+ continue
+ }
}
- cryptoOutputs := make([]asyncVerifyVoteResponse, len(votes))
- for i := 0; i < len(votes); i++ {
+ for i := 0; i < asyncVerifyingVotes; i++ {
resp := <-results
cryptoOutputs[resp.index] = resp
}
@@ -527,27 +552,45 @@ func (t pseudonodeProposalsTask) execute(verifier *AsyncVoteVerifier, quit chan
}
t.node.monitor.dec(pseudonodeCoserviceType)
+ outputTimeout := time.After(maxPseudonodeOutputWaitDuration)
// push results into channel.
+verifiedVotesLoop:
for _, r := range verifiedVotes {
- select {
- case t.out <- messageEvent{T: voteVerified, Input: r.message, Err: makeSerErr(r.err)}:
- case <-quit:
- return
- case <-t.context.Done():
- // we done care about the output anymore; just exit.
- return
+ for {
+ select {
+ case t.out <- messageEvent{T: voteVerified, Input: r.message, Err: makeSerErr(r.err)}:
+ t.node.keys.Record(r.v.R.Sender, r.v.R.Round, account.BlockProposal)
+ continue verifiedVotesLoop
+ case <-quit:
+ return
+ case <-t.context.Done():
+ // we don't care about the output anymore; just exit.
+ return
+ case <-outputTimeout:
+ // we've been waiting too long for this vote to be written to the output.
+ t.node.log.Warnf("pseudonode.makeProposals: unable to write proposal vote to output channel for round %d, period %d", t.round, t.period)
+ outputTimeout = nil
+ }
}
}
+verifiedPayloadsLoop:
for _, payload := range verifiedPayloads {
msg := message{Tag: protocol.ProposalPayloadTag, UnauthenticatedProposal: payload.u(), Proposal: payload}
- select {
- case t.out <- messageEvent{T: payloadVerified, Input: msg}:
- case <-quit:
- return
- case <-t.context.Done():
- // we done care about the output anymore; just exit.
- return
+ for {
+ select {
+ case t.out <- messageEvent{T: payloadVerified, Input: msg}:
+ continue verifiedPayloadsLoop
+ case <-quit:
+ return
+ case <-t.context.Done():
+ // we don't care about the output anymore; just exit.
+ return
+ case <-outputTimeout:
+ // we've been waiting too long for this proposal payload to be written to the output.
+ t.node.log.Warnf("pseudonode.makeProposals: unable to write proposal payload to output channel for round %d, period %d", t.round, t.period)
+ outputTimeout = nil
+ }
}
}
}
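[editor's note] Both tasks now call t.node.keys.Record(...) after every successful delivery, which is why the tests below replace simpleKeyManager with makeRecordingKeyManager. The repository's helper is not shown in this diff; the following is a hypothetical sketch of what a recording key manager could look like, with assumed types and fields:

package main

import (
	"fmt"
	"sync"
)

type Address [32]byte
type Round uint64
type ParticipationAction int

// recordingKeyManager remembers the last round each address was recorded
// for, so a test can assert that Record was called with the right round.
type recordingKeyManager struct {
	mu   sync.Mutex
	last map[Address]Round
}

func (m *recordingKeyManager) Record(addr Address, rnd Round, _ ParticipationAction) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.last == nil {
		m.last = make(map[Address]Round)
	}
	m.last[addr] = rnd
}

func main() {
	var m recordingKeyManager
	m.Record(Address{1}, 42, 0)
	fmt.Println(m.last[Address{1}]) // 42
}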
diff --git a/agreement/pseudonode_test.go b/agreement/pseudonode_test.go
index 72f1a427c..e65855556 100644
--- a/agreement/pseudonode_test.go
+++ b/agreement/pseudonode_test.go
@@ -19,7 +19,9 @@ package agreement
import (
"context"
"crypto/sha256"
+ "errors"
"fmt"
+ "strings"
"testing"
"github.com/stretchr/testify/assert"
@@ -30,6 +32,7 @@ import (
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/execpool"
)
// The serializedPseudonode is the trivial implementation for the pseudonode interface
@@ -126,7 +129,7 @@ func compareEventChannels(t *testing.T, ch1, ch2 <-chan externalEvent) bool {
}
}
default:
- assert.NoError(t, fmt.Errorf("Unexpected tag %v encountered", ev1.Input.Tag))
+ assert.NoError(t, fmt.Errorf("Unexpected tag '%v' encountered", ev1.Input.Tag))
}
}
return true
@@ -145,7 +148,7 @@ func TestPseudonode(t *testing.T) {
sLogger := serviceLogger{logging.NewLogger()}
sLogger.SetLevel(logging.Warn)
- keyManager := simpleKeyManager(accounts)
+ keyManager := makeRecordingKeyManager(accounts)
pb := makePseudonode(pseudonodeParams{
factory: testBlockFactory{Owner: 0},
validator: testBlockValidator{},
@@ -222,6 +225,8 @@ func TestPseudonode(t *testing.T) {
}
messageEvent, typeOk := ev.(messageEvent)
assert.True(t, true, typeOk)
+ // Verify votes are recorded - everyone is voting and proposing blocks.
+ keyManager.ValidateVoteRound(t, messageEvent.Input.Vote.R.Sender, startRound)
events[messageEvent.t()] = append(events[messageEvent.t()], messageEvent)
}
assert.Subset(t, []int{5, 6, 7, 8, 9, 10}, []int{len(events[voteVerified])})
@@ -390,6 +395,9 @@ func (k *KeyManagerProxy) VotingKeys(votingRound, balanceRound basics.Round) []a
return k.target(votingRound, balanceRound)
}
+func (k *KeyManagerProxy) Record(account basics.Address, round basics.Round, action account.ParticipationAction) {
+}
+
func TestPseudonodeLoadingOfParticipationKeys(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -403,7 +411,7 @@ func TestPseudonodeLoadingOfParticipationKeys(t *testing.T) {
sLogger := serviceLogger{logging.NewLogger()}
sLogger.SetLevel(logging.Warn)
- keyManager := simpleKeyManager(accounts)
+ keyManager := makeRecordingKeyManager(accounts)
pb := makePseudonode(pseudonodeParams{
factory: testBlockFactory{Owner: 0},
validator: testBlockValidator{},
@@ -447,3 +455,95 @@ func TestPseudonodeLoadingOfParticipationKeys(t *testing.T) {
pb.loadRoundParticipationKeys(basics.Round(rnd))
}
}
+
+type substrServiceLogger struct {
+ logging.Logger
+ lookupStrings []string
+ instancesFound []int
+}
+
+func (ssl *substrServiceLogger) Infof(s string, args ...interface{}) {
+ for i, str := range ssl.lookupStrings {
+ if strings.Contains(s, str) {
+ ssl.instancesFound[i]++
+ return
+ }
+ }
+}
+
+// TestPseudonodeFailedEnqueuedTasks tests that when we cannot enqueue a verification task to the backlog, we won't wait forever - instead,
+// we generate a warning message and keep going.
+func TestPseudonodeFailedEnqueuedTasks(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ t.Parallel()
+
+ // generate a nice, fixed hash.
+ rootSeed := sha256.Sum256([]byte(t.Name()))
+ accounts, balances := createTestAccountsAndBalances(t, 10, rootSeed[:])
+ ledger := makeTestLedger(balances)
+
+ subStrLogger := &substrServiceLogger{
+ Logger: logging.TestingLog(t),
+ lookupStrings: []string{"pseudonode.makeVotes: failed to enqueue vote verification for", "pseudonode.makeProposals: failed to enqueue vote verification"},
+ instancesFound: []int{0, 0},
+ }
+ sLogger := serviceLogger{
+ Logger: subStrLogger,
+ }
+ sLogger.SetLevel(logging.Warn)
+
+ keyManager := makeRecordingKeyManager(accounts)
+
+ mainPool := execpool.MakePool(t)
+ defer mainPool.Shutdown()
+
+ voteVerifier := MakeAsyncVoteVerifier(&expiredExecPool{mainPool})
+ defer voteVerifier.Quit()
+
+ pb := makePseudonode(pseudonodeParams{
+ factory: testBlockFactory{Owner: 0},
+ validator: testBlockValidator{},
+ keys: keyManager,
+ ledger: ledger,
+ voteVerifier: voteVerifier,
+ log: sLogger,
+ monitor: nil,
+ })
+ defer pb.Quit()
+
+ startRound := ledger.NextRound()
+
+ channels := make([]<-chan externalEvent, 0)
+ var ch <-chan externalEvent
+ var err error
+ for i := 0; i < pseudonodeVerificationBacklog*2; i++ {
+ ch, err = pb.MakeProposals(context.Background(), startRound, period(i))
+ if err != nil {
+ require.Subset(t, []int{pseudonodeVerificationBacklog, pseudonodeVerificationBacklog + 1}, []int{i})
+ break
+ }
+ channels = append(channels, ch)
+ }
+ require.Error(t, err, "MakeProposals did not return an error when overflowed with requests")
+ require.True(t, errors.Is(err, errPseudonodeBacklogFull))
+
+ persist := make(chan error)
+ close(persist)
+ for i := 0; i < pseudonodeVerificationBacklog*2; i++ {
+ ch, err = pb.MakeVotes(context.Background(), startRound, period(i), step(i%5), makeProposalValue(period(i), accounts[0].Address()), persist)
+ if err != nil {
+ require.Subset(t, []int{pseudonodeVerificationBacklog, pseudonodeVerificationBacklog + 1}, []int{i})
+ break
+ }
+ channels = append(channels, ch)
+ }
+ require.Error(t, err, "MakeVotes did not return an error when overflowed with requests")
+
+ // drain output channels.
+ for _, ch := range channels {
+ drainChannel(ch)
+ }
+ require.Equal(t, 330, subStrLogger.instancesFound[0])
+ require.Equal(t, 330, subStrLogger.instancesFound[1])
+}
diff --git a/agreement/selector.go b/agreement/selector.go
index 623c4d23e..74bfdebdd 100644
--- a/agreement/selector.go
+++ b/agreement/selector.go
@@ -64,7 +64,7 @@ func membership(l LedgerReader, addr basics.Address, r basics.Round, p period, s
balanceRound := balanceRound(r, cparams)
seedRound := seedRound(r, cparams)
- record, err := l.Lookup(balanceRound, addr)
+ record, err := l.LookupAgreement(balanceRound, addr)
if err != nil {
err = fmt.Errorf("Service.initializeVote (r=%d): Failed to obtain balance record for address %v in round %d: %w", r, addr, balanceRound, err)
return
@@ -82,7 +82,7 @@ func membership(l LedgerReader, addr basics.Address, r basics.Round, p period, s
return
}
- m.Record = committee.BalanceRecord{AccountData: record, Addr: addr}
+ m.Record = committee.BalanceRecord{OnlineAccountData: record, Addr: addr}
m.Selector = selector{Seed: seed, Round: r, Period: p, Step: s}
m.TotalMoney = total
return m, nil
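[editor's note] membership now reads only the online portion of an account through LookupAgreement, and committee.BalanceRecord correspondingly embeds OnlineAccountData rather than a full AccountData. A simplified sketch of the narrowed reader interface, using stand-in types rather than the repository's:

package main

type Round uint64
type Address [32]byte

// OnlineAccountData is a stand-in for the voting-related subset of an
// account that agreement actually needs.
type OnlineAccountData struct {
	MicroAlgosWithRewards uint64
	VoteKeyDilution       uint64
}

// LedgerReader is the reduced view: agreement no longer asks for the
// full account record.
type LedgerReader interface {
	LookupAgreement(Round, Address) (OnlineAccountData, error)
}

type mockLedger struct{}

func (mockLedger) LookupAgreement(Round, Address) (OnlineAccountData, error) {
	return OnlineAccountData{}, nil
}

func main() {
	var _ LedgerReader = mockLedger{} // compile-time interface check
}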
diff --git a/agreement/service_test.go b/agreement/service_test.go
index b0469237f..68db73a60 100644
--- a/agreement/service_test.go
+++ b/agreement/service_test.go
@@ -105,22 +105,6 @@ func (c *testingClock) fire(d time.Duration) {
close(c.TA[d])
}
-type simpleKeyManager []account.Participation
-
-func (m simpleKeyManager) VotingKeys(votingRound, _ basics.Round) []account.Participation {
- var km []account.Participation
- for _, acc := range m {
- if acc.OverlapsInterval(votingRound, votingRound) {
- km = append(km, acc)
- }
- }
- return km
-}
-
-func (m simpleKeyManager) DeleteOldKeys(basics.Round) {
- // noop
-}
-
type testingNetwork struct {
validator BlockValidator
@@ -743,7 +727,7 @@ func setupAgreementWithValidator(t *testing.T, numNodes int, traceLevel traceLev
m.coserviceListener = am.coserviceListener(nodeID(i))
clocks[i] = makeTestingClock(m)
ledgers[i] = ledgerFactory(balances)
- keys := simpleKeyManager(accounts[i : i+1])
+ keys := makeRecordingKeyManager(accounts[i : i+1])
endpoint := baseNetwork.testingNetworkEndpoint(nodeID(i))
ilog := log.WithFields(logging.Fields{"Source": "service-" + strconv.Itoa(i)})
diff --git a/catchup/peerSelector_test.go b/catchup/peerSelector_test.go
index 5c7bb9344..a11e7db1e 100644
--- a/catchup/peerSelector_test.go
+++ b/catchup/peerSelector_test.go
@@ -63,6 +63,11 @@ func (d *mockUnicastPeer) Respond(ctx context.Context, reqMsg network.IncomingMe
return nil
}
+// GetConnectionLatency returns the connection latency between the local node and this peer.
+func (d *mockUnicastPeer) GetConnectionLatency() time.Duration {
+ return time.Duration(0)
+}
+
func TestPeerAddress(t *testing.T) {
partitiontest.PartitionTest(t)
diff --git a/catchup/service.go b/catchup/service.go
index 211b31608..27ce957ba 100644
--- a/catchup/service.go
+++ b/catchup/service.go
@@ -18,6 +18,7 @@ package catchup
import (
"context"
+ "errors"
"fmt"
"sync"
"sync/atomic"
@@ -28,7 +29,6 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/logging/telemetryspec"
@@ -58,8 +58,8 @@ type Ledger interface {
LastRound() basics.Round
Block(basics.Round) (bookkeeping.Block, error)
IsWritingCatchpointFile() bool
- Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledger.ValidatedBlock, error)
- AddValidatedBlock(vb ledger.ValidatedBlock, cert agreement.Certificate) error
+ Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledgercore.ValidatedBlock, error)
+ AddValidatedBlock(vb ledgercore.ValidatedBlock, cert agreement.Certificate) error
}
// Service represents the catchup service. Once started and until it is stopped, it ensures that the ledger is up to date with network.
@@ -156,8 +156,19 @@ func (s *Service) SynchronizingTime() time.Duration {
return time.Duration(timeInNS - startNS)
}
+// errLedgerAlreadyHasBlock is returned by innerFetch in case the local ledger already has the requested block.
+var errLedgerAlreadyHasBlock = errors.New("ledger already has block")
+
// function scope to make a bunch of defer statements better
func (s *Service) innerFetch(r basics.Round, peer network.Peer) (blk *bookkeeping.Block, cert *agreement.Certificate, ddur time.Duration, err error) {
+ ledgerWaitCh := s.ledger.Wait(r)
+ select {
+ case <-ledgerWaitCh:
+ // if our ledger already has this block, there is no need to attempt to fetch it.
+ return nil, nil, time.Duration(0), errLedgerAlreadyHasBlock
+ default:
+ }
+
ctx, cf := context.WithCancel(s.ctx)
fetcher := makeUniversalBlockFetcher(s.log, s.net, s.cfg)
defer cf()
@@ -166,11 +177,21 @@ func (s *Service) innerFetch(r basics.Round, peer network.Peer) (blk *bookkeepin
go func() {
select {
case <-stopWaitingForLedgerRound:
- case <-s.ledger.Wait(r):
+ case <-ledgerWaitCh:
cf()
}
}()
- return fetcher.fetchBlock(ctx, r, peer)
+ blk, cert, ddur, err = fetcher.fetchBlock(ctx, r, peer)
+ // check to see if we aborted due to ledger.
+ if err != nil {
+ select {
+ case <-ledgerWaitCh:
+ // yes, we aborted since the ledger received this round.
+ err = errLedgerAlreadyHasBlock
+ default:
+ }
+ }
+ return
}
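[editor's note] innerFetch now peeks at the ledger's Wait channel with a non-blocking select before fetching, and re-checks it when the fetch fails, so a round the ledger has already committed surfaces as errLedgerAlreadyHasBlock rather than as a download failure. A minimal sketch of the non-blocking pre-check:

package main

import (
	"errors"
	"fmt"
)

var errAlreadyHasBlock = errors.New("ledger already has block")

// fetchIfMissing fetches only when the ledger's wait channel has not yet
// been closed; the default arm keeps the check non-blocking.
func fetchIfMissing(wait <-chan struct{}, fetch func() error) error {
	select {
	case <-wait:
		return errAlreadyHasBlock // the round is already committed
	default:
	}
	return fetch()
}

func main() {
	ready := make(chan struct{})
	close(ready) // simulate a round the ledger already has
	fmt.Println(fetchIfMissing(ready, func() error { return nil }))
}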
// fetchAndWrite fetches a block, checks the cert, and writes it to the ledger. Cert checking and ledger writing both wait for the ledger to advance if necessary.
@@ -219,6 +240,10 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool,
block, cert, blockDownloadDuration, err := s.innerFetch(r, peer)
if err != nil {
+ if err == errLedgerAlreadyHasBlock {
+ // ledger already has the block, no need to request this block from anyone.
+ return true
+ }
s.log.Debugf("fetchAndWrite(%v): Could not fetch: %v (attempt %d)", r, err, i)
peerSelector.rankPeer(psp, peerRankDownloadFailed)
// we've just failed to retrieve a block; wait until the previous block is fetched before trying again
@@ -307,7 +332,7 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool,
}
if s.cfg.CatchupVerifyTransactionSignatures() || s.cfg.CatchupVerifyApplyData() {
- var vb *ledger.ValidatedBlock
+ var vb *ledgercore.ValidatedBlock
vb, err = s.ledger.Validate(s.ctx, *block, s.blockValidationPool)
if err != nil {
if s.ctx.Err() != nil {
@@ -324,6 +349,9 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool,
if err != nil {
switch err.(type) {
+ case ledgercore.ErrNonSequentialBlockEval:
+ s.log.Infof("fetchAndWrite(%d): no need to re-evaluate historical block", r)
+ return true
case ledgercore.BlockInLedgerError:
s.log.Infof("fetchAndWrite(%d): block already in ledger", r)
return true
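[editor's note] The new ErrNonSequentialBlockEval case joins BlockInLedgerError in treating the round as already dealt with; both are matched with a type switch rather than errors.As. A sketch of that dispatch, with simplified stand-in error types:

package main

import "fmt"

type ErrNonSequentialBlockEval struct{ Round uint64 }

func (e ErrNonSequentialBlockEval) Error() string {
	return fmt.Sprintf("non-sequential block eval for round %d", e.Round)
}

type BlockInLedgerError struct{ Round uint64 }

func (e BlockInLedgerError) Error() string {
	return fmt.Sprintf("block %d already in ledger", e.Round)
}

// handled reports whether the error means the round needs no further work.
func handled(err error) bool {
	switch err.(type) {
	case ErrNonSequentialBlockEval, BlockInLedgerError:
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(handled(ErrNonSequentialBlockEval{Round: 7})) // true
	fmt.Println(handled(fmt.Errorf("network failure")))       // false
}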
diff --git a/catchup/service_test.go b/catchup/service_test.go
index a0df457a1..4cb89338d 100644
--- a/catchup/service_test.go
+++ b/catchup/service_test.go
@@ -35,7 +35,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/committee"
- "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
@@ -658,11 +658,11 @@ func (m *mockedLedger) AddBlock(blk bookkeeping.Block, cert agreement.Certificat
return nil
}
-func (m *mockedLedger) Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledger.ValidatedBlock, error) {
+func (m *mockedLedger) Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledgercore.ValidatedBlock, error) {
return nil, nil
}
-func (m *mockedLedger) AddValidatedBlock(vb ledger.ValidatedBlock, cert agreement.Certificate) error {
+func (m *mockedLedger) AddValidatedBlock(vb ledgercore.ValidatedBlock, cert agreement.Certificate) error {
return nil
}
@@ -722,6 +722,10 @@ func (m *mockedLedger) LookupDigest(basics.Round) (crypto.Digest, error) {
return crypto.Digest{}, errors.New("not needed for mockedLedger")
}
+func (m *mockedLedger) LookupAgreement(basics.Round, basics.Address) (basics.OnlineAccountData, error) {
+ return basics.OnlineAccountData{}, errors.New("not needed for mockedLedger")
+}
+
func (m *mockedLedger) IsWritingCatchpointFile() bool {
return false
}
diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go
index 2d4957fdb..acd51c002 100644
--- a/catchup/universalFetcher.go
+++ b/catchup/universalFetcher.go
@@ -64,6 +64,9 @@ func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro
config: &uf.config,
}
fetchedBuf, err = fetcherClient.getBlockBytes(ctx, round)
+ if err != nil {
+ return nil, nil, time.Duration(0), err
+ }
address = fetcherClient.address()
} else if httpPeer, validHTTPPeer := peer.(network.HTTPPeer); validHTTPPeer {
fetcherClient := &HTTPFetcher{
@@ -74,14 +77,14 @@ func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro
log: uf.log,
config: &uf.config}
fetchedBuf, err = fetcherClient.getBlockBytes(ctx, round)
+ if err != nil {
+ return nil, nil, time.Duration(0), err
+ }
address = fetcherClient.address()
} else {
return nil, nil, time.Duration(0), fmt.Errorf("fetchBlock: UniversalFetcher only supports HTTPPeer and UnicastPeer")
}
downloadDuration = time.Now().Sub(blockDownloadStartTime)
- if err != nil {
- return nil, nil, time.Duration(0), err
- }
block, cert, err := processBlockBytes(fetchedBuf, round, address)
if err != nil {
return nil, nil, time.Duration(0), err
diff --git a/catchup/universalFetcher_test.go b/catchup/universalFetcher_test.go
index 28cd0b566..79cd484ac 100644
--- a/catchup/universalFetcher_test.go
+++ b/catchup/universalFetcher_test.go
@@ -182,6 +182,7 @@ func TestRequestBlockBytesErrors(t *testing.T) {
t.Fatal(err)
return
}
+ defer ledger.Ledger.Close()
blockServiceConfig := config.GetDefaultLocal()
blockServiceConfig.EnableBlockService = true
@@ -191,6 +192,7 @@ func TestRequestBlockBytesErrors(t *testing.T) {
up := makeTestUnicastPeer(net, t)
ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, ledger, net, "test genesisID")
ls.Start()
+ defer ls.Stop()
fetcher := makeUniversalBlockFetcher(logging.TestingLog(t), net, cfg)
@@ -198,7 +200,7 @@ func TestRequestBlockBytesErrors(t *testing.T) {
cancel()
_, _, _, err = fetcher.fetchBlock(ctx, next, up)
var wrfe errWsFetcherRequestFailed
- require.True(t, errors.As(err, &wrfe))
+ require.True(t, errors.As(err, &wrfe), "unexpected err: %v", err)
require.Equal(t, "context canceled", err.(errWsFetcherRequestFailed).cause)
ctx = context.Background()
diff --git a/cmd/catchpointdump/file.go b/cmd/catchpointdump/file.go
index 15eb857ce..a09485e83 100644
--- a/cmd/catchpointdump/file.go
+++ b/cmd/catchpointdump/file.go
@@ -63,7 +63,7 @@ var fileCmd = &cobra.Command{
if err != nil || len(tarFileBytes) == 0 {
reportErrorf("Unable to read '%s' : %v", tarFile, err)
}
- genesisInitState := ledger.InitState{}
+ genesisInitState := ledgercore.InitState{}
cfg := config.GetDefaultLocal()
l, err := ledger.OpenLedger(logging.Base(), "./ledger", false, genesisInitState, cfg)
if err != nil {
diff --git a/cmd/catchpointdump/net.go b/cmd/catchpointdump/net.go
index 354d2c360..79fd986fa 100644
--- a/cmd/catchpointdump/net.go
+++ b/cmd/catchpointdump/net.go
@@ -30,6 +30,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
tools "github.com/algorand/go-algorand/tools/network"
@@ -268,7 +269,7 @@ func saveCatchpointTarFile(addr string, catchpointFileBytes []byte) (err error)
}
func makeFileDump(addr string, catchpointFileBytes []byte) error {
- genesisInitState := ledger.InitState{}
+ genesisInitState := ledgercore.InitState{}
deleteLedgerFiles := func() {
os.Remove("./ledger.block.sqlite")
os.Remove("./ledger.block.sqlite-shm")
diff --git a/cmd/goal/account.go b/cmd/goal/account.go
index d1bf2f6cf..7d20af38f 100644
--- a/cmd/goal/account.go
+++ b/cmd/goal/account.go
@@ -59,6 +59,7 @@ var (
partKeyOutDir string
partKeyFile string
partKeyDeleteInput bool
+ listpartkeyCompat bool
importDefault bool
mnemonic string
dumpOutFile string
@@ -165,6 +166,9 @@ func init() {
installParticipationKeyCmd.MarkFlagRequired("partkey")
installParticipationKeyCmd.Flags().BoolVar(&partKeyDeleteInput, "delete-input", false, "Acknowledge that installpartkey will delete the input key file")
+ // listpartkey flags
+ listParticipationKeysCmd.Flags().BoolVarP(&listpartkeyCompat, "compatibility", "c", false, "Print output in compatibility mode. This option will be removed in a future release, please use REST API for tooling.")
+
// import flags
importCmd.Flags().BoolVarP(&importDefault, "default", "f", false, "Set this account as the default one")
importCmd.Flags().StringVarP(&mnemonic, "mnemonic", "m", "", "Mnemonic to import (will prompt otherwise)")
@@ -933,7 +937,7 @@ var renewParticipationKeyCmd = &cobra.Command{
}
// Make sure we don't already have a partkey valid for (or after) specified roundLastValid
- parts, err := client.ListParticipationKeys()
+ parts, err := client.ListParticipationKeyFiles()
if err != nil {
reportErrorf(errorRequestFail, err)
}
@@ -991,7 +995,7 @@ func renewPartKeysInDir(dataDir string, lastValidRound uint64, fee uint64, lease
client := ensureAlgodClient(dataDir)
// Build list of accounts to renew from all accounts with part keys present
- parts, err := client.ListParticipationKeys()
+ parts, err := client.ListParticipationKeyFiles()
if err != nil {
return fmt.Errorf(errorRequestFail, err)
}
@@ -1051,12 +1055,73 @@ func renewPartKeysInDir(dataDir string, lastValidRound uint64, fee uint64, lease
return nil
}
+func maxRound(current uint64, next *uint64) uint64 {
+ if next != nil && *next > current {
+ return *next
+ }
+ return current
+}
+
+func uintToStr(number uint64) string {
+ return fmt.Sprintf("%d", number)
+}
+
+// legacyListParticipationKeysCommand prints key information in the same
+// format as earlier versions of goal. Some users are using this information
+// in scripts and need some extra time to migrate to the REST API.
+func legacyListParticipationKeysCommand() {
+ dataDir := ensureSingleDataDir()
+
+ client := ensureGoalClient(dataDir, libgoal.DynamicClient)
+ parts, err := client.ListParticipationKeyFiles()
+ if err != nil {
+ reportErrorf(errorRequestFail, err)
+ }
+
+ var filenames []string
+ for fn := range parts {
+ filenames = append(filenames, fn)
+ }
+ sort.Strings(filenames)
+
+ rowFormat := "%-10s\t%-80s\t%-60s\t%12s\t%12s\t%12s\n"
+ fmt.Printf(rowFormat, "Registered", "Filename", "Parent address", "First round", "Last round", "First key")
+ for _, fn := range filenames {
+ onlineInfoStr := "unknown"
+ onlineAccountInfo, err := client.AccountInformation(parts[fn].Address().GetUserAddress())
+ if err == nil {
+ votingBytes := parts[fn].Voting.OneTimeSignatureVerifier
+ vrfBytes := parts[fn].VRF.PK
+ if onlineAccountInfo.Participation != nil &&
+ (string(onlineAccountInfo.Participation.ParticipationPK) == string(votingBytes[:])) &&
+ (string(onlineAccountInfo.Participation.VRFPK) == string(vrfBytes[:])) &&
+ (onlineAccountInfo.Participation.VoteFirst == uint64(parts[fn].FirstValid)) &&
+ (onlineAccountInfo.Participation.VoteLast == uint64(parts[fn].LastValid)) &&
+ (onlineAccountInfo.Participation.VoteKeyDilution == parts[fn].KeyDilution) {
+ onlineInfoStr = "yes"
+ } else {
+ onlineInfoStr = "no"
+ }
+ }
+ // it's okay to proceed without algod info
+ first, last := parts[fn].ValidInterval()
+ fmt.Printf(rowFormat, onlineInfoStr, fn, parts[fn].Address().GetUserAddress(),
+ fmt.Sprintf("%d", first),
+ fmt.Sprintf("%d", last),
+ fmt.Sprintf("%d.%d", parts[fn].Voting.FirstBatch, parts[fn].Voting.FirstOffset))
+ }
+}
+
var listParticipationKeysCmd = &cobra.Command{
Use: "listpartkeys",
- Short: "List participation keys",
- Long: `List all participation keys tracked by algod, with additional information such as key validity period.`,
+ Short: "List participation keys summary",
+ Long: `List all participation keys tracked by algod along with summary of additional information. For detailed key information use 'partkeyinfo'.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
+ if listpartkeyCompat {
+ legacyListParticipationKeysCommand()
+ return
+ }
dataDir := ensureSingleDataDir()
client := ensureGoalClient(dataDir, libgoal.DynamicClient)
@@ -1065,37 +1130,53 @@ var listParticipationKeysCmd = &cobra.Command{
reportErrorf(errorRequestFail, err)
}
- var filenames []string
- for fn := range parts {
- filenames = append(filenames, fn)
- }
- sort.Strings(filenames)
-
- rowFormat := "%-10s\t%-80s\t%-60s\t%12s\t%12s\t%12s\n"
- fmt.Printf(rowFormat, "Registered", "Filename", "Parent address", "First round", "Last round", "First key")
- for _, fn := range filenames {
+ // Squeezed this into 77 characters.
+ rowFormat := "%-10s %-11s %-15s %10s %11s %10s\n"
+ fmt.Printf(rowFormat, "Registered", "Account", "ParticipationID", "Last Used", "First round", "Last round")
+ for _, part := range parts {
onlineInfoStr := "unknown"
- onlineAccountInfo, err := client.AccountInformation(parts[fn].Address().GetUserAddress())
+ onlineAccountInfo, err := client.AccountInformation(part.Address)
if err == nil {
- votingBytes := parts[fn].Voting.OneTimeSignatureVerifier
- vrfBytes := parts[fn].VRF.PK
+ votingBytes := part.Key.VoteParticipationKey
+ vrfBytes := part.Key.SelectionParticipationKey
if onlineAccountInfo.Participation != nil &&
(string(onlineAccountInfo.Participation.ParticipationPK) == string(votingBytes[:])) &&
(string(onlineAccountInfo.Participation.VRFPK) == string(vrfBytes[:])) &&
- (onlineAccountInfo.Participation.VoteFirst == uint64(parts[fn].FirstValid)) &&
- (onlineAccountInfo.Participation.VoteLast == uint64(parts[fn].LastValid)) &&
- (onlineAccountInfo.Participation.VoteKeyDilution == parts[fn].KeyDilution) {
+ (onlineAccountInfo.Participation.VoteFirst == part.Key.VoteFirstValid) &&
+ (onlineAccountInfo.Participation.VoteLast == part.Key.VoteLastValid) &&
+ (onlineAccountInfo.Participation.VoteKeyDilution == part.Key.VoteKeyDilution) {
onlineInfoStr = "yes"
} else {
onlineInfoStr = "no"
}
+
+ /*
+ // PKI TODO: We could avoid querying the account with something like this.
+ // One problem is that it doesn't account for multiple keys on the same
+ // account, so we'd still need to query the round.
+ if part.EffectiveFirstValid != nil && part.EffectiveLastValid < currentRound {
+ onlineInfoStr = "yes"
+ } else {
+ onlineInfoStr = "no"
+ }
+ */
+
+ // it's okay to proceed without algod info
+ lastUsed := maxRound(0, part.LastVote)
+ lastUsed = maxRound(lastUsed, part.LastBlockProposal)
+ lastUsed = maxRound(lastUsed, part.LastStateProof)
+ lastUsedString := "N/A"
+ if lastUsed != 0 {
+ lastUsedString = uintToStr(lastUsed)
+ }
+ fmt.Printf(rowFormat,
+ onlineInfoStr,
+ fmt.Sprintf("%s...%s", part.Address[:4], part.Address[len(part.Address)-4:]),
+ fmt.Sprintf("%s...", part.Id[:8]),
+ lastUsedString,
+ uintToStr(part.Key.VoteFirstValid),
+ uintToStr(part.Key.VoteLastValid))
}
- // it's okay to proceed without algod info
- first, last := parts[fn].ValidInterval()
- fmt.Printf(rowFormat, onlineInfoStr, fn, parts[fn].Address().GetUserAddress(),
- fmt.Sprintf("%d", first),
- fmt.Sprintf("%d", last),
- fmt.Sprintf("%d.%d", parts[fn].Voting.FirstBatch, parts[fn].Voting.FirstOffset))
}
},
}
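[editor's note] The new "Last Used" column folds the optional last-vote, last-block-proposal, and last-state-proof rounds through maxRound, rendering zero as "N/A". A small worked example of that fold:

package main

import "fmt"

func maxRound(current uint64, next *uint64) uint64 {
	if next != nil && *next > current {
		return *next
	}
	return current
}

func main() {
	vote := uint64(1200)
	var proposal *uint64 // never proposed a block

	lastUsed := maxRound(0, &vote)
	lastUsed = maxRound(lastUsed, proposal)

	out := "N/A"
	if lastUsed != 0 {
		out = fmt.Sprintf("%d", lastUsed)
	}
	fmt.Println(out) // 1200
}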
@@ -1276,14 +1357,11 @@ var importRootKeysCmd = &cobra.Command{
},
}
-type partkeyInfo struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
- Address string `codec:"acct"`
- FirstValid basics.Round `codec:"first"`
- LastValid basics.Round `codec:"last"`
- VoteID crypto.OneTimeSignatureVerifier `codec:"vote"`
- SelectionID crypto.VRFVerifier `codec:"sel"`
- VoteKeyDilution uint64 `codec:"voteKD"`
+func strOrNA(value *uint64) string {
+ if value == nil {
+ return "N/A"
+ }
+ return uintToStr(*value)
}
var partkeyInfoCmd = &cobra.Command{
@@ -1295,7 +1373,7 @@ var partkeyInfoCmd = &cobra.Command{
onDataDirs(func(dataDir string) {
fmt.Printf("Dumping participation key info from %s...\n", dataDir)
- client := ensureGoalClient(dataDir, libgoal.DynamicClient)
+ client := ensureAlgodClient(dataDir)
// Make sure we don't already have a partkey valid for (or after) specified roundLastValid
parts, err := client.ListParticipationKeys()
@@ -1303,18 +1381,23 @@ var partkeyInfoCmd = &cobra.Command{
reportErrorf(errorRequestFail, err)
}
- for filename, part := range parts {
- fmt.Println("------------------------------------------------------------------")
- info := partkeyInfo{
- Address: part.Address().String(),
- FirstValid: part.FirstValid,
- LastValid: part.LastValid,
- VoteID: part.VotingSecrets().OneTimeSignatureVerifier,
- SelectionID: part.VRFSecrets().PK,
- VoteKeyDilution: part.KeyDilution,
- }
- infoString := protocol.EncodeJSON(&info)
- fmt.Printf("File: %s\n%s\n", filename, string(infoString))
+ for _, part := range parts {
+ fmt.Println()
+ fmt.Printf("Participation ID: %s\n", part.Id)
+ fmt.Printf("Parent address: %s\n", part.Address)
+ fmt.Printf("Last vote round: %s\n", strOrNA(part.LastVote))
+ fmt.Printf("Last block proposal round: %s\n", strOrNA(part.LastBlockProposal))
+ // PKI TODO: enable with state proof support.
+ //fmt.Printf("Last state proof round: %s\n", strOrNA(part.LastStateProof))
+ fmt.Printf("Effective first round: %s\n", strOrNA(part.EffectiveFirstValid))
+ fmt.Printf("Effective last round: %s\n", strOrNA(part.EffectiveLastValid))
+ fmt.Printf("First round: %d\n", part.Key.VoteFirstValid)
+ fmt.Printf("Last round: %d\n", part.Key.VoteLastValid)
+ fmt.Printf("Key dilution: %d\n", part.Key.VoteKeyDilution)
+ fmt.Printf("Selection key: %s\n", base64.StdEncoding.EncodeToString(part.Key.SelectionParticipationKey))
+ fmt.Printf("Voting key: %s\n", base64.StdEncoding.EncodeToString(part.Key.VoteParticipationKey))
+ // PKI TODO: enable with state proof support.
+ //fmt.Printf("State proof key: %s\n", base64.StdEncoding.EncodeToString(part.StateProofKey))
}
})
},
diff --git a/cmd/goal/application.go b/cmd/goal/application.go
index 5a1bed482..49a2ce9c5 100644
--- a/cmd/goal/application.go
+++ b/cmd/goal/application.go
@@ -17,6 +17,8 @@
package main
import (
+ "bytes"
+ "crypto/sha512"
"encoding/base32"
"encoding/base64"
"encoding/binary"
@@ -28,6 +30,7 @@ import (
"github.com/spf13/cobra"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/abi"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
@@ -42,12 +45,15 @@ var (
approvalProgFile string
clearProgFile string
+ method string
+ methodArgs []string
+
approvalProgRawFile string
clearProgRawFile string
extraPages uint32
- createOnCompletion string
+ onCompletion string
localSchemaUints uint64
localSchemaByteSlices uint64
@@ -80,9 +86,10 @@ func init() {
appCmd.AddCommand(clearAppCmd)
appCmd.AddCommand(readStateAppCmd)
appCmd.AddCommand(infoAppCmd)
+ appCmd.AddCommand(methodAppCmd)
appCmd.PersistentFlags().StringVarP(&walletName, "wallet", "w", "", "Set the wallet to be used for the selected operation")
- appCmd.PersistentFlags().StringSliceVar(&appArgs, "app-arg", nil, "Args to encode for application transactions (all will be encoded to a byte slice). For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.")
+ appCmd.PersistentFlags().StringArrayVar(&appArgs, "app-arg", nil, "Args to encode for application transactions (all will be encoded to a byte slice). For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.")
appCmd.PersistentFlags().StringSliceVar(&foreignApps, "foreign-app", nil, "Indexes of other apps whose global state is read in this transaction")
appCmd.PersistentFlags().StringSliceVar(&foreignAssets, "foreign-asset", nil, "Indexes of assets whose parameters are read in this transaction")
appCmd.PersistentFlags().StringSliceVar(&appStrAccounts, "app-account", nil, "Accounts that may be accessed from application logic")
@@ -99,7 +106,7 @@ func init() {
createAppCmd.Flags().Uint64Var(&localSchemaUints, "local-ints", 0, "Maximum number of integer values that may be stored in local (per-account) key/value stores for this app. Immutable.")
createAppCmd.Flags().Uint64Var(&localSchemaByteSlices, "local-byteslices", 0, "Maximum number of byte slices that may be stored in local (per-account) key/value stores for this app. Immutable.")
createAppCmd.Flags().StringVar(&appCreator, "creator", "", "Account to create the application")
- createAppCmd.Flags().StringVar(&createOnCompletion, "on-completion", "NoOp", "OnCompletion action for application transaction")
+ createAppCmd.Flags().StringVar(&onCompletion, "on-completion", "NoOp", "OnCompletion action for application transaction")
createAppCmd.Flags().Uint32Var(&extraPages, "extra-pages", 0, "Additional program space for supporting larger TEAL assembly program. A maximum of 3 extra pages is allowed. A page is 1024 bytes.")
callAppCmd.Flags().StringVarP(&account, "from", "f", "", "Account to call app from")
@@ -109,6 +116,11 @@ func init() {
deleteAppCmd.Flags().StringVarP(&account, "from", "f", "", "Account to send delete transaction from")
readStateAppCmd.Flags().StringVarP(&account, "from", "f", "", "Account to fetch state from")
updateAppCmd.Flags().StringVarP(&account, "from", "f", "", "Account to send update transaction from")
+ methodAppCmd.Flags().StringVarP(&account, "from", "f", "", "Account to call method from")
+
+ methodAppCmd.Flags().StringVar(&method, "method", "", "Method to be called")
+ methodAppCmd.Flags().StringArrayVar(&methodArgs, "arg", nil, "Args to pass in for calling a method")
+ methodAppCmd.Flags().StringVar(&onCompletion, "on-completion", "NoOp", "OnCompletion action for application transaction")
// Can't use PersistentFlags on the root because for some reason marking
// a root command as required with MarkPersistentFlagRequired isn't
@@ -121,6 +133,7 @@ func init() {
readStateAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID")
updateAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID")
infoAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID")
+ methodAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID")
// Add common transaction flags to all txn-generating app commands
addTxnFlags(createAppCmd)
@@ -130,6 +143,7 @@ func init() {
addTxnFlags(optInAppCmd)
addTxnFlags(closeOutAppCmd)
addTxnFlags(clearAppCmd)
+ addTxnFlags(methodAppCmd)
readStateAppCmd.Flags().BoolVar(&fetchLocal, "local", false, "Fetch account-specific state for this application. `--from` address is required when using this flag")
readStateAppCmd.Flags().BoolVar(&fetchGlobal, "global", false, "Fetch global state for this application.")
@@ -162,6 +176,11 @@ func init() {
readStateAppCmd.MarkFlagRequired("app-id")
infoAppCmd.MarkFlagRequired("app-id")
+
+ methodAppCmd.MarkFlagRequired("method") // nolint:errcheck // follow previous required flag format
+ methodAppCmd.MarkFlagRequired("app-id") // nolint:errcheck
+ methodAppCmd.MarkFlagRequired("from") // nolint:errcheck
+ methodAppCmd.Flags().MarkHidden("app-arg") // nolint:errcheck
}
type appCallArg struct {
@@ -230,6 +249,23 @@ func parseAppArg(arg appCallArg) (rawValue []byte, parseErr error) {
return
}
rawValue = data
+ case "abi":
+ typeAndValue := strings.SplitN(arg.Value, ":", 2)
+ if len(typeAndValue) != 2 {
+ parseErr = fmt.Errorf("Could not decode abi string (%s): abi-type and abi-value should be separated by a colon", arg.Value)
+ return
+ }
+ abiType, err := abi.TypeOf(typeAndValue[0])
+ if err != nil {
+ parseErr = fmt.Errorf("Could not decode abi type string (%s): %v", typeAndValue[0], err)
+ return
+ }
+ value, err := abiType.UnmarshalFromJSON([]byte(typeAndValue[1]))
+ if err != nil {
+ parseErr = fmt.Errorf("Could not decode abi value string (%s): %v", typeAndValue[1], err)
+ return
+ }
+ return abiType.Encode(value)
default:
parseErr = fmt.Errorf("Unknown encoding: %s", arg.Encoding)
}
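[editor's note] The new "abi" encoding splits an argument such as abi:uint64:1234 into a type string and a JSON value, then decodes and ABI-encodes them using the calls visible above (abi.TypeOf, UnmarshalFromJSON, Encode). A compact sketch of that path; the wrapper function itself is illustrative:

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/abi"
)

// encodeABIArg mirrors the parseAppArg "abi" branch: parse the type,
// decode the JSON value against it, then ABI-encode the result.
func encodeABIArg(typeStr, jsonValue string) ([]byte, error) {
	abiType, err := abi.TypeOf(typeStr)
	if err != nil {
		return nil, fmt.Errorf("bad abi type %q: %w", typeStr, err)
	}
	value, err := abiType.UnmarshalFromJSON([]byte(jsonValue))
	if err != nil {
		return nil, fmt.Errorf("bad abi value %q: %w", jsonValue, err)
	}
	return abiType.Encode(value)
}

func main() {
	raw, err := encodeABIArg("uint64", "1234")
	fmt.Println(raw, err)
}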
@@ -267,6 +303,20 @@ func processAppInputFile() (args [][]byte, accounts []string, foreignApps []uint
return parseAppInputs(inputs)
}
+// filterEmptyStrings filters out empty strings parsed in by StringArrayVar.
+// It exists to support ABI argument parsing, since the parsing of `appArg`
+// changed from `StringSliceVar` to `StringArrayVar`.
+func filterEmptyStrings(strSlice []string) []string {
+ var newStrSlice []string
+
+ for _, str := range strSlice {
+ if len(str) > 0 {
+ newStrSlice = append(newStrSlice, str)
+ }
+ }
+ return newStrSlice
+}
+
func getAppInputs() (args [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) {
if (appArgs != nil || appStrAccounts != nil || foreignApps != nil) && appInputFilename != "" {
reportErrorf("Cannot specify both command-line arguments/accounts and JSON input filename")
@@ -276,7 +326,11 @@ func getAppInputs() (args [][]byte, accounts []string, foreignApps []uint64, for
}
var encodedArgs []appCallArg
- for _, arg := range appArgs {
+
+ // filter out empty strings from appArgs first; they can appear as a result of the change to `StringArrayVar`
+ newAppArgs := filterEmptyStrings(appArgs)
+
+ for _, arg := range newAppArgs {
encodingValue := strings.SplitN(arg, ":", 2)
if len(encodingValue) != 2 {
reportErrorf("all arguments should be of the form 'encoding:value'")
@@ -328,6 +382,12 @@ func mustParseOnCompletion(ocString string) (oc transactions.OnCompletion) {
}
}
+func getDataDirAndClient() (dataDir string, client libgoal.Client) {
+ dataDir = ensureSingleDataDir()
+ client = ensureFullClient(dataDir)
+ return
+}
+
func mustParseProgArgs() (approval []byte, clear []byte) {
// Ensure we don't have ambiguous or all empty args
if (approvalProgFile == "") == (approvalProgRawFile == "") {
@@ -358,9 +418,7 @@ var createAppCmd = &cobra.Command{
Long: `Issue a transaction that creates an application`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
-
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ dataDir, client := getDataDirAndClient()
// Construct schemas from args
localSchema := basics.StateSchema{
@@ -375,15 +433,15 @@ var createAppCmd = &cobra.Command{
// Parse transaction parameters
approvalProg, clearProg := mustParseProgArgs()
- onCompletion := mustParseOnCompletion(createOnCompletion)
+ onCompletionEnum := mustParseOnCompletion(onCompletion)
appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
- switch onCompletion {
+ switch onCompletionEnum {
case transactions.CloseOutOC, transactions.ClearStateOC:
- reportWarnf("'--on-completion %s' may be ill-formed for 'goal app create'", createOnCompletion)
+ reportWarnf("'--on-completion %s' may be ill-formed for 'goal app create'", onCompletion)
}
- tx, err := client.MakeUnsignedAppCreateTx(onCompletion, approvalProg, clearProg, globalSchema, localSchema, appArgs, appAccounts, foreignApps, foreignAssets, extraPages)
+ tx, err := client.MakeUnsignedAppCreateTx(onCompletionEnum, approvalProg, clearProg, globalSchema, localSchema, appArgs, appAccounts, foreignApps, foreignAssets, extraPages)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
@@ -434,19 +492,13 @@ var createAppCmd = &cobra.Command{
}
} else {
if dumpForDryrun {
- // Write dryrun data to file
- proto, _ := getProto(protoVersion)
- data, err := libgoal.MakeDryrunStateBytes(client, tx, []transactions.SignedTxn{}, string(proto), dumpForDryrunFormat.String())
- if err != nil {
- reportErrorf(err.Error())
- }
- writeFile(outFilename, data, 0600)
+ err = writeDryrunReqToFile(client, tx, outFilename)
} else {
// Write transaction to file
err = writeTxnToFile(client, sign, dataDir, walletName, tx, outFilename)
- if err != nil {
- reportErrorf(err.Error())
- }
+ }
+ if err != nil {
+ reportErrorf(err.Error())
}
}
},
@@ -458,8 +510,7 @@ var updateAppCmd = &cobra.Command{
Long: `Issue a transaction that updates an application's ApprovalProgram and ClearStateProgram`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ dataDir, client := getDataDirAndClient()
// Parse transaction parameters
approvalProg, clearProg := mustParseProgArgs()
@@ -513,18 +564,12 @@ var updateAppCmd = &cobra.Command{
}
} else {
if dumpForDryrun {
- // Write dryrun data to file
- proto, _ := getProto(protoVersion)
- data, err := libgoal.MakeDryrunStateBytes(client, tx, []transactions.SignedTxn{}, string(proto), dumpForDryrunFormat.String())
- if err != nil {
- reportErrorf(err.Error())
- }
- writeFile(outFilename, data, 0600)
+ err = writeDryrunReqToFile(client, tx, outFilename)
} else {
err = writeTxnToFile(client, sign, dataDir, walletName, tx, outFilename)
- if err != nil {
- reportErrorf(err.Error())
- }
+ }
+ if err != nil {
+ reportErrorf(err.Error())
}
}
},
@@ -536,8 +581,7 @@ var optInAppCmd = &cobra.Command{
Long: `Opt an account in to an application, allocating local state in your account`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ dataDir, client := getDataDirAndClient()
// Parse transaction parameters
appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
@@ -590,18 +634,12 @@ var optInAppCmd = &cobra.Command{
}
} else {
if dumpForDryrun {
- // Write dryrun data to file
- proto, _ := getProto(protoVersion)
- data, err := libgoal.MakeDryrunStateBytes(client, tx, []transactions.SignedTxn{}, string(proto), dumpForDryrunFormat.String())
- if err != nil {
- reportErrorf(err.Error())
- }
- writeFile(outFilename, data, 0600)
+ err = writeDryrunReqToFile(client, tx, outFilename)
} else {
err = writeTxnToFile(client, sign, dataDir, walletName, tx, outFilename)
- if err != nil {
- reportErrorf(err.Error())
- }
+ }
+ if err != nil {
+ reportErrorf(err.Error())
}
}
},
@@ -613,8 +651,7 @@ var closeOutAppCmd = &cobra.Command{
Long: `Close an account out of an application, removing local state from your account. The application must still exist. If it doesn't, use 'goal app clear'.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ dataDir, client := getDataDirAndClient()
// Parse transaction parameters
appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
@@ -667,18 +704,12 @@ var closeOutAppCmd = &cobra.Command{
}
} else {
if dumpForDryrun {
- // Write dryrun data to file
- proto, _ := getProto(protoVersion)
- data, err := libgoal.MakeDryrunStateBytes(client, tx, []transactions.SignedTxn{}, string(proto), dumpForDryrunFormat.String())
- if err != nil {
- reportErrorf(err.Error())
- }
- writeFile(outFilename, data, 0600)
+ err = writeDryrunReqToFile(client, tx, outFilename)
} else {
err = writeTxnToFile(client, sign, dataDir, walletName, tx, outFilename)
- if err != nil {
- reportErrorf(err.Error())
- }
+ }
+ if err != nil {
+ reportErrorf(err.Error())
}
}
},
@@ -690,8 +721,7 @@ var clearAppCmd = &cobra.Command{
Long: `Remove any local state from your account associated with an application. The application does not need to exist anymore.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ dataDir, client := getDataDirAndClient()
// Parse transaction parameters
appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
@@ -744,18 +774,12 @@ var clearAppCmd = &cobra.Command{
}
} else {
if dumpForDryrun {
- // Write dryrun data to file
- proto, _ := getProto(protoVersion)
- data, err := libgoal.MakeDryrunStateBytes(client, tx, []transactions.SignedTxn{}, string(proto), dumpForDryrunFormat.String())
- if err != nil {
- reportErrorf(err.Error())
- }
- writeFile(outFilename, data, 0600)
+ err = writeDryrunReqToFile(client, tx, outFilename)
} else {
err = writeTxnToFile(client, sign, dataDir, walletName, tx, outFilename)
- if err != nil {
- reportErrorf(err.Error())
- }
+ }
+ if err != nil {
+ reportErrorf(err.Error())
}
}
},
@@ -767,8 +791,7 @@ var callAppCmd = &cobra.Command{
Long: `Call an application, invoking application-specific functionality`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ dataDir, client := getDataDirAndClient()
// Parse transaction parameters
appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
@@ -821,18 +844,12 @@ var callAppCmd = &cobra.Command{
}
} else {
if dumpForDryrun {
- // Write dryrun data to file
- proto, _ := getProto(protoVersion)
- data, err := libgoal.MakeDryrunStateBytes(client, tx, []transactions.SignedTxn{}, string(proto), dumpForDryrunFormat.String())
- if err != nil {
- reportErrorf(err.Error())
- }
- writeFile(outFilename, data, 0600)
+ err = writeDryrunReqToFile(client, tx, outFilename)
} else {
err = writeTxnToFile(client, sign, dataDir, walletName, tx, outFilename)
- if err != nil {
- reportErrorf(err.Error())
- }
+ }
+ if err != nil {
+ reportErrorf(err.Error())
}
}
},
@@ -844,8 +861,7 @@ var deleteAppCmd = &cobra.Command{
Long: `Delete an application, removing the global state and other application parameters from the creator's account`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ dataDir, client := getDataDirAndClient()
// Parse transaction parameters
appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
@@ -898,18 +914,13 @@ var deleteAppCmd = &cobra.Command{
}
} else {
if dumpForDryrun {
- // Write dryrun data to file
- proto, _ := getProto(protoVersion)
- data, err := libgoal.MakeDryrunStateBytes(client, tx, []transactions.SignedTxn{}, string(proto), dumpForDryrunFormat.String())
- if err != nil {
- reportErrorf(err.Error())
- }
- writeFile(outFilename, data, 0600)
+ err = writeDryrunReqToFile(client, tx, outFilename)
} else {
err = writeTxnToFile(client, sign, dataDir, walletName, tx, outFilename)
- if err != nil {
- reportErrorf(err.Error())
- }
+
+ }
+ if err != nil {
+ reportErrorf(err.Error())
}
}
},
@@ -921,8 +932,7 @@ var readStateAppCmd = &cobra.Command{
Long: `Read global or local (account-specific) state for an application`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ _, client := getDataDirAndClient()
// Ensure exactly one of --local or --global is specified
if fetchLocal == fetchGlobal {
@@ -1003,8 +1013,7 @@ var infoAppCmd = &cobra.Command{
Long: `Look up application information stored on the network, such as program hash.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ _, client := getDataDirAndClient()
meta, err := client.ApplicationInformation(appIdx)
if err != nil {
@@ -1037,3 +1046,264 @@ var infoAppCmd = &cobra.Command{
}
},
}
+
+// populateMethodCallTxnArgs parses and loads transactions from the files indicated by the values
+// slice. An error occurs if a transaction does not match the expected type, has a nonzero
+// group ID, or is already signed with a regular or multisig signature (a logic signature is allowed)
+func populateMethodCallTxnArgs(types []string, values []string) ([]transactions.SignedTxn, error) {
+ loadedTxns := make([]transactions.SignedTxn, len(values))
+
+ for i, txFilename := range values {
+ data, err := readFile(txFilename)
+ if err != nil {
+ return nil, fmt.Errorf(fileReadError, txFilename, err)
+ }
+
+ var txn transactions.SignedTxn
+ err = protocol.Decode(data, &txn)
+ if err != nil {
+ return nil, fmt.Errorf(txDecodeError, txFilename, err)
+ }
+
+ if !txn.Sig.Blank() || !txn.Msig.Blank() {
+ return nil, fmt.Errorf("Transaction from %s has already been signed", txFilename)
+ }
+
+ if !txn.Txn.Group.IsZero() {
+ return nil, fmt.Errorf("Transaction from %s already has a group ID: %s", txFilename, txn.Txn.Group)
+ }
+
+ expectedType := types[i]
+ if expectedType != "txn" && txn.Txn.Type != protocol.TxType(expectedType) {
+ return nil, fmt.Errorf("Transaction from %s does not match method argument type. Expected %s, got %s", txFilename, expectedType, txn.Txn.Type)
+ }
+
+ loadedTxns[i] = txn
+ }
+
+ return loadedTxns, nil
+}
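
For context, a file passed as a transaction argument must decode to a SignedTxn that is unsigned (an attached logic signature is fine), carries no group ID, and matches the declared type. A minimal sketch, not part of the patch, of writing such a file for a "pay" argument; the addresses, amount, and filename are placeholders:

    package main

    import (
        "io/ioutil"

        "github.com/algorand/go-algorand/data/basics"
        "github.com/algorand/go-algorand/data/transactions"
        "github.com/algorand/go-algorand/protocol"
    )

    // writePayArg writes an unsigned payment transaction in the form
    // populateMethodCallTxnArgs accepts.
    func writePayArg(sender, receiver basics.Address) error {
        var pay transactions.Transaction
        pay.Type = protocol.PaymentTx
        pay.Sender = sender
        pay.Receiver = receiver
        pay.Amount = basics.MicroAlgos{Raw: 1000}
        // Sig and Msig stay blank and Group stays zero, so the checks above pass.
        stxn := transactions.SignedTxn{Txn: pay}
        return ioutil.WriteFile("arg0.txn", protocol.Encode(&stxn), 0600)
    }
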
+
+var methodAppCmd = &cobra.Command{
+ Use: "method",
+ Short: "Invoke a method",
+ Long: `Invoke a method in an App (stateful contract) with an application call transaction`,
+ Args: validateNoPosArgsFn,
+ Run: func(cmd *cobra.Command, args []string) {
+ dataDir, client := getDataDirAndClient()
+
+ // Parse transaction parameters
+ appArgsParsed, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ if len(appArgsParsed) > 0 {
+ reportErrorf("in goal app method: --arg and --app-arg are mutually exclusive, do not use --app-arg")
+ }
+
+ onCompletionEnum := mustParseOnCompletion(onCompletion)
+
+ if appIdx == 0 {
+ reportErrorf("app id == 0, goal app create not supported in goal app method")
+ }
+
+ var approvalProg, clearProg []byte
+ if onCompletionEnum == transactions.UpdateApplicationOC {
+ approvalProg, clearProg = mustParseProgArgs()
+ }
+
+ var applicationArgs [][]byte
+
+ // insert the method selector hash
+ hash := sha512.Sum512_256([]byte(method))
+ applicationArgs = append(applicationArgs, hash[0:4])
+
+ // parse the argument and return types from the method signature
+ _, argTypes, retTypeStr, err := abi.ParseMethodSignature(method)
+ if err != nil {
+ reportErrorf("cannot parse method signature: %v", err)
+ }
+
+ var retType *abi.Type
+ if retTypeStr != "void" {
+ theRetType, err := abi.TypeOf(retTypeStr)
+ if err != nil {
+ reportErrorf("cannot cast %s to abi type: %v", retTypeStr, err)
+ }
+ retType = &theRetType
+ }
+
+ if len(methodArgs) != len(argTypes) {
+ reportErrorf("incorrect number of arguments, method expected %d but got %d", len(argTypes), len(methodArgs))
+ }
+
+ var txnArgTypes []string
+ var txnArgValues []string
+ var basicArgTypes []string
+ var basicArgValues []string
+ for i, argType := range argTypes {
+ argValue := methodArgs[i]
+ if abi.IsTransactionType(argType) {
+ txnArgTypes = append(txnArgTypes, argType)
+ txnArgValues = append(txnArgValues, argValue)
+ } else {
+ basicArgTypes = append(basicArgTypes, argType)
+ basicArgValues = append(basicArgValues, argValue)
+ }
+ }
+
+ err = abi.ParseArgJSONtoByteSlice(basicArgTypes, basicArgValues, &applicationArgs)
+ if err != nil {
+ reportErrorf("cannot parse arguments to ABI encoding: %v", err)
+ }
+
+ txnArgs, err := populateMethodCallTxnArgs(txnArgTypes, txnArgValues)
+ if err != nil {
+ reportErrorf("error populating transaction arguments: %v", err)
+ }
+
+ appCallTxn, err := client.MakeUnsignedApplicationCallTx(
+ appIdx, applicationArgs, appAccounts, foreignApps, foreignAssets,
+ onCompletionEnum, approvalProg, clearProg, basics.StateSchema{}, basics.StateSchema{}, 0)
+
+ if err != nil {
+ reportErrorf("Cannot create application txn: %v", err)
+ }
+
+ // Fill in note and lease
+ appCallTxn.Note = parseNoteField(cmd)
+ appCallTxn.Lease = parseLease(cmd)
+
+ // Fill in rounds, fee, etc.
+ fv, lv, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ if err != nil {
+ reportErrorf("Cannot determine last valid round: %s", err)
+ }
+
+ appCallTxn, err = client.FillUnsignedTxTemplate(account, fv, lv, fee, appCallTxn)
+ if err != nil {
+ reportErrorf("Cannot construct transaction: %s", err)
+ }
+ explicitFee := cmd.Flags().Changed("fee")
+ if explicitFee {
+ appCallTxn.Fee = basics.MicroAlgos{Raw: fee}
+ }
+
+ // Compile group
+ var txnGroup []transactions.Transaction
+ for i := range txnArgs {
+ txnGroup = append(txnGroup, txnArgs[i].Txn)
+ }
+ txnGroup = append(txnGroup, appCallTxn)
+ if len(txnGroup) > 1 {
+ // Assign a group ID only when transaction arguments are present
+ groupID, err := client.GroupID(txnGroup)
+ if err != nil {
+ reportErrorf("Cannot assign transaction group ID: %s", err)
+ }
+ for i := range txnGroup {
+ txnGroup[i].Group = groupID
+ }
+ }
+
+ // Sign transactions
+ var signedTxnGroup []transactions.SignedTxn
+ shouldSign := sign || outFilename == ""
+ for i, unsignedTxn := range txnGroup {
+ txnFromArgs := transactions.SignedTxn{}
+ if i < len(txnArgs) {
+ txnFromArgs = txnArgs[i]
+ }
+
+ if !txnFromArgs.Lsig.Blank() {
+ signedTxnGroup = append(signedTxnGroup, transactions.SignedTxn{
+ Lsig: txnFromArgs.Lsig,
+ AuthAddr: txnFromArgs.AuthAddr,
+ Txn: unsignedTxn,
+ })
+ continue
+ }
+
+ signedTxn, err := createSignedTransaction(client, shouldSign, dataDir, walletName, unsignedTxn, txnFromArgs.AuthAddr)
+ if err != nil {
+ reportErrorf(errorSigningTX, err)
+ }
+
+ signedTxnGroup = append(signedTxnGroup, signedTxn)
+ }
+
+ // Output to file
+ if outFilename != "" {
+ if dumpForDryrun {
+ err = writeDryrunReqToFile(client, signedTxnGroup, outFilename)
+ } else {
+ err = writeSignedTxnsToFile(signedTxnGroup, outFilename)
+ }
+ if err != nil {
+ reportErrorf(err.Error())
+ }
+ return
+ }
+
+ // Broadcast
+ err = client.BroadcastTransactionGroup(signedTxnGroup)
+ if err != nil {
+ reportErrorf(errorBroadcastingTX, err)
+ }
+
+ // Report tx details to user
+ reportInfof("Issued %d transaction(s):", len(signedTxnGroup))
+ // remember the final txid in this variable
+ var txid string
+ for _, stxn := range signedTxnGroup {
+ txid = stxn.Txn.ID().String()
+ reportInfof("\tIssued transaction from account %s, txid %s (fee %d)", stxn.Txn.Sender, txid, stxn.Txn.Fee.Raw)
+ }
+
+ if !noWaitAfterSend {
+ _, err := waitForCommit(client, txid, lv)
+ if err != nil {
+ reportErrorf(err.Error())
+ }
+
+ resp, err := client.PendingTransactionInformationV2(txid)
+ if err != nil {
+ reportErrorf(err.Error())
+ }
+
+ if retType == nil {
+ fmt.Printf("method %s succeeded\n", method)
+ return
+ }
+
+ // compute the expected return-value log prefix
+ hashRet := sha512.Sum512_256([]byte("return"))
+ hashRetPrefix := hashRet[:4]
+
+ var abiEncodedRet []byte
+ foundRet := false
+ if resp.Logs != nil {
+ for i := len(*resp.Logs) - 1; i >= 0; i-- {
+ retLog := (*resp.Logs)[i]
+ if bytes.HasPrefix(retLog, hashRetPrefix) {
+ abiEncodedRet = retLog[4:]
+ foundRet = true
+ break
+ }
+ }
+ }
+
+ if !foundRet {
+ reportErrorf("cannot find return log for abi type %s", retTypeStr)
+ }
+
+ decoded, err := retType.Decode(abiEncodedRet)
+ if err != nil {
+ reportErrorf("cannot decode return value %v: %v", abiEncodedRet, err)
+ }
+
+ decodedJSON, err := retType.MarshalToJSON(decoded)
+ if err != nil {
+ reportErrorf("cannot marshal returned bytes %v to JSON: %v", decoded, err)
+ }
+ fmt.Printf("method %s succeeded with output: %s\n", method, string(decodedJSON))
+ }
+ },
+}
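
For reference, the selector and return-value handling in methodAppCmd follow the ARC-4 ABI convention: a method selector is the first four bytes of the SHA-512/256 hash of the method signature, and a return value is the last log entry whose first four bytes equal SHA-512/256("return")[:4]. A self-contained sketch of both computations, using only the standard library:

    package main

    import (
        "bytes"
        "crypto/sha512"
        "fmt"
    )

    // selector returns the 4-byte ARC-4 method selector.
    func selector(methodSig string) []byte {
        h := sha512.Sum512_256([]byte(methodSig))
        return h[:4]
    }

    // returnValue strips the ARC-4 return prefix from a log entry, if present.
    func returnValue(log []byte) ([]byte, bool) {
        prefix := sha512.Sum512_256([]byte("return"))
        if bytes.HasPrefix(log, prefix[:4]) {
            return log[4:], true
        }
        return nil, false
    }

    func main() {
        fmt.Printf("%x\n", selector("add(uint64,uint64)uint64"))
    }
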
diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go
index 571c4d1a4..c5d69854a 100644
--- a/cmd/goal/clerk.go
+++ b/cmd/goal/clerk.go
@@ -131,6 +131,7 @@ func init() {
dryrunCmd.Flags().StringVarP(&protoVersion, "proto", "P", "", "consensus protocol version id string")
dryrunCmd.Flags().BoolVar(&dumpForDryrun, "dryrun-dump", false, "Dump in dryrun format acceptable by dryrun REST api instead of running")
dryrunCmd.Flags().Var(&dumpForDryrunFormat, "dryrun-dump-format", "Dryrun dump format: "+dumpForDryrunFormat.AllowedString())
+ dryrunCmd.Flags().StringSliceVar(&dumpForDryrunAccts, "dryrun-accounts", nil, "additional accounts to include in the dryrun request object")
dryrunCmd.Flags().StringVarP(&outFilename, "outfile", "o", "", "Filename for writing dryrun state object")
dryrunCmd.MarkFlagRequired("txfile")
@@ -193,34 +194,45 @@ func waitForCommit(client libgoal.Client, txid string, transactionLastValidRound
return
}
-func createSignedTransaction(client libgoal.Client, signTx bool, dataDir string, walletName string, tx transactions.Transaction) (stxn transactions.SignedTxn, err error) {
+func createSignedTransaction(client libgoal.Client, signTx bool, dataDir string, walletName string, tx transactions.Transaction, signer basics.Address) (stxn transactions.SignedTxn, err error) {
if signTx {
// Sign the transaction
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- stxn, err = client.SignTransactionWithWallet(wh, pw, tx)
- if err != nil {
- return
- }
- } else {
- // Wrap in a transactions.SignedTxn with an empty sig.
- // This way protocol.Encode will encode the transaction type
- stxn, err = transactions.AssembleSignedTxn(tx, crypto.Signature{}, crypto.MultisigSig{})
- if err != nil {
- return
+ if signer.IsZero() {
+ stxn, err = client.SignTransactionWithWallet(wh, pw, tx)
+ } else {
+ stxn, err = client.SignTransactionWithWalletAndSigner(wh, pw, signer.String(), tx)
}
+ return
+ }
- stxn = populateBlankMultisig(client, dataDir, walletName, stxn)
+ // Wrap in a transactions.SignedTxn with an empty sig.
+ // This way protocol.Encode will encode the transaction type
+ stxn, err = transactions.AssembleSignedTxn(tx, crypto.Signature{}, crypto.MultisigSig{})
+ if err != nil {
+ return
}
+
+ stxn = populateBlankMultisig(client, dataDir, walletName, stxn)
return
}
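
The new signer parameter exists for rekeyed senders: a zero address keeps the old behavior (the wallet signs with the sender's own key), while a non-zero address, for example the AuthAddr carried on a transaction argument in goal app method, routes through SignTransactionWithWalletAndSigner. An illustrative call; authAddr is a placeholder for the authorized signing address:

    // Sign on behalf of a rekeyed sender (sketch; authAddr is hypothetical).
    stxn, err := createSignedTransaction(client, true, dataDir, walletName, txn, authAddr)
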
+func writeSignedTxnsToFile(stxns []transactions.SignedTxn, filename string) error {
+ var outData []byte
+ for _, stxn := range stxns {
+ outData = append(outData, protocol.Encode(&stxn)...)
+ }
+
+ return writeFile(filename, outData, 0600)
+}
+
func writeTxnToFile(client libgoal.Client, signTx bool, dataDir string, walletName string, tx transactions.Transaction, filename string) error {
- stxn, err := createSignedTransaction(client, signTx, dataDir, walletName, tx)
+ stxn, err := createSignedTransaction(client, signTx, dataDir, walletName, tx, basics.Address{})
if err != nil {
return err
}
// Write the SignedTxn to the output file
- return writeFile(filename, protocol.Encode(&stxn), 0600)
+ return writeSignedTxnsToFile([]transactions.SignedTxn{stxn}, filename)
}
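
writeSignedTxnsToFile concatenates the canonical msgpack encodings back to back; they can be read again with a streaming decoder, the same pattern goal already uses for loading transaction files. A sketch of the reverse direction, assuming protocol.NewDecoderBytes and the usual imports (io, io/ioutil):

    // readSignedTxns reads back a file produced by writeSignedTxnsToFile.
    func readSignedTxns(filename string) ([]transactions.SignedTxn, error) {
        data, err := ioutil.ReadFile(filename)
        if err != nil {
            return nil, err
        }
        dec := protocol.NewDecoderBytes(data)
        var stxns []transactions.SignedTxn
        for {
            var st transactions.SignedTxn
            err := dec.Decode(&st)
            if err == io.EOF {
                break // end of the concatenated stream
            }
            if err != nil {
                return nil, err
            }
            stxns = append(stxns, st)
        }
        return stxns, nil
    }
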
func getB64Args(args []string) [][]byte {
@@ -418,7 +430,7 @@ var sendCmd = &cobra.Command{
}
} else {
signTx := sign || (outFilename == "")
- stx, err = createSignedTransaction(client, signTx, dataDir, walletName, payment)
+ stx, err = createSignedTransaction(client, signTx, dataDir, walletName, payment, basics.Address{})
if err != nil {
reportErrorf(errorSigningTX, err)
}
@@ -484,18 +496,12 @@ var sendCmd = &cobra.Command{
}
} else {
if dumpForDryrun {
- // Write dryrun data to file
- proto, _ := getProto(protoVersion)
- data, err := libgoal.MakeDryrunStateBytes(client, stx, []transactions.SignedTxn{}, string(proto), dumpForDryrunFormat.String())
- if err != nil {
- reportErrorf(err.Error())
- }
- writeFile(outFilename, data, 0600)
+ err = writeDryrunReqToFile(client, stx, outFilename)
} else {
err = writeFile(outFilename, protocol.Encode(&stx), 0600)
- if err != nil {
- reportErrorf(err.Error())
- }
+ }
+ if err != nil {
+ reportErrorf(err.Error())
}
}
},
@@ -859,13 +865,12 @@ var groupCmd = &cobra.Command{
transactionIdx++
}
- var outData []byte
- for _, stxn := range stxns {
- stxn.Txn.Group = crypto.HashObj(group)
- outData = append(outData, protocol.Encode(&stxn)...)
+ groupHash := crypto.HashObj(group)
+ for i := range stxns {
+ stxns[i].Txn.Group = groupHash
}
- err = writeFile(outFilename, outData, 0600)
+ err = writeSignedTxnsToFile(stxns, outFilename)
if err != nil {
reportErrorf(fileWriteError, outFilename, err)
}
@@ -927,7 +932,7 @@ func assembleFile(fname string) (program []byte) {
}
ops, err := logic.AssembleString(string(text))
if err != nil {
- ops.ReportProblems(fname)
+ ops.ReportProblems(fname, os.Stderr)
reportErrorf("%s: %s", fname, err)
}
_, params := getProto(protoVersion)
@@ -1069,7 +1074,11 @@ var dryrunCmd = &cobra.Command{
// Write dryrun data to file
dataDir := ensureSingleDataDir()
client := ensureFullClient(dataDir)
- data, err := libgoal.MakeDryrunStateBytes(client, nil, txgroup, string(proto), dumpForDryrunFormat.String())
+ accts, err := unmarshalSlice(dumpForDryrunAccts)
+ if err != nil {
+ reportErrorf(err.Error())
+ }
+ data, err := libgoal.MakeDryrunStateBytes(client, nil, txgroup, accts, string(proto), dumpForDryrunFormat.String())
if err != nil {
reportErrorf(err.Error())
}
@@ -1182,3 +1191,16 @@ var dryrunRemoteCmd = &cobra.Command{
}
},
}
+
+// unmarshalSlice converts checksummed address strings into basics.Address values
+func unmarshalSlice(accts []string) ([]basics.Address, error) {
+ result := make([]basics.Address, 0, len(accts))
+ for _, acct := range accts {
+ addr, err := basics.UnmarshalChecksumAddress(acct)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, addr)
+ }
+ return result, nil
+}
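
unmarshalSlice backs the new --dryrun-accounts flag: a comma-separated list of checksummed addresses, any one of which failing to parse aborts the command. An illustrative invocation; the addresses and filenames are placeholders:

    goal clerk dryrun --txfile payment.stxn --dryrun-dump \
        --dryrun-accounts ADDR1,ADDR2 -o state.dr.json
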
diff --git a/cmd/goal/commands.go b/cmd/goal/commands.go
index bede5a309..9d3a19c15 100644
--- a/cmd/goal/commands.go
+++ b/cmd/goal/commands.go
@@ -36,6 +36,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/daemon/algod/api/spec/common"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
@@ -563,7 +564,22 @@ func writeFile(filename string, data []byte, perm os.FileMode) error {
return ioutil.WriteFile(filename, data, perm)
}
-// readFile is a wrapper of ioutil.ReadFile which consniders the
+// writeDryrunReqToFile creates a dryrun request object and writes it to a file
+func writeDryrunReqToFile(client libgoal.Client, txnOrStxn interface{}, outFilename string) (err error) {
+ proto, _ := getProto(protoVersion)
+ accts, err := unmarshalSlice(dumpForDryrunAccts)
+ if err != nil {
+ reportErrorf(err.Error())
+ }
+ data, err := libgoal.MakeDryrunStateBytes(client, txnOrStxn, []transactions.SignedTxn{}, accts, string(proto), dumpForDryrunFormat.String())
+ if err != nil {
+ reportErrorf(err.Error())
+ }
+ err = writeFile(outFilename, data, 0600)
+ return
+}
+
+// readFile is a wrapper of ioutil.ReadFile which considers the
// special case of stdin filename
func readFile(filename string) ([]byte, error) {
if filename == stdinFileNameValue {
diff --git a/cmd/goal/common.go b/cmd/goal/common.go
index 2d8d4c3f6..23496a5cc 100644
--- a/cmd/goal/common.go
+++ b/cmd/goal/common.go
@@ -41,14 +41,15 @@ var lastValid uint64
var numValidRounds uint64 // also used in account and asset
var (
- fee uint64
- outFilename string
- sign bool
- noteBase64 string
- noteText string
- lease string
- noWaitAfterSend bool
- dumpForDryrun bool
+ fee uint64
+ outFilename string
+ sign bool
+ noteBase64 string
+ noteText string
+ lease string
+ noWaitAfterSend bool
+ dumpForDryrun bool
+ dumpForDryrunAccts []string
)
var dumpForDryrunFormat cobraStringValue = *makeCobraStringValue("json", []string{"msgp"})
@@ -66,6 +67,7 @@ func addTxnFlags(cmd *cobra.Command) {
cmd.Flags().BoolVarP(&noWaitAfterSend, "no-wait", "N", false, "Don't wait for transaction to commit")
cmd.Flags().BoolVar(&dumpForDryrun, "dryrun-dump", false, "Dump in dryrun format acceptable by dryrun REST api")
cmd.Flags().Var(&dumpForDryrunFormat, "dryrun-dump-format", "Dryrun dump format: "+dumpForDryrunFormat.AllowedString())
+ cmd.Flags().StringSliceVar(&dumpForDryrunAccts, "dryrun-accounts", nil, "additional accounts to include in the dryrun request object")
}
type cobraStringValue struct {
diff --git a/cmd/goal/multisig.go b/cmd/goal/multisig.go
index 4bd72681e..387883582 100644
--- a/cmd/goal/multisig.go
+++ b/cmd/goal/multisig.go
@@ -20,6 +20,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ "os"
"github.com/spf13/cobra"
@@ -163,7 +164,7 @@ var signProgramCmd = &cobra.Command{
}
ops, err := logic.AssembleString(string(text))
if err != nil {
- ops.ReportProblems(programSource)
+ ops.ReportProblems(programSource, os.Stderr)
reportErrorf("%s: %s", programSource, err)
}
if outname == "" {
diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go
index ed834f4ad..7d0136d10 100644
--- a/cmd/pingpong/runCmd.go
+++ b/cmd/pingpong/runCmd.go
@@ -242,7 +242,7 @@ var runCmd = &cobra.Command{
}
ops, err := logic.AssembleString(programStr)
if err != nil {
- ops.ReportProblems(teal)
+ ops.ReportProblems(teal, os.Stderr)
reportErrorf("Internal error, cannot assemble %v \n", programStr)
}
cfg.Program = ops.Program
diff --git a/cmd/tealdbg/cdtState.go b/cmd/tealdbg/cdtState.go
index dca4a3ff9..77f4eb896 100644
--- a/cmd/tealdbg/cdtState.go
+++ b/cmd/tealdbg/cdtState.go
@@ -364,7 +364,11 @@ func prepareTxn(txn *transactions.Transaction, groupIndex int) []fieldDesc {
field == int(logic.Accounts) ||
field == int(logic.ApplicationArgs) ||
field == int(logic.Assets) ||
- field == int(logic.Applications) {
+ field == int(logic.Applications) ||
+ field == int(logic.CreatedApplicationID) ||
+ field == int(logic.CreatedAssetID) ||
+ field == int(logic.Logs) ||
+ field == int(logic.NumLogs) {
continue
}
var value string
diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go
index 6fe2d006a..aa5fb3aaf 100644
--- a/cmd/tealdbg/local.go
+++ b/cmd/tealdbg/local.go
@@ -524,20 +524,38 @@ func (r *LocalRunner) RunAll() error {
failed := 0
start := time.Now()
- for _, run := range r.runs {
- r.debugger.SaveProgram(run.name, run.program, run.source, run.offsetToLine, run.states)
-
+ pooledApplicationBudget := uint64(0)
+ credit, _ := transactions.FeeCredit(r.txnGroup, r.proto.MinTxnFee)
+ // ignore error since fees are not important for debugging in most cases
+
+ evalParams := make([]logic.EvalParams, len(r.runs))
+ for i, run := range r.runs {
+ if run.mode == modeStateful {
+ if r.proto.EnableAppCostPooling {
+ pooledApplicationBudget += uint64(r.proto.MaxAppProgramCost)
+ } else {
+ pooledApplicationBudget = uint64(r.proto.MaxAppProgramCost)
+ }
+ }
ep := logic.EvalParams{
- Proto: &r.proto,
- Debugger: r.debugger,
- Txn: &r.txnGroup[groupIndex],
- TxnGroup: r.txnGroup,
- GroupIndex: run.groupIndex,
- PastSideEffects: run.pastSideEffects,
- Specials: &transactions.SpecialAddresses{},
+ Proto: &r.proto,
+ Debugger: r.debugger,
+ Txn: &r.txnGroup[run.groupIndex],
+ TxnGroup: r.txnGroup,
+ GroupIndex: run.groupIndex,
+ PastSideEffects: run.pastSideEffects,
+ Specials: &transactions.SpecialAddresses{},
+ FeeCredit: &credit,
+ PooledApplicationBudget: &pooledApplicationBudget,
}
+ evalParams[i] = ep
+ }
+
+ for i := range r.runs {
+ run := &r.runs[i]
+ r.debugger.SaveProgram(run.name, run.program, run.source, run.offsetToLine, run.states)
- run.result.pass, run.result.err = run.eval(ep)
+ run.result.pass, run.result.err = run.eval(evalParams[i])
if run.result.err != nil {
failed++
}
@@ -555,25 +573,44 @@ func (r *LocalRunner) Run() (bool, error) {
return false, fmt.Errorf("no program to debug")
}
- run := r.runs[0]
+ pooledApplicationBudget := uint64(0)
+ credit, _ := transactions.FeeCredit(r.txnGroup, r.proto.MinTxnFee)
+ // ignore error since fees are not important for debugging in most cases
- ep := logic.EvalParams{
- Proto: &r.proto,
- Txn: &r.txnGroup[groupIndex],
- TxnGroup: r.txnGroup,
- GroupIndex: run.groupIndex,
- PastSideEffects: run.pastSideEffects,
- Specials: &transactions.SpecialAddresses{},
- }
+ evalParams := make([]logic.EvalParams, len(r.runs))
+ for i, run := range r.runs {
+ if run.mode == modeStateful {
+ if r.proto.EnableAppCostPooling {
+ pooledApplicationBudget += uint64(r.proto.MaxAppProgramCost)
+ } else {
+ pooledApplicationBudget = uint64(r.proto.MaxAppProgramCost)
+ }
+ }
+ ep := logic.EvalParams{
+ Proto: &r.proto,
+ Txn: &r.txnGroup[run.groupIndex],
+ TxnGroup: r.txnGroup,
+ GroupIndex: run.groupIndex,
+ PastSideEffects: run.pastSideEffects,
+ Specials: &transactions.SpecialAddresses{},
+ FeeCredit: &credit,
+ PooledApplicationBudget: &pooledApplicationBudget,
+ }
- // Workaround for Go's nil/empty interfaces nil check after nil assignment, i.e.
- // r.debugger = nil
- // ep.Debugger = r.debugger
- // if ep.Debugger != nil // FALSE
- if r.debugger != nil {
- r.debugger.SaveProgram(run.name, run.program, run.source, run.offsetToLine, run.states)
- ep.Debugger = r.debugger
+ // Workaround for Go's nil/empty interfaces nil check after nil assignment, i.e.
+ // r.debugger = nil
+ // ep.Debugger = r.debugger
+ // if ep.Debugger != nil // FALSE
+ if r.debugger != nil {
+ r.debugger.SaveProgram(run.name, run.program, run.source, run.offsetToLine, run.states)
+ ep.Debugger = r.debugger
+ }
+
+ evalParams[i] = ep
}
+ run := r.runs[0]
+ ep := evalParams[0]
+
return run.eval(ep)
}
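
The loops above (in both RunAll and Run) are where pooling happens: every stateful program in the group contributes MaxAppProgramCost to one shared budget when EnableAppCostPooling is set; without pooling, each program gets exactly one budget's worth. Condensed, with runs, proto, and modeStateful standing in for the surrounding fields:

    // Sketch of the budget rule above. With MaxAppProgramCost = 700 (the
    // current consensus value), four pooled app calls share 2800 units.
    budget := uint64(0)
    for _, run := range runs {
        if run.mode == modeStateful {
            if proto.EnableAppCostPooling {
                budget += uint64(proto.MaxAppProgramCost) // pooled: grows per app call
            } else {
                budget = uint64(proto.MaxAppProgramCost) // unpooled: one program's worth
            }
        }
    }
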
diff --git a/cmd/tealdbg/localLedger.go b/cmd/tealdbg/localLedger.go
index 23e58e3f3..364186f55 100644
--- a/cmd/tealdbg/localLedger.go
+++ b/cmd/tealdbg/localLedger.go
@@ -32,6 +32,7 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/apply"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
)
@@ -280,7 +281,7 @@ func (l *localLedger) BlockHdr(basics.Round) (bookkeeping.BlockHeader, error) {
return bookkeeping.BlockHeader{}, nil
}
-func (l *localLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledger.TxLease) error {
+func (l *localLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
return nil
}
diff --git a/cmd/tealdbg/local_test.go b/cmd/tealdbg/local_test.go
index d4d917a40..d78c402e8 100644
--- a/cmd/tealdbg/local_test.go
+++ b/cmd/tealdbg/local_test.go
@@ -18,6 +18,7 @@ package main
import (
"encoding/json"
+ "fmt"
"net/http"
"net/http/httptest"
"reflect"
@@ -226,14 +227,6 @@ func makeSampleBalanceRecord(addr basics.Address, assetIdx basics.AssetIndex, ap
return br
}
-func makeSampleSerializedBalanceRecord(addr basics.Address, toJSON bool) []byte {
- br := makeSampleBalanceRecord(addr, 50, 100)
- if toJSON {
- return protocol.EncodeJSON(&br)
- }
- return protocol.EncodeMsgp(&br)
-}
-
func TestBalanceJSONInput(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
@@ -325,7 +318,7 @@ func TestDebugEnvironment(t *testing.T) {
Txn: transactions.Transaction{
Header: transactions.Header{
Sender: sender,
- Fee: basics.MicroAlgos{Raw: 100},
+ Fee: basics.MicroAlgos{Raw: 1000},
Note: []byte{1, 2, 3},
},
ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
@@ -1087,7 +1080,7 @@ func TestDebugTxSubmit(t *testing.T) {
Type: protocol.ApplicationCallTx,
Header: transactions.Header{
Sender: sender,
- Fee: basics.MicroAlgos{Raw: 100},
+ Fee: basics.MicroAlgos{Raw: 1000},
Note: []byte{1, 2, 3},
},
ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
@@ -1138,3 +1131,322 @@ int 1`
a.NoError(err)
a.True(pass)
}
+
+func TestDebugFeePooling(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ sender, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU")
+ a.NoError(err)
+
+ source := `#pragma version 5
+itxn_begin
+int pay
+itxn_field TypeEnum
+int 0
+itxn_field Amount
+txn Sender
+itxn_field Receiver
+itxn_submit
+int 1`
+
+ ops, err := logic.AssembleString(source)
+ a.NoError(err)
+ prog := ops.Program
+
+ stxn := transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Note: []byte{1, 2, 3},
+ },
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: 0,
+ ApprovalProgram: prog,
+ ClearStateProgram: prog,
+ },
+ },
+ }
+
+ appIdx := basics.AppIndex(1)
+ br := basics.BalanceRecord{
+ Addr: sender,
+ AccountData: basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: 5000000},
+ AppParams: map[basics.AppIndex]basics.AppParams{
+ appIdx: {
+ ApprovalProgram: prog,
+ ClearStateProgram: prog,
+ },
+ },
+ },
+ }
+ balanceBlob := protocol.EncodeMsgp(&br)
+
+ // two test cases: success with enough fees, failure otherwise
+ var tests = []struct {
+ pass bool
+ fee uint64
+ }{
+ {true, 2000},
+ {false, 1500},
+ }
+ for _, test := range tests {
+ t.Run(fmt.Sprintf("fee=%d", test.fee), func(t *testing.T) {
+
+ stxn.Txn.Fee = basics.MicroAlgos{Raw: test.fee}
+ encoded := protocol.EncodeJSON(&stxn)
+
+ ds := DebugParams{
+ ProgramNames: []string{"test"},
+ BalanceBlob: balanceBlob,
+ TxnBlob: encoded,
+ Proto: string(protocol.ConsensusCurrentVersion),
+ Round: 222,
+ LatestTimestamp: 333,
+ GroupIndex: 0,
+ RunMode: "application",
+ AppID: uint64(appIdx),
+ }
+
+ local := MakeLocalRunner(nil)
+ err = local.Setup(&ds)
+ a.NoError(err)
+
+ pass, err := local.Run()
+ if test.pass {
+ a.NoError(err)
+ a.True(pass)
+ } else {
+ a.Error(err)
+ a.False(pass)
+ }
+ })
+ }
+}
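
The fee thresholds follow from fee pooling: the outer application call spawns one inner payment, so the group must cover two transactions' worth of the consensus minimum fee, 2 x 1000 = 2000 microAlgos. A fee of 2000 just covers it; 1500 leaves the inner transaction 500 short, hence the expected failure.
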
+
+func TestDebugCostPooling(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ sender, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU")
+ a.NoError(err)
+
+ // cost is 2000 (ecdsa_pk_recover) + 130 (keccak256) + 8 (remaining opcodes),
+ // so it needs 4 app calls pooled together to cover the budget
+ source := `#pragma version 5
+byte "hello from ethereum" // msg
+keccak256
+int 0 // v
+byte 0x745e8f55ac6189ee89ed707c36694868e3903988fbf776c8096c45da2e60c638 // r
+byte 0x30c8e4a9b5d2eb53ddc6294587dd00bed8afe2c45dd72f6b4cf752e46d5ba681 // s
+ecdsa_pk_recover Secp256k1
+concat // convert public key X and Y to ethereum addr
+keccak256
+substring 12 32
+byte 0x5ce9454909639d2d17a3f753ce7d93fa0b9ab12e // addr
+==
+`
+ ops, err := logic.AssembleString(source)
+ a.NoError(err)
+ prog := ops.Program
+
+ ops, err = logic.AssembleString("#pragma version 2\nint 1")
+ a.NoError(err)
+ trivial := ops.Program
+
+ stxn := transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: 1000},
+ Note: []byte{1, 2, 3},
+ },
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: 0,
+ ApprovalProgram: prog,
+ ClearStateProgram: trivial,
+ },
+ },
+ }
+
+ appIdx := basics.AppIndex(1)
+ trivialAppIdx := basics.AppIndex(2)
+ trivialStxn := transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: 1000},
+ },
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: trivialAppIdx,
+ },
+ },
+ }
+
+ br := basics.BalanceRecord{
+ Addr: sender,
+ AccountData: basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: 5000000},
+ AppParams: map[basics.AppIndex]basics.AppParams{
+ appIdx: {
+ ApprovalProgram: prog,
+ ClearStateProgram: trivial,
+ },
+ trivialAppIdx: {
+ ApprovalProgram: trivial,
+ ClearStateProgram: trivial,
+ },
+ },
+ },
+ }
+ balanceBlob := protocol.EncodeMsgp(&br)
+
+ var tests = []struct {
+ pass bool
+ additionalApps int
+ }{
+ {false, 2},
+ {true, 3},
+ }
+ for _, test := range tests {
+ t.Run(fmt.Sprintf("txn-count=%d", test.additionalApps+1), func(t *testing.T) {
+ txnBlob := protocol.EncodeMsgp(&stxn)
+ for i := 0; i < test.additionalApps; i++ {
+ val, err := getRandomAddress()
+ a.NoError(err)
+ trivialStxn.Txn.Note = val[:]
+ txnBlob = append(txnBlob, protocol.EncodeMsgp(&trivialStxn)...)
+ }
+
+ ds := DebugParams{
+ ProgramNames: []string{"test"},
+ BalanceBlob: balanceBlob,
+ TxnBlob: txnBlob,
+ Proto: string(protocol.ConsensusCurrentVersion),
+ Round: 222,
+ LatestTimestamp: 333,
+ GroupIndex: 0,
+ RunMode: "application",
+ AppID: uint64(appIdx),
+ }
+
+ local := MakeLocalRunner(nil)
+ err = local.Setup(&ds)
+ a.NoError(err)
+
+ pass, err := local.Run()
+ if test.pass {
+ a.NoError(err)
+ a.True(pass)
+ } else {
+ a.Error(err)
+ a.Contains(err.Error(), "dynamic cost budget exceeded")
+ a.False(pass)
+ }
+ })
+ }
+}
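
The 3-versus-4 cutover matches the cost comment at the top of the test: the program costs about 2000 + 130 + 8 = 2138 budget units, and with each app call contributing MaxAppProgramCost (700 under current consensus parameters) a pooled group of three provides 2100 < 2138 and fails, while four provide 2800 >= 2138 and pass.
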
+
+func TestGroupTxnIdx(t *testing.T) {
+
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ ddrBlob := `{
+ "accounts": [
+ {
+ "address": "FPVVJ7N42QRVP2OWBGZ3XPTQAZFQNBYHJGZ2CJFOATAQNWFA5NWB4MPWBQ",
+ "amount": 3999999999497000,
+ "amount-without-pending-rewards": 3999999999497000,
+ "created-apps": [
+ {
+ "id": 1,
+ "params": {
+ "approval-program": "BSABATEQIhJAABExEIEGEkAAByJAAAEAIkMiQ4EAQw==",
+ "clear-state-program": "BYEBQw==",
+ "creator": "FPVVJ7N42QRVP2OWBGZ3XPTQAZFQNBYHJGZ2CJFOATAQNWFA5NWB4MPWBQ"
+ }
+ }
+ ],
+ "pending-rewards": 0,
+ "rewards": 0,
+ "round": 2,
+ "status": "Online"
+ },
+ {
+ "address": "WCS6TVPJRBSARHLN2326LRU5BYVJZUKI2VJ53CAWKYYHDE455ZGKANWMGM",
+ "amount": 500000,
+ "amount-without-pending-rewards": 500000,
+ "pending-rewards": 0,
+ "rewards": 0,
+ "round": 2,
+ "status": "Offline"
+ }
+ ],
+ "apps": [
+ {
+ "id": 1,
+ "params": {
+ "approval-program": "BSABATEQIhJAABExEIEGEkAAByJAAAEAIkMiQ4EAQw==",
+ "clear-state-program": "BYEBQw==",
+ "creator": "FPVVJ7N42QRVP2OWBGZ3XPTQAZFQNBYHJGZ2CJFOATAQNWFA5NWB4MPWBQ"
+ }
+ }
+ ],
+ "latest-timestamp": 1634765269,
+ "protocol-version": "future",
+ "round": 2,
+ "sources": null,
+ "txns": [
+ {
+ "sig": "8Z/ECart3vFBSKp5sFuNRN4coliea4TE+xttZNn9E15DJ8GZ++kgtZKhG4Tiopv7r61Lqh8VBuyuTf9AC3uQBQ==",
+ "txn": {
+ "amt": 5000,
+ "fee": 1000,
+ "fv": 3,
+ "gen": "sandnet-v1",
+ "gh": "pjM5GFR9MpNkWIibcfqtu/a2OIZTBy/mSQc++sF1r0Q=",
+ "grp": "2ca4sSb5aGab0k065qIT3J3AcB5YWYezrRh6bLB0ve8=",
+ "lv": 1003,
+ "note": "V+GSPgDmLQo=",
+ "rcv": "WCS6TVPJRBSARHLN2326LRU5BYVJZUKI2VJ53CAWKYYHDE455ZGKANWMGM",
+ "snd": "FPVVJ7N42QRVP2OWBGZ3XPTQAZFQNBYHJGZ2CJFOATAQNWFA5NWB4MPWBQ",
+ "type": "pay"
+ }
+ },
+ {
+ "sig": "4/gj+6rllN/Uc55kAJ0BOKTzoUJKJ7gExE3vp7cr5vC9XVStx0QNZq1DFXLhpTZnTQAl3zOrGzIxfS5HOpSyCg==",
+ "txn": {
+ "apid": 1,
+ "fee": 1000,
+ "fv": 3,
+ "gh": "pjM5GFR9MpNkWIibcfqtu/a2OIZTBy/mSQc++sF1r0Q=",
+ "grp": "2ca4sSb5aGab0k065qIT3J3AcB5YWYezrRh6bLB0ve8=",
+ "lv": 1003,
+ "note": "+fl8jkXqyFc=",
+ "snd": "FPVVJ7N42QRVP2OWBGZ3XPTQAZFQNBYHJGZ2CJFOATAQNWFA5NWB4MPWBQ",
+ "type": "appl"
+ }
+ }
+ ]
+ }`
+
+ ds := DebugParams{
+ Proto: string(protocol.ConsensusCurrentVersion),
+ DdrBlob: []byte(ddrBlob),
+ GroupIndex: 0,
+ RunMode: "application",
+ }
+
+ local := MakeLocalRunner(nil)
+ err := local.Setup(&ds)
+ a.NoError(err)
+
+ pass, err := local.Run()
+ a.NoError(err)
+ a.True(pass)
+}
diff --git a/cmd/tealdbg/main.go b/cmd/tealdbg/main.go
index 6a1004c27..ea09dd1ed 100644
--- a/cmd/tealdbg/main.go
+++ b/cmd/tealdbg/main.go
@@ -240,7 +240,7 @@ func debugLocal(args []string) {
if len(txnFile) > 0 {
txnBlob, err = ioutil.ReadFile(txnFile)
if err != nil {
- log.Fatalf("Error txn reading %s: %s", balanceFile, err)
+ log.Fatalf("Error txn reading %s: %s", txnFile, err)
}
}
diff --git a/cmd/updater/versionCmd.go b/cmd/updater/versionCmd.go
index 4b700ef1e..5b8345bf8 100644
--- a/cmd/updater/versionCmd.go
+++ b/cmd/updater/versionCmd.go
@@ -28,20 +28,26 @@ import (
var (
destFile string
versionBucket string
+ packageName string
specificVersion uint64
semanticOutput bool
)
+// DefaultPackageName is the package we'll use by default.
+const DefaultPackageName = "node"
+
func init() {
versionCmd.AddCommand(checkCmd)
versionCmd.AddCommand(getCmd)
- checkCmd.Flags().StringVarP(&versionBucket, "bucket", "b", "", "S3 bucket containing updates.")
checkCmd.Flags().BoolVarP(&semanticOutput, "semantic", "s", false, "Human readable semantic version output.")
+ checkCmd.Flags().StringVarP(&packageName, "package", "p", DefaultPackageName, "Get version of a specific package.")
+ checkCmd.Flags().StringVarP(&versionBucket, "bucket", "b", "", "S3 bucket containing updates.")
getCmd.Flags().StringVarP(&destFile, "outputFile", "o", "", "Path for downloaded file (required).")
- getCmd.Flags().StringVarP(&versionBucket, "bucket", "b", "", "S3 bucket containing updates.")
getCmd.Flags().Uint64VarP(&specificVersion, "version", "v", 0, "Specific version to download.")
+ getCmd.Flags().StringVarP(&packageName, "package", "p", DefaultPackageName, "Get version of a specific package.")
+ getCmd.Flags().StringVarP(&versionBucket, "bucket", "b", "", "S3 bucket containing updates.")
getCmd.MarkFlagRequired("outputFile")
}
@@ -67,7 +73,7 @@ var checkCmd = &cobra.Command{
if err != nil {
exitErrorf("Error creating s3 session %s\n", err.Error())
} else {
- version, _, err := s3Session.GetLatestUpdateVersion(channel)
+ version, _, err := s3Session.GetPackageVersion(channel, packageName, 0)
if err != nil {
exitErrorf("Error getting latest version from s3 %s\n", err.Error())
}
@@ -102,7 +108,7 @@ var getCmd = &cobra.Command{
if err != nil {
exitErrorf("Error creating s3 session %s\n", err.Error())
} else {
- version, name, err := s3Session.GetUpdateVersion(channel, specificVersion)
+ version, name, err := s3Session.GetPackageVersion(channel, packageName, specificVersion)
if err != nil {
exitErrorf("Error getting latest version from s3 %s\n", err.Error())
}
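
Note the version argument in the two call sites: checkCmd passes 0, presumably meaning "latest", matching the GetLatestUpdateVersion call it replaces, now resolved per package; getCmd forwards the user-supplied --version instead.
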
diff --git a/compactcert/abstractions.go b/compactcert/abstractions.go
index 6a369dee1..ac02ec2c8 100644
--- a/compactcert/abstractions.go
+++ b/compactcert/abstractions.go
@@ -24,7 +24,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
)
@@ -41,7 +41,7 @@ type Ledger interface {
Wait(basics.Round) chan struct{}
GenesisHash() crypto.Digest
BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
- CompactCertVoters(basics.Round) (*ledger.VotersForRound, error)
+ CompactCertVoters(basics.Round) (*ledgercore.VotersForRound, error)
}
// Network captures the aspects of the gossip network protocol that are
diff --git a/compactcert/worker.go b/compactcert/worker.go
index fb78a7300..d84f7848f 100644
--- a/compactcert/worker.go
+++ b/compactcert/worker.go
@@ -24,7 +24,7 @@ import (
"github.com/algorand/go-algorand/crypto/compactcert"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
@@ -36,7 +36,7 @@ import (
type builder struct {
*compactcert.Builder
- voters *ledger.VotersForRound
+ voters *ledgercore.VotersForRound
votersHdr bookkeeping.BlockHeader
}
diff --git a/compactcert/worker_test.go b/compactcert/worker_test.go
index e8b2554cc..adc784f0e 100644
--- a/compactcert/worker_test.go
+++ b/compactcert/worker_test.go
@@ -33,7 +33,6 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
@@ -122,8 +121,8 @@ func (s *testWorkerStubs) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, err
return hdr, nil
}
-func (s *testWorkerStubs) CompactCertVoters(r basics.Round) (*ledger.VotersForRound, error) {
- voters := &ledger.VotersForRound{
+func (s *testWorkerStubs) CompactCertVoters(r basics.Round) (*ledgercore.VotersForRound, error) {
+ voters := &ledgercore.VotersForRound{
Proto: config.Consensus[protocol.ConsensusFuture],
AddrToPos: make(map[basics.Address]uint64),
TotalWeight: basics.MicroAlgos{Raw: uint64(s.totalWeight)},
@@ -131,7 +130,7 @@ func (s *testWorkerStubs) CompactCertVoters(r basics.Round) (*ledger.VotersForRo
for i, k := range s.keysForVoters {
voters.AddrToPos[k.Parent] = uint64(i)
- voters.Participants = append(voters.Participants, compactcert.Participant{
+ voters.Participants = append(voters.Participants, basics.Participant{
PK: k.Voting.OneTimeSignatureVerifier,
Weight: 1,
KeyDilution: config.Consensus[protocol.ConsensusFuture].DefaultKeyDilution,
@@ -385,22 +384,36 @@ func TestLatestSigsFromThisNode(t *testing.T) {
// Wait for a compact cert to be formed, so we know the signer thread is caught up.
_ = <-s.txmsg
- latestSigs, err := w.LatestSigsFromThisNode()
- require.NoError(t, err)
- require.Equal(t, len(latestSigs), len(keys))
+ var latestSigs map[basics.Address]basics.Round
+ var err error
+ for x := 0; x < 10; x++ {
+ latestSigs, err = w.LatestSigsFromThisNode()
+ require.NoError(t, err)
+ if len(latestSigs) == len(keys) {
+ break
+ }
+ time.Sleep(256 * time.Millisecond)
+ }
+ require.Equal(t, len(keys), len(latestSigs))
for _, k := range keys {
require.Equal(t, latestSigs[k.Parent], basics.Round(2*proto.CompactCertRounds))
}
// Add a block that claims the compact cert is formed.
+ s.mu.Lock()
s.addBlock(3 * basics.Round(proto.CompactCertRounds))
+ s.mu.Unlock()
// Wait for the builder to discard the signatures.
- time.Sleep(time.Second)
-
- latestSigs, err = w.LatestSigsFromThisNode()
- require.NoError(t, err)
- require.Equal(t, len(latestSigs), 0)
+ for x := 0; x < 10; x++ {
+ latestSigs, err = w.LatestSigsFromThisNode()
+ require.NoError(t, err)
+ if len(latestSigs) == 0 {
+ break
+ }
+ time.Sleep(256 * time.Millisecond)
+ }
+ require.Equal(t, 0, len(latestSigs))
}
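
Both waits above replace a fixed one-second sleep with bounded polling (up to ten attempts, 256 ms apart), the usual cure for timing-dependent test flakes. The pattern distilled into a helper, as a sketch rather than anything in the patch:

    // eventually polls cond up to attempts times, sleeping interval between tries.
    func eventually(attempts int, interval time.Duration, cond func() bool) bool {
        for i := 0; i < attempts; i++ {
            if cond() {
                return true
            }
            time.Sleep(interval)
        }
        return false
    }
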
func TestWorkerRestart(t *testing.T) {
diff --git a/config/config.go b/config/config.go
index 7ba869c06..315d53289 100644
--- a/config/config.go
+++ b/config/config.go
@@ -23,8 +23,6 @@ import (
"os"
"os/user"
"path/filepath"
- "strings"
- "time"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/codecs"
@@ -48,378 +46,6 @@ const Mainnet protocol.NetworkID = "mainnet"
// GenesisJSONFile is the name of the genesis.json file
const GenesisJSONFile = "genesis.json"
-// Local holds the per-node-instance configuration settings for the protocol.
-// !!! WARNING !!!
-//
-// These versioned struct tags need to be maintained CAREFULLY and treated
-// like UNIVERSAL CONSTANTS - they should not be modified once committed.
-//
-// New fields may be added to the Local struct, along with a version tag
-// denoting a new version. When doing so, also update the
-// test/testdata/configs/config-v{n}.json and call "make generate" to regenerate the constants.
-//
-// !!! WARNING !!!
-type Local struct {
- // Version tracks the current version of the defaults so we can migrate old -> new
- // This is specifically important whenever we decide to change the default value
- // for an existing parameter. This field tag must be updated any time we add a new version.
- Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16"`
-
- // environmental (may be overridden)
- // When enabled, stores blocks indefinitally, otherwise, only the most recents blocks
- // are being kept around. ( the precise number of recent blocks depends on the consensus parameters )
- Archival bool `version[0]:"false"`
-
- // gossipNode.go
- // how many peers to propagate to?
- GossipFanout int `version[0]:"4"`
- NetAddress string `version[0]:""`
-
- // 1 * time.Minute = 60000000000 ns
- ReconnectTime time.Duration `version[0]:"60" version[1]:"60000000000"`
-
- // what we should tell people to connect to
- PublicAddress string `version[0]:""`
-
- MaxConnectionsPerIP int `version[3]:"30"`
-
- // 0 == disable
- PeerPingPeriodSeconds int `version[0]:"0"`
-
- // for https serving
- TLSCertFile string `version[0]:""`
- TLSKeyFile string `version[0]:""`
-
- // Logging
- BaseLoggerDebugLevel uint32 `version[0]:"1" version[1]:"4"`
- // if this is 0, do not produce agreement.cadaver
- CadaverSizeTarget uint64 `version[0]:"1073741824"`
-
- // IncomingConnectionsLimit specifies the max number of long-lived incoming
- // connections. 0 means no connections allowed. -1 is unbounded.
- IncomingConnectionsLimit int `version[0]:"-1" version[1]:"10000"`
-
- // BroadcastConnectionsLimit specifies the number of connections that
- // will receive broadcast (gossip) messages from this node. If the
- // node has more connections than this number, it will send broadcasts
- // to the top connections by priority (outgoing connections first, then
- // by money held by peers based on their participation key). 0 means
- // no outgoing messages (not even transaction broadcasting to outgoing
- // peers). -1 means unbounded (default).
- BroadcastConnectionsLimit int `version[4]:"-1"`
-
- // AnnounceParticipationKey specifies that this node should announce its
- // participation key (with the largest stake) to its gossip peers. This
- // allows peers to prioritize our connection, if necessary, in case of a
- // DoS attack. Disabling this means that the peers will not have any
- // additional information to allow them to prioritize our connection.
- AnnounceParticipationKey bool `version[4]:"true"`
-
- // PriorityPeers specifies peer IP addresses that should always get
- // outgoing broadcast messages from this node.
- PriorityPeers map[string]bool `version[4]:""`
-
- // To make sure the algod process does not run out of FDs, algod ensures
- // that RLIMIT_NOFILE exceeds the max number of incoming connections (i.e.,
- // IncomingConnectionsLimit) by at least ReservedFDs. ReservedFDs are meant
- // to leave room for short-lived FDs like DNS queries, SQLite files, etc.
- ReservedFDs uint64 `version[2]:"256"`
-
- // local server
- // API endpoint address
- EndpointAddress string `version[0]:"127.0.0.1:0"`
-
- // timeouts passed to the rest http.Server implementation
- RestReadTimeoutSeconds int `version[4]:"15"`
- RestWriteTimeoutSeconds int `version[4]:"120"`
-
- // SRV-based phonebook
- DNSBootstrapID string `version[0]:"<network>.algorand.network"`
-
- // Log file size limit in bytes
- LogSizeLimit uint64 `version[0]:"1073741824"`
-
- // text/template for creating log archive filename.
- // Available template vars:
- // Time at start of log: {{.Year}} {{.Month}} {{.Day}} {{.Hour}} {{.Minute}} {{.Second}}
- // Time at end of log: {{.EndYear}} {{.EndMonth}} {{.EndDay}} {{.EndHour}} {{.EndMinute}} {{.EndSecond}}
- //
- // If the filename ends with .gz or .bz2 it will be compressed.
- //
- // default: "node.archive.log" (no rotation, clobbers previous archive)
- LogArchiveName string `version[4]:"node.archive.log"`
-
- // LogArchiveMaxAge will be parsed by time.ParseDuration().
- // Valid units are 's' seconds, 'm' minutes, 'h' hours
- LogArchiveMaxAge string `version[4]:""`
-
- // number of consecutive attempts to catchup after which we replace the peers we're connected to
- CatchupFailurePeerRefreshRate int `version[0]:"10"`
-
- // where should the node exporter listen for metrics
- NodeExporterListenAddress string `version[0]:":9100"`
-
- // enable metric reporting flag
- EnableMetricReporting bool `version[0]:"false"`
-
- // enable top accounts reporting flag
- EnableTopAccountsReporting bool `version[0]:"false"`
-
- // enable agreement reporting flag. Currently only prints additional period events.
- EnableAgreementReporting bool `version[3]:"false"`
-
- // enable agreement timing metrics flag
- EnableAgreementTimeMetrics bool `version[3]:"false"`
-
- // The path to the node exporter.
- NodeExporterPath string `version[0]:"./node_exporter"`
-
- // The fallback DNS resolver address that would be used if the system resolver would fail to retrieve SRV records
- FallbackDNSResolverAddress string `version[0]:""`
-
- // exponential increase factor of transaction pool's fee threshold, should always be 2 in production
- TxPoolExponentialIncreaseFactor uint64 `version[0]:"2"`
-
- SuggestedFeeBlockHistory int `version[0]:"3"`
-
- // TxPoolSize is the number of transactions that fit in the transaction pool
- TxPoolSize int `version[0]:"50000" version[5]:"15000"`
-
- // number of seconds allowed for syncing transactions
- TxSyncTimeoutSeconds int64 `version[0]:"30"`
-
- // number of seconds between transaction synchronizations
- TxSyncIntervalSeconds int64 `version[0]:"60"`
-
- // the number of incoming message hashes buckets.
- IncomingMessageFilterBucketCount int `version[0]:"5"`
-
- // the size of each incoming message hash bucket.
- IncomingMessageFilterBucketSize int `version[0]:"512"`
-
- // the number of outgoing message hashes buckets.
- OutgoingMessageFilterBucketCount int `version[0]:"3"`
-
- // the size of each outgoing message hash bucket.
- OutgoingMessageFilterBucketSize int `version[0]:"128"`
-
- // enable the filtering of outgoing messages
- EnableOutgoingNetworkMessageFiltering bool `version[0]:"true"`
-
- // enable the filtering of incoming messages
- EnableIncomingMessageFilter bool `version[0]:"false"`
-
- // control enabling / disabling deadlock detection.
- // negative (-1) to disable, positive (1) to enable, 0 for default.
- DeadlockDetection int `version[1]:"0"`
-
- // Prefer to run algod Hosted (under algoh)
- // Observed by `goal` for now.
- RunHosted bool `version[3]:"false"`
-
- // The maximal number of blocks that catchup will fetch in parallel.
- // If less than Protocol.SeedLookback, then Protocol.SeedLookback will be used as to limit the catchup.
- // Setting this variable to 0 would disable the catchup
- CatchupParallelBlocks uint64 `version[3]:"50" version[5]:"16"`
-
- // Generate AssembleBlockMetrics telemetry event
- EnableAssembleStats bool `version[0]:""`
-
- // Generate ProcessBlockMetrics telemetry event
- EnableProcessBlockStats bool `version[0]:""`
-
- // SuggestedFeeSlidingWindowSize is number of past blocks that will be considered in computing the suggested fee
- SuggestedFeeSlidingWindowSize uint32 `version[3]:"50"`
-
- // the max size the sync server would return
- TxSyncServeResponseSize int `version[3]:"1000000"`
-
- // IsIndexerActive indicates whether to activate the indexer for fast retrieval of transactions
- // Note -- Indexer cannot operate on non Archival nodes
- IsIndexerActive bool `version[3]:"false"`
-
- // UseXForwardedForAddress indicates whether or not the node should use the X-Forwarded-For HTTP Header when
- // determining the source of a connection. If used, it should be set to the string "X-Forwarded-For", unless the
- // proxy vendor provides another header field. In the case of CloudFlare proxy, the "CF-Connecting-IP" header
- // field can be used.
- UseXForwardedForAddressField string `version[0]:""`
-
- // ForceRelayMessages indicates whether the network library relay messages even in the case that no NetAddress was specified.
- ForceRelayMessages bool `version[0]:"false"`
-
- // ConnectionsRateLimitingWindowSeconds is being used in conjunction with ConnectionsRateLimitingCount;
- // see ConnectionsRateLimitingCount description for further information. Providing a zero value
- // in this variable disables the connection rate limiting.
- ConnectionsRateLimitingWindowSeconds uint `version[4]:"1"`
-
- // ConnectionsRateLimitingCount is being used along with ConnectionsRateLimitingWindowSeconds to determine if
- // a connection request should be accepted or not. The gossip network examine all the incoming requests in the past
- // ConnectionsRateLimitingWindowSeconds seconds that share the same origin. If the total count exceed the ConnectionsRateLimitingCount
- // value, the connection is refused.
- ConnectionsRateLimitingCount uint `version[4]:"60"`
-
- // EnableRequestLogger enabled the logging of the incoming requests to the telemetry server.
- EnableRequestLogger bool `version[4]:"false"`
-
- // PeerConnectionsUpdateInterval defines the interval at which the peer connections information is being sent to the
- // telemetry ( when enabled ). Defined in seconds.
- PeerConnectionsUpdateInterval int `version[5]:"3600"`
-
- // EnableProfiler enables the go pprof endpoints, should be false if
- // the algod api will be exposed to untrusted individuals
- EnableProfiler bool `version[0]:"false"`
-
- // TelemetryToLog records messages to node.log that are normally sent to remote event monitoring
- TelemetryToLog bool `version[5]:"true"`
-
- // DNSSecurityFlags instructs algod validating DNS responses.
- // Possible fla values
- // 0x00 - disabled
- // 0x01 (dnssecSRV) - validate SRV response
- // 0x02 (dnssecRelayAddr) - validate relays' names to addresses resolution
- // 0x04 (dnssecTelemetryAddr) - validate telemetry and metrics names to addresses resolution
- // ...
- DNSSecurityFlags uint32 `version[6]:"1"`
-
- // EnablePingHandler controls whether the gossip node would respond to ping messages with a pong message.
- EnablePingHandler bool `version[6]:"true"`
-
- // DisableOutgoingConnectionThrottling disables the connection throttling of the network library, which
- // allow the network library to continuesly disconnect relays based on their relative ( and absolute ) performance.
- DisableOutgoingConnectionThrottling bool `version[5]:"false"`
-
- // NetworkProtocolVersion overrides network protocol version ( if present )
- NetworkProtocolVersion string `version[6]:""`
-
- // CatchpointInterval sets the interval at which catchpoint are being generated. Setting this to 0 disables the catchpoint from being generated.
- // See CatchpointTracking for more details.
- CatchpointInterval uint64 `version[7]:"10000"`
-
- // CatchpointFileHistoryLength defines how many catchpoint files we want to store back.
- // 0 means don't store any, -1 mean unlimited and positive number suggest the number of most recent catchpoint files.
- CatchpointFileHistoryLength int `version[7]:"365"`
-
- // EnableLedgerService enables the ledger serving service. The functionality of this depends on NetAddress, which must also be provided.
- // This functionality is required for the catchpoint catchup.
- EnableLedgerService bool `version[7]:"false"`
-
- // EnableBlockService enables the block serving service. The functionality of this depends on NetAddress, which must also be provided.
- // This functionality is required for the catchup.
- EnableBlockService bool `version[7]:"false"`
-
- // EnableGossipBlockService enables the block serving service over the gossip network. The functionality of this depends on NetAddress, which must also be provided.
- // This functionality is required for the relays to perform catchup from nodes.
- EnableGossipBlockService bool `version[8]:"true"`
-
- // CatchupHTTPBlockFetchTimeoutSec controls how long the http query for fetching a block from a relay would take before giving up and trying another relay.
- CatchupHTTPBlockFetchTimeoutSec int `version[9]:"4"`
-
- // CatchupGossipBlockFetchTimeoutSec controls how long the gossip query for fetching a block from a relay would take before giving up and trying another relay.
- CatchupGossipBlockFetchTimeoutSec int `version[9]:"4"`
-
- // CatchupLedgerDownloadRetryAttempts controls the number of attempt the ledger fetching would be attempted before giving up catching up to the provided catchpoint.
- CatchupLedgerDownloadRetryAttempts int `version[9]:"50"`
-
- // CatchupLedgerDownloadRetryAttempts controls the number of attempt the block fetching would be attempted before giving up catching up to the provided catchpoint.
- CatchupBlockDownloadRetryAttempts int `version[9]:"1000"`
-
- // EnableDeveloperAPI enables teal/compile, teal/dryrun API endpoints.
- // This functionality is disabled by default.
- EnableDeveloperAPI bool `version[9]:"false"`
-
- // OptimizeAccountsDatabaseOnStartup controls whether the accounts database would be optimized
- // on algod startup.
- OptimizeAccountsDatabaseOnStartup bool `version[10]:"false"`
-
- // CatchpointTracking determines if catchpoints are going to be tracked. The value is interpreted as follows:
- // A value of -1 means "don't track catchpoints".
- // A value of 1 means "track catchpoints as long as CatchpointInterval is also set to a positive non-zero value". If CatchpointInterval <= 0, no catchpoint tracking would be performed.
- // A value of 0 means automatic, which is the default value. In this mode, a non archival node would not track the catchpoints, and an archival node would track the catchpoints as long as CatchpointInterval > 0.
- // Other values of CatchpointTracking would give a warning in the log file, and would behave as if the default value was provided.
- CatchpointTracking int64 `version[11]:"0"`
-
- // LedgerSynchronousMode defines the synchronous mode used by the ledger database. The supported options are:
- // 0 - SQLite continues without syncing as soon as it has handed data off to the operating system.
- // 1 - SQLite database engine will still sync at the most critical moments, but less often than in FULL mode.
- // 2 - SQLite database engine will use the xSync method of the VFS to ensure that all content is safely written to the disk surface prior to continuing. On Mac OS, the data is additionally syncronized via fullfsync.
- // 3 - In addition to what being done in 2, it provides additional durability if the commit is followed closely by a power loss.
- // for further information see the description of SynchronousMode in dbutil.go
- LedgerSynchronousMode int `version[12]:"2"`
-
- // AccountsRebuildSynchronousMode defines the synchronous mode used by the ledger database while the account database is being rebuilt. This is not a typical operational usecase,
- // and is expected to happen only on either startup ( after enabling the catchpoint interval, or on certain database upgrades ) or during fast catchup. The values specified here
- // and their meanings are identical to the ones in LedgerSynchronousMode.
- AccountsRebuildSynchronousMode int `version[12]:"1"`
-
- // MaxCatchpointDownloadDuration defines the maximum duration a client will be keeping the outgoing connection of a catchpoint download request open for processing before
- // shutting it down. Networks that have large catchpoint files, slow connection or slow storage could be a good reason to increase this value. Note that this is a client-side only
- // configuration value, and it's independent of the actual catchpoint file size.
- MaxCatchpointDownloadDuration time.Duration `version[13]:"7200000000000"`
-
- // MinCatchpointFileDownloadBytesPerSecond defines the minimal download speed that would be considered to be "acceptable" by the catchpoint file fetcher, measured in bytes per seconds. If the
- // provided stream speed drops below this threshold, the connection would be recycled. Note that this field is evaluated per catchpoint "chunk" and not on it's own. If this field is zero,
- // the default of 20480 would be used.
- MinCatchpointFileDownloadBytesPerSecond uint64 `version[13]:"20480"`
-
- // TraceServer is a host:port to report graph propagation trace info to.
- NetworkMessageTraceServer string `version[13]:""`
-
- // VerifiedTranscationsCacheSize defines the number of transactions that the verified transactions cache would hold before cycling the cache storage in a round-robin fashion.
- VerifiedTranscationsCacheSize int `version[14]:"30000"`
-
- // EnableCatchupFromArchiveServers controls which peers the catchup service would use in order to catchup.
- // When enabled, the catchup service would use the archive servers before falling back to the relays.
- // On networks that doesn't have archive servers, this becomes a no-op, as the catchup service would have no
- // archive server to pick from, and therefore automatically selects one of the relay nodes.
- EnableCatchupFromArchiveServers bool `version[15]:"false"`
-
- // DisableLocalhostConnectionRateLimit controls whether the incoming connection rate limit would apply for
- // connections that are originating from the local machine. Setting this to "true", allow to create large
- // local-machine networks that won't trip the incoming connection limit observed by relays.
- DisableLocalhostConnectionRateLimit bool `version[16]:"true"`
-
- // BlockServiceCustomFallbackEndpoints is a comma delimited list of endpoints which the block service uses to
- // redirect the http requests to in case it does not have the round. If it is not specified, will check
- // EnableBlockServiceFallbackToArchiver.
- BlockServiceCustomFallbackEndpoints string `version[16]:""`
-
- // EnableBlockServiceFallbackToArchiver controls whether the block service redirects the http requests to
- // an archiver or return StatusNotFound (404) when in does not have the requested round, and
- // BlockServiceCustomFallbackEndpoints is empty.
- // The archiver is randomly selected, if none is available, will return StatusNotFound (404).
- EnableBlockServiceFallbackToArchiver bool `version[16]:"true"`
-
- // CatchupBlockValidateMode is a development and testing configuration used by the catchup service.
- // It can be used to omit certain validations to speed up the catchup process, or to apply extra validations which are redundant in normal operation.
- // This field is a bit-field with:
- // bit 0: (default 0) 0: verify the block certificate; 1: skip this validation
- // bit 1: (default 0) 0: verify payset committed hash in block header matches payset hash; 1: skip this validation
- // bit 2: (default 0) 0: don't verify the transaction signatures on the block are valid; 1: verify the transaction signatures on block
- // bit 3: (default 0) 0: don't verify that the hash of the recomputed payset matches the hash of the payset committed in the block header; 1: do perform the above verification
- // Note: not all permutations of the above bitset are currently functional. In particular, the ones that are functional are:
- // 0 : default behavior.
- // 3 : speed up catchup by skipping necessary validations
- // 12 : perform all validation methods (normal and additional). These extra tests helps to verify the integrity of the compiled executable against
- // previously used executabled, and would not provide any additional security guarantees.
- CatchupBlockValidateMode int `version[16]:"0"`
-
- // Generate AccountUpdates telemetry event
- EnableAccountUpdatesStats bool `version[16]:"false"`
-
- // Time interval in nanoseconds for generating accountUpdates telemetry event
- AccountUpdatesStatsInterval time.Duration `version[16]:"5000000000"`
-
- // ParticipationKeysRefreshInterval is the duration between two consecutive checks to see if new participation
- // keys have been placed on the genesis directory.
- ParticipationKeysRefreshInterval time.Duration `version[16]:"60000000000"`
-
- // DisableNetworking disables all the incoming and outgoing communication a node would perform. This is useful
- // when we have a single-node private network, where there is no other nodes that need to be communicated with.
- // features like catchpoint catchup would be rendered completly non-operational, and many of the node inner
- // working would be completly dis-functional.
- DisableNetworking bool `version[16]:"false"`
-}
-
// Filenames of config files within the configdir (e.g. ~/.algorand)
// ConfigFilename is the name of the config.json file where we store per-algod-instance settings
@@ -439,6 +65,10 @@ const CrashFilename = "crash.sqlite"
// It is used to track in-progress compact certificates.
const CompactCertFilename = "compactcert.sqlite"
+// ParticipationRegistryFilename is the name of the participation registry database file.
+// It is used for tracking participation key metadata.
+const ParticipationRegistryFilename = "partregistry.sqlite"
+
// ConfigurableConsensusProtocolsFilename defines a set of consensus prototocols that
// are to be loaded from the data directory ( if present ), to override the
// built-in supported consensus protocols.
@@ -501,48 +131,6 @@ func loadConfig(reader io.Reader, config *Local) error {
return dec.Decode(config)
}
-// DNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers
-func (cfg Local) DNSBootstrapArray(networkID protocol.NetworkID) (bootstrapArray []string) {
- dnsBootstrapString := cfg.DNSBootstrap(networkID)
- bootstrapArray = strings.Split(dnsBootstrapString, ";")
- // omit zero length entries from the result set.
- for i := len(bootstrapArray) - 1; i >= 0; i-- {
- if len(bootstrapArray[i]) == 0 {
- bootstrapArray = append(bootstrapArray[:i], bootstrapArray[i+1:]...)
- }
- }
- return
-}
-
-// DNSBootstrap returns the network-specific DNSBootstrap identifier
-func (cfg Local) DNSBootstrap(network protocol.NetworkID) string {
- // if user hasn't modified the default DNSBootstrapID in the configuration
- // file and we're targeting a devnet ( via genesis file ), we the
- // explicit devnet network bootstrap.
- if defaultLocal.DNSBootstrapID == cfg.DNSBootstrapID {
- if network == Devnet {
- return "devnet.algodev.network"
- } else if network == Betanet {
- return "betanet.algodev.network"
- }
- }
- return strings.Replace(cfg.DNSBootstrapID, "<network>", string(network), -1)
-}
-
-// SaveToDisk writes the Local settings into a root/ConfigFilename file
-func (cfg Local) SaveToDisk(root string) error {
- configpath := filepath.Join(root, ConfigFilename)
- filename := os.ExpandEnv(configpath)
- return cfg.SaveToFile(filename)
-}
-
-// SaveToFile saves the config to a specific filename, allowing overriding the default name
-func (cfg Local) SaveToFile(filename string) error {
- var alwaysInclude []string
- alwaysInclude = append(alwaysInclude, "Version")
- return codecs.SaveNonDefaultValuesToFile(filename, cfg, defaultLocal, alwaysInclude, true)
-}
-
type phonebookBlackWhiteList struct {
Include []string
}
@@ -649,47 +237,9 @@ const (
dnssecTelemetryAddr
)
-// DNSSecuritySRVEnforced returns true if SRV response verification enforced
-func (cfg Local) DNSSecuritySRVEnforced() bool {
- return cfg.DNSSecurityFlags&dnssecSRV != 0
-}
-
-// DNSSecurityRelayAddrEnforced returns true if relay name to ip addr resolution enforced
-func (cfg Local) DNSSecurityRelayAddrEnforced() bool {
- return cfg.DNSSecurityFlags&dnssecRelayAddr != 0
-}
-
-// DNSSecurityTelemeryAddrEnforced returns true if relay name to ip addr resolution enforced
-func (cfg Local) DNSSecurityTelemeryAddrEnforced() bool {
- return cfg.DNSSecurityFlags&dnssecTelemetryAddr != 0
-}
-
-// ProposalAssemblyTime is the max amount of time to spend on generating a proposal block. This should eventually have it's own configurable value.
-const ProposalAssemblyTime time.Duration = 250 * time.Millisecond
-
const (
catchupValidationModeCertificate = 1
catchupValidationModePaysetHash = 2
catchupValidationModeVerifyTransactionSignatures = 4
catchupValidationModeVerifyApplyData = 8
)
-
-// CatchupVerifyCertificate returns true if certificate verification is needed
-func (cfg Local) CatchupVerifyCertificate() bool {
- return cfg.CatchupBlockValidateMode&catchupValidationModeCertificate == 0
-}
-
-// CatchupVerifyPaysetHash returns true if payset hash verification is needed
-func (cfg Local) CatchupVerifyPaysetHash() bool {
- return cfg.CatchupBlockValidateMode&catchupValidationModePaysetHash == 0
-}
-
-// CatchupVerifyTransactionSignatures returns true if transactions signature verification is needed
-func (cfg Local) CatchupVerifyTransactionSignatures() bool {
- return cfg.CatchupBlockValidateMode&catchupValidationModeVerifyTransactionSignatures != 0
-}
-
-// CatchupVerifyApplyData returns true if verifying the ApplyData of the payset needed
-func (cfg Local) CatchupVerifyApplyData() bool {
- return cfg.CatchupBlockValidateMode&catchupValidationModeVerifyApplyData != 0
-}
diff --git a/config/consensus.go b/config/consensus.go
index c9589006e..f0ffa1a51 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -391,6 +391,10 @@ type ConsensusParams struct {
EnableKeyregCoherencyCheck bool
EnableExtraPagesOnAppUpdate bool
+
+ // MaxProposedExpiredOnlineAccounts is the maximum number of expired online
+ // accounts that a block proposer may propose to take offline.
+ MaxProposedExpiredOnlineAccounts int
}
// PaysetCommitType enumerates possible ways for the block header to commit to
@@ -465,6 +469,10 @@ var MaxExtraAppProgramLen int
// supported by any of the consensus protocols. Used for decoding purposes.
var MaxAvailableAppProgramLen int
+// MaxProposedExpiredOnlineAccounts is the maximum number of expired online
+// accounts that a block proposer may propose to take offline.
+var MaxProposedExpiredOnlineAccounts int
+
func checkSetMax(value int, curMax *int) {
if value > *curMax {
*curMax = value
@@ -501,6 +509,7 @@ func checkSetAllocBounds(p ConsensusParams) {
// Its value is much larger than any possible reasonable MaxLogCalls value in future
checkSetMax(p.MaxAppProgramLen, &MaxLogCalls)
checkSetMax(p.MaxInnerTransactions, &MaxInnerTransactions)
+ checkSetMax(p.MaxProposedExpiredOnlineAccounts, &MaxProposedExpiredOnlineAccounts)
}
// SaveConfigurableConsensus saves the configurable protocols file to the provided data directory.
@@ -1042,6 +1051,11 @@ func initConsensusProtocols() {
vFuture.CompactCertWeightThreshold = (1 << 32) * 30 / 100
vFuture.CompactCertSecKQ = 128
+ // Enable TEAL 6 / AVM 1.1
+ vFuture.LogicSigVersion = 6
+
+ vFuture.MaxProposedExpiredOnlineAccounts = 32
+
Consensus[protocol.ConsensusFuture] = vFuture
}
diff --git a/config/defaultsGenerator/defaultsGenerator.go b/config/defaultsGenerator/defaultsGenerator.go
index df5f4e14f..70a8e7b13 100644
--- a/config/defaultsGenerator/defaultsGenerator.go
+++ b/config/defaultsGenerator/defaultsGenerator.go
@@ -39,7 +39,7 @@ var jsonExampleFileName = flag.String("j", "", "Name of the json example file")
var autoGenHeader = `
// This file was auto generated by ./config/defaultsGenerator/defaultsGenerator.go, and SHOULD NOT BE MODIFIED in any way
-// If you want to make changes to this file, make the corresponding changes to Local in config.go and run "go generate".
+// If you want to make changes to this file, make the corresponding changes to Local in localTemplate.go and run "go generate".
`
// printExit prints the given formatted string ( i.e. just like fmt.Printf ), with the defaultGenerator executable program name
diff --git a/config/localTemplate.go b/config/localTemplate.go
new file mode 100644
index 000000000..20a141e8c
--- /dev/null
+++ b/config/localTemplate.go
@@ -0,0 +1,500 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package config
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/codecs"
+)
+
+// Local holds the per-node-instance configuration settings for the protocol.
+// !!! WARNING !!!
+//
+// These versioned struct tags need to be maintained CAREFULLY and treated
+// like UNIVERSAL CONSTANTS - they should not be modified once committed.
+//
+// New fields may be added to the Local struct, along with a version tag
+// denoting a new version. When doing so, also update the
+// test/testdata/configs/config-v{n}.json and call "make generate" to regenerate the constants.
+//
+// !!! WARNING !!!
+type Local struct {
+ // Version tracks the current version of the defaults so we can migrate old -> new
+ // This is specifically important whenever we decide to change the default value
+ // for an existing parameter. This field tag must be updated any time we add a new version.
+ Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19"`
+
+ // environmental (may be overridden)
+ // When enabled, stores blocks indefinitely; otherwise, only the most recent blocks
+ // are kept around. ( the precise number of recent blocks depends on the consensus parameters )
+ Archival bool `version[0]:"false"`
+
+ // gossipNode.go
+ // how many peers to propagate to?
+ GossipFanout int `version[0]:"4"`
+ NetAddress string `version[0]:""`
+
+ // 1 * time.Minute = 60000000000 ns
+ ReconnectTime time.Duration `version[0]:"60" version[1]:"60000000000"`
+
+ // what we should tell people to connect to
+ PublicAddress string `version[0]:""`
+
+ MaxConnectionsPerIP int `version[3]:"30"`
+
+ // 0 == disable
+ PeerPingPeriodSeconds int `version[0]:"0"`
+
+ // for https serving
+ TLSCertFile string `version[0]:""`
+ TLSKeyFile string `version[0]:""`
+
+ // Logging
+ BaseLoggerDebugLevel uint32 `version[0]:"1" version[1]:"4"`
+ // if this is 0, do not produce agreement.cadaver
+ CadaverSizeTarget uint64 `version[0]:"1073741824"`
+
+ // IncomingConnectionsLimit specifies the max number of long-lived incoming
+ // connections. 0 means no connections allowed. -1 is unbounded.
+ // Estimating 5MB per incoming connection, 5MB*800 = 4GB
+ IncomingConnectionsLimit int `version[0]:"-1" version[1]:"10000" version[17]:"800"`
+
+ // BroadcastConnectionsLimit specifies the number of connections that
+ // will receive broadcast (gossip) messages from this node. If the
+ // node has more connections than this number, it will send broadcasts
+ // to the top connections by priority (outgoing connections first, then
+ // by money held by peers based on their participation key). 0 means
+ // no outgoing messages (not even transaction broadcasting to outgoing
+ // peers). -1 means unbounded (default).
+ BroadcastConnectionsLimit int `version[4]:"-1"`
+
+ // AnnounceParticipationKey specifies that this node should announce its
+ // participation key (with the largest stake) to its gossip peers. This
+ // allows peers to prioritize our connection, if necessary, in case of a
+ // DoS attack. Disabling this means that the peers will not have any
+ // additional information to allow them to prioritize our connection.
+ AnnounceParticipationKey bool `version[4]:"true"`
+
+ // PriorityPeers specifies peer IP addresses that should always get
+ // outgoing broadcast messages from this node.
+ PriorityPeers map[string]bool `version[4]:""`
+
+ // To make sure the algod process does not run out of FDs, algod ensures
+ // that RLIMIT_NOFILE exceeds the max number of incoming connections (i.e.,
+ // IncomingConnectionsLimit) by at least ReservedFDs. ReservedFDs are meant
+ // to leave room for short-lived FDs like DNS queries, SQLite files, etc.
+ ReservedFDs uint64 `version[2]:"256"`
+
+ // local server
+ // API endpoint address
+ EndpointAddress string `version[0]:"127.0.0.1:0"`
+
+ // timeouts passed to the rest http.Server implementation
+ RestReadTimeoutSeconds int `version[4]:"15"`
+ RestWriteTimeoutSeconds int `version[4]:"120"`
+
+ // SRV-based phonebook
+ DNSBootstrapID string `version[0]:"<network>.algorand.network"`
+
+ // Log file size limit in bytes
+ LogSizeLimit uint64 `version[0]:"1073741824"`
+
+ // text/template for creating log archive filename.
+ // Available template vars:
+ // Time at start of log: {{.Year}} {{.Month}} {{.Day}} {{.Hour}} {{.Minute}} {{.Second}}
+ // Time at end of log: {{.EndYear}} {{.EndMonth}} {{.EndDay}} {{.EndHour}} {{.EndMinute}} {{.EndSecond}}
+ //
+ // If the filename ends with .gz or .bz2 it will be compressed.
+ //
+ // default: "node.archive.log" (no rotation, clobbers previous archive)
+ LogArchiveName string `version[4]:"node.archive.log"`
+
+ // LogArchiveMaxAge will be parsed by time.ParseDuration().
+ // Valid units are 's' seconds, 'm' minutes, 'h' hours
+ LogArchiveMaxAge string `version[4]:""`
+
+ // number of consecutive attempts to catch up after which we replace the peers we're connected to
+ CatchupFailurePeerRefreshRate int `version[0]:"10"`
+
+ // where should the node exporter listen for metrics
+ NodeExporterListenAddress string `version[0]:":9100"`
+
+ // enable metric reporting flag
+ EnableMetricReporting bool `version[0]:"false"`
+
+ // enable top accounts reporting flag
+ EnableTopAccountsReporting bool `version[0]:"false"`
+
+ // enable agreement reporting flag. Currently only prints additional period events.
+ EnableAgreementReporting bool `version[3]:"false"`
+
+ // enable agreement timing metrics flag
+ EnableAgreementTimeMetrics bool `version[3]:"false"`
+
+ // The path to the node exporter.
+ NodeExporterPath string `version[0]:"./node_exporter"`
+
+ // The fallback DNS resolver address that would be used if the system resolver fails to retrieve SRV records
+ FallbackDNSResolverAddress string `version[0]:""`
+
+ // exponential increase factor of transaction pool's fee threshold, should always be 2 in production
+ TxPoolExponentialIncreaseFactor uint64 `version[0]:"2"`
+
+ SuggestedFeeBlockHistory int `version[0]:"3"`
+
+ // TxPoolSize is the number of transactions that fit in the transaction pool
+ TxPoolSize int `version[0]:"50000" version[5]:"15000"`
+
+ // number of seconds allowed for syncing transactions
+ TxSyncTimeoutSeconds int64 `version[0]:"30"`
+
+ // number of seconds between transaction synchronizations
+ TxSyncIntervalSeconds int64 `version[0]:"60"`
+
+ // the number of incoming message hash buckets.
+ IncomingMessageFilterBucketCount int `version[0]:"5"`
+
+ // the size of each incoming message hash bucket.
+ IncomingMessageFilterBucketSize int `version[0]:"512"`
+
+ // the number of outgoing message hash buckets.
+ OutgoingMessageFilterBucketCount int `version[0]:"3"`
+
+ // the size of each outgoing message hash bucket.
+ OutgoingMessageFilterBucketSize int `version[0]:"128"`
+
+ // enable the filtering of outgoing messages
+ EnableOutgoingNetworkMessageFiltering bool `version[0]:"true"`
+
+ // enable the filtering of incoming messages
+ EnableIncomingMessageFilter bool `version[0]:"false"`
+
+ // control enabling / disabling deadlock detection.
+ // negative (-1) to disable, positive (1) to enable, 0 for default.
+ DeadlockDetection int `version[1]:"0"`
+
+ // Prefer to run algod Hosted (under algoh)
+ // Observed by `goal` for now.
+ RunHosted bool `version[3]:"false"`
+
+ // The maximal number of blocks that catchup will fetch in parallel.
+ // If less than Protocol.SeedLookback, then Protocol.SeedLookback will be used to limit the catchup.
+ // Setting this variable to 0 disables the catchup.
+ CatchupParallelBlocks uint64 `version[3]:"50" version[5]:"16"`
+
+ // Generate AssembleBlockMetrics telemetry event
+ EnableAssembleStats bool `version[0]:""`
+
+ // Generate ProcessBlockMetrics telemetry event
+ EnableProcessBlockStats bool `version[0]:""`
+
+ // SuggestedFeeSlidingWindowSize is number of past blocks that will be considered in computing the suggested fee
+ SuggestedFeeSlidingWindowSize uint32 `version[3]:"50"`
+
+ // the max size the sync server would return
+ TxSyncServeResponseSize int `version[3]:"1000000"`
+
+ // IsIndexerActive indicates whether to activate the indexer for fast retrieval of transactions
+ // Note -- Indexer cannot operate on non-Archival nodes
+ IsIndexerActive bool `version[3]:"false"`
+
+ // UseXForwardedForAddress indicates whether or not the node should use the X-Forwarded-For HTTP Header when
+ // determining the source of a connection. If used, it should be set to the string "X-Forwarded-For", unless the
+ // proxy vendor provides another header field. In the case of CloudFlare proxy, the "CF-Connecting-IP" header
+ // field can be used.
+ UseXForwardedForAddressField string `version[0]:""`
+
+ // ForceRelayMessages indicates whether the network library should relay messages even when no NetAddress was specified.
+ ForceRelayMessages bool `version[0]:"false"`
+
+ // ConnectionsRateLimitingWindowSeconds is used in conjunction with ConnectionsRateLimitingCount;
+ // see the ConnectionsRateLimitingCount description for further information. Providing a zero value
+ // in this variable disables the connection rate limiting.
+ ConnectionsRateLimitingWindowSeconds uint `version[4]:"1"`
+
+ // ConnectionsRateLimitingCount is used along with ConnectionsRateLimitingWindowSeconds to determine if
+ // a connection request should be accepted or not. The gossip network examines all the incoming requests in the past
+ // ConnectionsRateLimitingWindowSeconds seconds that share the same origin. If the total count exceeds the ConnectionsRateLimitingCount
+ // value, the connection is refused.
+ ConnectionsRateLimitingCount uint `version[4]:"60"`
+
+ // EnableRequestLogger enables the logging of the incoming requests to the telemetry server.
+ EnableRequestLogger bool `version[4]:"false"`
+
+ // PeerConnectionsUpdateInterval defines the interval, in seconds, at which the peer connections information
+ // is sent to telemetry ( when enabled ).
+ PeerConnectionsUpdateInterval int `version[5]:"3600"`
+
+ // EnableProfiler enables the go pprof endpoints, should be false if
+ // the algod api will be exposed to untrusted individuals
+ EnableProfiler bool `version[0]:"false"`
+
+ // TelemetryToLog records messages to node.log that are normally sent to remote event monitoring
+ TelemetryToLog bool `version[5]:"true"`
+
+ // DNSSecurityFlags instructs algod to validate DNS responses.
+ // Possible flag values
+ // 0x00 - disabled
+ // 0x01 (dnssecSRV) - validate SRV response
+ // 0x02 (dnssecRelayAddr) - validate relays' names to addresses resolution
+ // 0x04 (dnssecTelemetryAddr) - validate telemetry and metrics names to addresses resolution
+ // ...
+ DNSSecurityFlags uint32 `version[6]:"1"`
+
+ // EnablePingHandler controls whether the gossip node would respond to ping messages with a pong message.
+ EnablePingHandler bool `version[6]:"true"`
+
+ // DisableOutgoingConnectionThrottling disables the connection throttling of the network library, which
+ // allows the network library to continuously disconnect relays based on their relative ( and absolute ) performance.
+ DisableOutgoingConnectionThrottling bool `version[5]:"false"`
+
+ // NetworkProtocolVersion overrides network protocol version ( if present )
+ NetworkProtocolVersion string `version[6]:""`
+
+ // CatchpointInterval sets the interval at which catchpoints are generated. Setting this to 0 disables catchpoint generation.
+ // See CatchpointTracking for more details.
+ CatchpointInterval uint64 `version[7]:"10000"`
+
+ // CatchpointFileHistoryLength defines how many catchpoint files we want to store back.
+ // 0 means don't store any, -1 means unlimited, and a positive number specifies how many of the most recent catchpoint files to retain.
+ CatchpointFileHistoryLength int `version[7]:"365"`
+
+ // EnableLedgerService enables the ledger serving service. The functionality of this depends on NetAddress, which must also be provided.
+ // This functionality is required for the catchpoint catchup.
+ EnableLedgerService bool `version[7]:"false"`
+
+ // EnableBlockService enables the block serving service. The functionality of this depends on NetAddress, which must also be provided.
+ // This functionality is required for the catchup.
+ EnableBlockService bool `version[7]:"false"`
+
+ // EnableGossipBlockService enables the block serving service over the gossip network. The functionality of this depends on NetAddress, which must also be provided.
+ // This functionality is required for the relays to perform catchup from nodes.
+ EnableGossipBlockService bool `version[8]:"true"`
+
+ // CatchupHTTPBlockFetchTimeoutSec controls how long the HTTP query for fetching a block from a relay is allowed to take before giving up and trying another relay.
+ CatchupHTTPBlockFetchTimeoutSec int `version[9]:"4"`
+
+ // CatchupGossipBlockFetchTimeoutSec controls how long the gossip query for fetching a block from a relay is allowed to take before giving up and trying another relay.
+ CatchupGossipBlockFetchTimeoutSec int `version[9]:"4"`
+
+ // CatchupLedgerDownloadRetryAttempts controls the number of times the ledger fetch is attempted before giving up on catching up to the provided catchpoint.
+ CatchupLedgerDownloadRetryAttempts int `version[9]:"50"`
+
+ // CatchupBlockDownloadRetryAttempts controls the number of times the block fetch is attempted before giving up on catching up to the provided catchpoint.
+ CatchupBlockDownloadRetryAttempts int `version[9]:"1000"`
+
+ // EnableDeveloperAPI enables teal/compile, teal/dryrun API endpoints.
+ // This functionality is disabled by default.
+ EnableDeveloperAPI bool `version[9]:"false"`
+
+ // OptimizeAccountsDatabaseOnStartup controls whether the accounts database would be optimized
+ // on algod startup.
+ OptimizeAccountsDatabaseOnStartup bool `version[10]:"false"`
+
+ // CatchpointTracking determines if catchpoints are going to be tracked. The value is interpreted as follows:
+ // A value of -1 means "don't track catchpoints".
+ // A value of 1 means "track catchpoints as long as CatchpointInterval is also set to a positive non-zero value". If CatchpointInterval <= 0, no catchpoint tracking is performed.
+ // A value of 0 means automatic, which is the default value. In this mode, a non-archival node does not track catchpoints, and an archival node tracks catchpoints as long as CatchpointInterval > 0.
+ // Any other value of CatchpointTracking produces a warning in the log file and behaves as if the default value was provided.
+ CatchpointTracking int64 `version[11]:"0"`
+
+ // LedgerSynchronousMode defines the synchronous mode used by the ledger database. The supported options are:
+ // 0 - SQLite continues without syncing as soon as it has handed data off to the operating system.
+ // 1 - SQLite database engine will still sync at the most critical moments, but less often than in FULL mode.
+ // 2 - SQLite database engine will use the xSync method of the VFS to ensure that all content is safely written to the disk surface prior to continuing. On Mac OS, the data is additionally synchronized via fullfsync.
+ // 3 - In addition to what is done in mode 2, it provides additional durability if the commit is followed closely by a power loss.
+ // For further information, see the description of SynchronousMode in dbutil.go.
+ LedgerSynchronousMode int `version[12]:"2"`
+
+ // AccountsRebuildSynchronousMode defines the synchronous mode used by the ledger database while the account database is being rebuilt. This is not a typical operational use case,
+ // and is expected to happen only on either startup ( after enabling the catchpoint interval, or on certain database upgrades ) or during fast catchup. The values specified here
+ // and their meanings are identical to the ones in LedgerSynchronousMode.
+ AccountsRebuildSynchronousMode int `version[12]:"1"`
+
+ // MaxCatchpointDownloadDuration defines the maximum duration a client keeps the outgoing connection of a catchpoint download request open before
+ // shutting it down. Large catchpoint files, slow connections, or slow storage are all good reasons to increase this value. Note that this is a client-side-only
+ // configuration value, and it is independent of the actual catchpoint file size.
+ MaxCatchpointDownloadDuration time.Duration `version[13]:"7200000000000"`
+
+ // MinCatchpointFileDownloadBytesPerSecond defines the minimal download speed that would be considered to be "acceptable" by the catchpoint file fetcher, measured in bytes per second. If the
+ // provided stream speed drops below this threshold, the connection is recycled. Note that this field is evaluated per catchpoint "chunk" and not on its own. If this field is zero,
+ // the default of 20480 is used.
+ MinCatchpointFileDownloadBytesPerSecond uint64 `version[13]:"20480"`
+
+ // NetworkMessageTraceServer is a host:port to report graph propagation trace info to.
+ NetworkMessageTraceServer string `version[13]:""`
+
+ // VerifiedTranscationsCacheSize defines the number of transactions that the verified transactions cache would hold before cycling the cache storage in a round-robin fashion.
+ VerifiedTranscationsCacheSize int `version[14]:"30000"`
+
+ // EnableCatchupFromArchiveServers controls which peers the catchup service uses in order to catch up.
+ // When enabled, the catchup service uses the archive servers before falling back to the relays.
+ // On networks that don't have archive servers, this becomes a no-op, as the catchup service has no
+ // archive server to pick from, and therefore automatically selects one of the relay nodes.
+ EnableCatchupFromArchiveServers bool `version[15]:"false"`
+
+ // DisableLocalhostConnectionRateLimit controls whether the incoming connection rate limit applies to
+ // connections originating from the local machine. Setting this to "true" allows creating large
+ // local-machine networks that won't trip the incoming connection limit observed by relays.
+ DisableLocalhostConnectionRateLimit bool `version[16]:"true"`
+
+ // BlockServiceCustomFallbackEndpoints is a comma-delimited list of endpoints to which the block service
+ // redirects HTTP requests when it does not have the requested round. If it is not specified, the node checks
+ // EnableBlockServiceFallbackToArchiver.
+ BlockServiceCustomFallbackEndpoints string `version[16]:""`
+
+ // EnableBlockServiceFallbackToArchiver controls whether the block service redirects HTTP requests to
+ // an archiver or returns StatusNotFound (404) when it does not have the requested round and
+ // BlockServiceCustomFallbackEndpoints is empty.
+ // The archiver is selected at random; if none is available, StatusNotFound (404) is returned.
+ EnableBlockServiceFallbackToArchiver bool `version[16]:"true"`
+
+ // CatchupBlockValidateMode is a development and testing configuration used by the catchup service.
+ // It can be used to omit certain validations to speed up the catchup process, or to apply extra validations which are redundant in normal operation.
+ // This field is a bit-field with:
+ // bit 0: (default 0) 0: verify the block certificate; 1: skip this validation
+ // bit 1: (default 0) 0: verify payset committed hash in block header matches payset hash; 1: skip this validation
+ // bit 2: (default 0) 0: don't verify the transaction signatures on the block are valid; 1: verify the transaction signatures on block
+ // bit 3: (default 0) 0: don't verify that the hash of the recomputed payset matches the hash of the payset committed in the block header; 1: do perform the above verification
+ // Note: not all permutations of the above bitset are currently functional. In particular, the ones that are functional are:
+ // 0 : default behavior.
+ // 3 : speed up catchup by skipping the certificate and payset-hash validations
+ // 12 : perform all validation methods (normal and additional). These extra tests help to verify the integrity of the compiled executable against
+ // previously used executables, and do not provide any additional security guarantees. ( a worked example follows the CatchupVerify* helpers below )
+ CatchupBlockValidateMode int `version[16]:"0"`
+
+ // Generate AccountUpdates telemetry event
+ EnableAccountUpdatesStats bool `version[16]:"false"`
+
+ // Time interval in nanoseconds for generating accountUpdates telemetry event
+ AccountUpdatesStatsInterval time.Duration `version[16]:"5000000000"`
+
+ // ParticipationKeysRefreshInterval is the duration between two consecutive checks to see if new participation
+ // keys have been placed in the genesis directory.
+ ParticipationKeysRefreshInterval time.Duration `version[16]:"60000000000"`
+
+ // DisableNetworking disables all the incoming and outgoing communication a node would perform. This is useful
+ // when we have a single-node private network, where there are no other nodes to communicate with.
+ // Features like catchpoint catchup would be rendered completely non-operational, and many of the node's inner
+ // workings would be completely non-functional.
+ DisableNetworking bool `version[16]:"false"`
+
+ // ForceFetchTransactions allows a node to be explicitly configured to retrieve all the transactions
+ // into its transaction pool, even when they would not otherwise be required because the node does not
+ // participate in consensus and is not used to relay transactions.
+ ForceFetchTransactions bool `version[17]:"false"`
+
+ // EnableVerbosedTransactionSyncLogging enables the transaction sync to write extensive
+ // message exchange information to the log file. This option is disabled by default,
+ // so that the log files do not grow too rapidly.
+ EnableVerbosedTransactionSyncLogging bool `version[17]:"false"`
+
+ // TransactionSyncDataExchangeRate overrides the auto-calculated data exchange rate between each
+ // pair of peers. The unit of the data exchange rate is in bytes per second. Setting the value to
+ // zero implies allowing the transaction sync to dynamically calculate the value.
+ TransactionSyncDataExchangeRate uint64 `version[17]:"0"`
+
+ // TransactionSyncSignificantMessageThreshold defines the size threshold a transaction sync
+ // message must meet before it can be used for calculating the data exchange rate. Setting this to zero
+ // uses the default values. The threshold is defined in units of bytes.
+ TransactionSyncSignificantMessageThreshold uint64 `version[17]:"0"`
+
+ // ProposalAssemblyTime is the max amount of time to spend on generating a proposal block.
+ ProposalAssemblyTime time.Duration `version[19]:"250000000"`
+}
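The version[n] struct tags above record, per field, the default that each config version introduced; the defaults generator and the config migration code consume them via reflection. A hedged, standalone sketch of how such tags can be read (illustrative only, not the package's actual migration code), using the IncomingConnectionsLimit tags from above:

    package main

    import (
        "fmt"
        "reflect"
    )

    type demoLocal struct {
        // Defaults changed at config versions 0, 1, and 17, per the tags above.
        IncomingConnectionsLimit int `version[0]:"-1" version[1]:"10000" version[17]:"800"`
    }

    func main() {
        f, _ := reflect.TypeOf(demoLocal{}).FieldByName("IncomingConnectionsLimit")
        for v := 0; v <= 19; v++ {
            // Lookup reports the recorded default and whether this version set one.
            if def, ok := f.Tag.Lookup(fmt.Sprintf("version[%d]", v)); ok {
                fmt.Printf("config version %d introduces default %s\n", v, def)
            }
        }
    }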
+
+// DNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers
+func (cfg Local) DNSBootstrapArray(networkID protocol.NetworkID) (bootstrapArray []string) {
+ dnsBootstrapString := cfg.DNSBootstrap(networkID)
+ bootstrapArray = strings.Split(dnsBootstrapString, ";")
+ // omit zero length entries from the result set.
+ for i := len(bootstrapArray) - 1; i >= 0; i-- {
+ if len(bootstrapArray[i]) == 0 {
+ bootstrapArray = append(bootstrapArray[:i], bootstrapArray[i+1:]...)
+ }
+ }
+ return
+}
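A usage sketch for the splitting behavior (a fragment assumed to run inside package config; the bootstrap value is hypothetical): entries are separated by ';' and zero-length entries are dropped.

    cfg := defaultLocal
    cfg.DNSBootstrapID = "relays.example;;backup.example" // note the empty middle entry
    ids := cfg.DNSBootstrapArray("testnet")
    // ids == []string{"relays.example", "backup.example"}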
+
+// DNSBootstrap returns the network-specific DNSBootstrap identifier
+func (cfg Local) DNSBootstrap(network protocol.NetworkID) string {
+ // if the user hasn't modified the default DNSBootstrapID in the configuration
+ // file and we're targeting a devnet ( via genesis file ), we use the
+ // explicit devnet network bootstrap.
+ if defaultLocal.DNSBootstrapID == cfg.DNSBootstrapID {
+ if network == Devnet {
+ return "devnet.algodev.network"
+ } else if network == Betanet {
+ return "betanet.algodev.network"
+ }
+ }
+ return strings.Replace(cfg.DNSBootstrapID, "<network>", string(network), -1)
+}
+
+// SaveToDisk writes the Local settings into a root/ConfigFilename file
+func (cfg Local) SaveToDisk(root string) error {
+ configpath := filepath.Join(root, ConfigFilename)
+ filename := os.ExpandEnv(configpath)
+ return cfg.SaveToFile(filename)
+}
+
+// SaveToFile saves the config to a specific filename, allowing overriding the default name
+func (cfg Local) SaveToFile(filename string) error {
+ var alwaysInclude []string
+ alwaysInclude = append(alwaysInclude, "Version")
+ return codecs.SaveNonDefaultValuesToFile(filename, cfg, defaultLocal, alwaysInclude, true)
+}
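Because SaveToFile delegates to codecs.SaveNonDefaultValuesToFile with defaultLocal as the baseline, only overridden fields (plus the always-included Version) land in the file. A hedged usage sketch, inside package config, with a hypothetical data directory:

    cfg := defaultLocal
    cfg.GossipFanout = 8 // override a single setting
    // The resulting config.json contains just Version and GossipFanout.
    if err := cfg.SaveToDisk("/var/lib/algorand"); err != nil {
        // handle the write error
    }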
+
+// DNSSecuritySRVEnforced returns true if SRV response verification is enforced
+func (cfg Local) DNSSecuritySRVEnforced() bool {
+ return cfg.DNSSecurityFlags&dnssecSRV != 0
+}
+
+// DNSSecurityRelayAddrEnforced returns true if relay name to ip addr resolution is enforced
+func (cfg Local) DNSSecurityRelayAddrEnforced() bool {
+ return cfg.DNSSecurityFlags&dnssecRelayAddr != 0
+}
+
+// DNSSecurityTelemeryAddrEnforced returns true if telemetry name to ip addr resolution is enforced
+func (cfg Local) DNSSecurityTelemeryAddrEnforced() bool {
+ return cfg.DNSSecurityFlags&dnssecTelemetryAddr != 0
+}
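Since DNSSecurityFlags is a bit mask over the dnssec* constants (0x01, 0x02, 0x04 per the field's comment), the three helpers decompose it independently. An illustrative fragment, assuming package config with fmt imported:

    cfg := defaultLocal
    cfg.DNSSecurityFlags = 0x01 | 0x02 // dnssecSRV | dnssecRelayAddr
    fmt.Println(cfg.DNSSecuritySRVEnforced())          // true
    fmt.Println(cfg.DNSSecurityRelayAddrEnforced())    // true
    fmt.Println(cfg.DNSSecurityTelemeryAddrEnforced()) // false: 0x04 not set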
+
+// CatchupVerifyCertificate returns true if certificate verification is needed
+func (cfg Local) CatchupVerifyCertificate() bool {
+ return cfg.CatchupBlockValidateMode&catchupValidationModeCertificate == 0
+}
+
+// CatchupVerifyPaysetHash returns true if payset hash verification is needed
+func (cfg Local) CatchupVerifyPaysetHash() bool {
+ return cfg.CatchupBlockValidateMode&catchupValidationModePaysetHash == 0
+}
+
+// CatchupVerifyTransactionSignatures returns true if transaction signature verification is needed
+func (cfg Local) CatchupVerifyTransactionSignatures() bool {
+ return cfg.CatchupBlockValidateMode&catchupValidationModeVerifyTransactionSignatures != 0
+}
+
+// CatchupVerifyApplyData returns true if verifying the ApplyData of the payset is needed
+func (cfg Local) CatchupVerifyApplyData() bool {
+ return cfg.CatchupBlockValidateMode&catchupValidationModeVerifyApplyData != 0
+}
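Note the inverted polarity: bits 0 and 1 disable a validation when set, while bits 2 and 3 enable one. A small truth-table sketch over the three supported modes (again assuming package config with fmt imported):

    for _, mode := range []int{0, 3, 12} {
        cfg := Local{CatchupBlockValidateMode: mode}
        fmt.Println(mode,
            cfg.CatchupVerifyCertificate(),           // bit 0 clear => verify
            cfg.CatchupVerifyPaysetHash(),            // bit 1 clear => verify
            cfg.CatchupVerifyTransactionSignatures(), // bit 2 set   => verify
            cfg.CatchupVerifyApplyData())             // bit 3 set   => verify
    }
    // 0  -> true  true  false false  (default behavior)
    // 3  -> false false false false  (skip certificate and payset-hash checks)
    // 12 -> true  true  true  true   (all validations)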
diff --git a/config/local_defaults.go b/config/local_defaults.go
index 725409e73..ae1aa7043 100644
--- a/config/local_defaults.go
+++ b/config/local_defaults.go
@@ -15,99 +15,104 @@
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
// This file was auto generated by ./config/defaultsGenerator/defaultsGenerator.go, and SHOULD NOT BE MODIFIED in any way
-// If you want to make changes to this file, make the corresponding changes to Local in config.go and run "go generate".
+// If you want to make changes to this file, make the corresponding changes to Local in localTemplate.go and run "go generate".
package config
var defaultLocal = Local{
- Version: 16,
- AccountUpdatesStatsInterval: 5000000000,
- AccountsRebuildSynchronousMode: 1,
- AnnounceParticipationKey: true,
- Archival: false,
- BaseLoggerDebugLevel: 4,
- BlockServiceCustomFallbackEndpoints: "",
- BroadcastConnectionsLimit: -1,
- CadaverSizeTarget: 1073741824,
- CatchpointFileHistoryLength: 365,
- CatchpointInterval: 10000,
- CatchpointTracking: 0,
- CatchupBlockDownloadRetryAttempts: 1000,
- CatchupBlockValidateMode: 0,
- CatchupFailurePeerRefreshRate: 10,
- CatchupGossipBlockFetchTimeoutSec: 4,
- CatchupHTTPBlockFetchTimeoutSec: 4,
- CatchupLedgerDownloadRetryAttempts: 50,
- CatchupParallelBlocks: 16,
- ConnectionsRateLimitingCount: 60,
- ConnectionsRateLimitingWindowSeconds: 1,
- DNSBootstrapID: "<network>.algorand.network",
- DNSSecurityFlags: 1,
- DeadlockDetection: 0,
- DisableLocalhostConnectionRateLimit: true,
- DisableNetworking: false,
- DisableOutgoingConnectionThrottling: false,
- EnableAccountUpdatesStats: false,
- EnableAgreementReporting: false,
- EnableAgreementTimeMetrics: false,
- EnableAssembleStats: false,
- EnableBlockService: false,
- EnableBlockServiceFallbackToArchiver: true,
- EnableCatchupFromArchiveServers: false,
- EnableDeveloperAPI: false,
- EnableGossipBlockService: true,
- EnableIncomingMessageFilter: false,
- EnableLedgerService: false,
- EnableMetricReporting: false,
- EnableOutgoingNetworkMessageFiltering: true,
- EnablePingHandler: true,
- EnableProcessBlockStats: false,
- EnableProfiler: false,
- EnableRequestLogger: false,
- EnableTopAccountsReporting: false,
- EndpointAddress: "127.0.0.1:0",
- FallbackDNSResolverAddress: "",
- ForceRelayMessages: false,
- GossipFanout: 4,
- IncomingConnectionsLimit: 10000,
- IncomingMessageFilterBucketCount: 5,
- IncomingMessageFilterBucketSize: 512,
- IsIndexerActive: false,
- LedgerSynchronousMode: 2,
- LogArchiveMaxAge: "",
- LogArchiveName: "node.archive.log",
- LogSizeLimit: 1073741824,
- MaxCatchpointDownloadDuration: 7200000000000,
- MaxConnectionsPerIP: 30,
- MinCatchpointFileDownloadBytesPerSecond: 20480,
- NetAddress: "",
- NetworkMessageTraceServer: "",
- NetworkProtocolVersion: "",
- NodeExporterListenAddress: ":9100",
- NodeExporterPath: "./node_exporter",
- OptimizeAccountsDatabaseOnStartup: false,
- OutgoingMessageFilterBucketCount: 3,
- OutgoingMessageFilterBucketSize: 128,
- ParticipationKeysRefreshInterval: 60000000000,
- PeerConnectionsUpdateInterval: 3600,
- PeerPingPeriodSeconds: 0,
- PriorityPeers: map[string]bool{},
- PublicAddress: "",
- ReconnectTime: 60000000000,
- ReservedFDs: 256,
- RestReadTimeoutSeconds: 15,
- RestWriteTimeoutSeconds: 120,
- RunHosted: false,
- SuggestedFeeBlockHistory: 3,
- SuggestedFeeSlidingWindowSize: 50,
- TLSCertFile: "",
- TLSKeyFile: "",
- TelemetryToLog: true,
- TxPoolExponentialIncreaseFactor: 2,
- TxPoolSize: 15000,
- TxSyncIntervalSeconds: 60,
- TxSyncServeResponseSize: 1000000,
- TxSyncTimeoutSeconds: 30,
- UseXForwardedForAddressField: "",
- VerifiedTranscationsCacheSize: 30000,
+ Version: 19,
+ AccountUpdatesStatsInterval: 5000000000,
+ AccountsRebuildSynchronousMode: 1,
+ AnnounceParticipationKey: true,
+ Archival: false,
+ BaseLoggerDebugLevel: 4,
+ BlockServiceCustomFallbackEndpoints: "",
+ BroadcastConnectionsLimit: -1,
+ CadaverSizeTarget: 1073741824,
+ CatchpointFileHistoryLength: 365,
+ CatchpointInterval: 10000,
+ CatchpointTracking: 0,
+ CatchupBlockDownloadRetryAttempts: 1000,
+ CatchupBlockValidateMode: 0,
+ CatchupFailurePeerRefreshRate: 10,
+ CatchupGossipBlockFetchTimeoutSec: 4,
+ CatchupHTTPBlockFetchTimeoutSec: 4,
+ CatchupLedgerDownloadRetryAttempts: 50,
+ CatchupParallelBlocks: 16,
+ ConnectionsRateLimitingCount: 60,
+ ConnectionsRateLimitingWindowSeconds: 1,
+ DNSBootstrapID: "<network>.algorand.network",
+ DNSSecurityFlags: 1,
+ DeadlockDetection: 0,
+ DisableLocalhostConnectionRateLimit: true,
+ DisableNetworking: false,
+ DisableOutgoingConnectionThrottling: false,
+ EnableAccountUpdatesStats: false,
+ EnableAgreementReporting: false,
+ EnableAgreementTimeMetrics: false,
+ EnableAssembleStats: false,
+ EnableBlockService: false,
+ EnableBlockServiceFallbackToArchiver: true,
+ EnableCatchupFromArchiveServers: false,
+ EnableDeveloperAPI: false,
+ EnableGossipBlockService: true,
+ EnableIncomingMessageFilter: false,
+ EnableLedgerService: false,
+ EnableMetricReporting: false,
+ EnableOutgoingNetworkMessageFiltering: true,
+ EnablePingHandler: true,
+ EnableProcessBlockStats: false,
+ EnableProfiler: false,
+ EnableRequestLogger: false,
+ EnableTopAccountsReporting: false,
+ EnableVerbosedTransactionSyncLogging: false,
+ EndpointAddress: "127.0.0.1:0",
+ FallbackDNSResolverAddress: "",
+ ForceFetchTransactions: false,
+ ForceRelayMessages: false,
+ GossipFanout: 4,
+ IncomingConnectionsLimit: 800,
+ IncomingMessageFilterBucketCount: 5,
+ IncomingMessageFilterBucketSize: 512,
+ IsIndexerActive: false,
+ LedgerSynchronousMode: 2,
+ LogArchiveMaxAge: "",
+ LogArchiveName: "node.archive.log",
+ LogSizeLimit: 1073741824,
+ MaxCatchpointDownloadDuration: 7200000000000,
+ MaxConnectionsPerIP: 30,
+ MinCatchpointFileDownloadBytesPerSecond: 20480,
+ NetAddress: "",
+ NetworkMessageTraceServer: "",
+ NetworkProtocolVersion: "",
+ NodeExporterListenAddress: ":9100",
+ NodeExporterPath: "./node_exporter",
+ OptimizeAccountsDatabaseOnStartup: false,
+ OutgoingMessageFilterBucketCount: 3,
+ OutgoingMessageFilterBucketSize: 128,
+ ParticipationKeysRefreshInterval: 60000000000,
+ PeerConnectionsUpdateInterval: 3600,
+ PeerPingPeriodSeconds: 0,
+ PriorityPeers: map[string]bool{},
+ ProposalAssemblyTime: 250000000,
+ PublicAddress: "",
+ ReconnectTime: 60000000000,
+ ReservedFDs: 256,
+ RestReadTimeoutSeconds: 15,
+ RestWriteTimeoutSeconds: 120,
+ RunHosted: false,
+ SuggestedFeeBlockHistory: 3,
+ SuggestedFeeSlidingWindowSize: 50,
+ TLSCertFile: "",
+ TLSKeyFile: "",
+ TelemetryToLog: true,
+ TransactionSyncDataExchangeRate: 0,
+ TransactionSyncSignificantMessageThreshold: 0,
+ TxPoolExponentialIncreaseFactor: 2,
+ TxPoolSize: 15000,
+ TxSyncIntervalSeconds: 60,
+ TxSyncServeResponseSize: 1000000,
+ TxSyncTimeoutSeconds: 30,
+ UseXForwardedForAddressField: "",
+ VerifiedTranscationsCacheSize: 30000,
}
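These defaults interact with loadConfig (shown earlier in this diff): the decoder is typically run over a struct pre-populated from defaultLocal, and JSON keys absent from the user's config.json simply leave the corresponding defaults in place. A hedged sketch, inside package config with strings imported:

    cfg := defaultLocal // start from the v19 defaults above
    err := loadConfig(strings.NewReader(`{"GossipFanout": 8}`), &cfg)
    // err == nil; cfg.GossipFanout == 8, every other field keeps its default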
diff --git a/config/version.go b/config/version.go
index 1980378d9..61d4ff58a 100644
--- a/config/version.go
+++ b/config/version.go
@@ -33,7 +33,7 @@ const VersionMajor = 3
// VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced.
// Not enforced until after initial public release (x > 0).
-const VersionMinor = 0
+const VersionMinor = 2
// Version is the type holding our full version information.
type Version struct {
diff --git a/crypto/batchverifier.go b/crypto/batchverifier.go
index 71df901d7..c7f84fa45 100644
--- a/crypto/batchverifier.go
+++ b/crypto/batchverifier.go
@@ -29,7 +29,7 @@ const minBatchVerifierAlloc = 16
// Batch verifications errors
var (
- ErrBatchVerificationFailed = errors.New("At least on signature didn't pass verification")
+ ErrBatchVerificationFailed = errors.New("At least one signature didn't pass verification")
ErrZeroTranscationsInBatch = errors.New("Could not validate empty signature set")
)
diff --git a/crypto/compactcert/builder.go b/crypto/compactcert/builder.go
index f830a40ff..5966ca9a7 100644
--- a/crypto/compactcert/builder.go
+++ b/crypto/compactcert/builder.go
@@ -45,7 +45,7 @@ type Builder struct {
sigs []sigslot // Indexed by pos in participants
sigsHasValidL bool // The L values in sigs are consistent with weights
signedWeight uint64 // Total weight of signatures so far
- participants []Participant
+ participants []basics.Participant
parttree *merklearray.Tree
// Cached cert, if Build() was called and no subsequent
@@ -57,7 +57,7 @@ type Builder struct {
// to be signed, as well as other security parameters, are specified in
// param. The participants that will sign the message are in part and
// parttree.
-func MkBuilder(param Params, part []Participant, parttree *merklearray.Tree) (*Builder, error) {
+func MkBuilder(param Params, part []basics.Participant, parttree *merklearray.Tree) (*Builder, error) {
npart := len(part)
b := &Builder{
diff --git a/crypto/compactcert/builder_test.go b/crypto/compactcert/builder_test.go
index 0bab55da7..13738b5c3 100644
--- a/crypto/compactcert/builder_test.go
+++ b/crypto/compactcert/builder_test.go
@@ -36,7 +36,7 @@ func (m TestMessage) ToBeHashed() (protocol.HashID, []byte) {
}
type PartCommit struct {
- participants []Participant
+ participants []basics.Participant
}
func (pc PartCommit) Length() uint64 {
@@ -78,10 +78,10 @@ func TestBuildVerify(t *testing.T) {
// Share the key; we allow the same vote key to appear in multiple accounts..
key := crypto.GenerateOneTimeSignatureSecrets(0, 1)
- var parts []Participant
+ var parts []basics.Participant
var sigs []crypto.OneTimeSignature
for i := 0; i < npartHi; i++ {
- part := Participant{
+ part := basics.Participant{
PK: key.OneTimeSignatureVerifier,
Weight: uint64(totalWeight / 2 / npartHi),
KeyDilution: 10000,
@@ -91,7 +91,7 @@ func TestBuildVerify(t *testing.T) {
}
for i := 0; i < npartLo; i++ {
- part := Participant{
+ part := basics.Participant{
PK: key.OneTimeSignatureVerifier,
Weight: uint64(totalWeight / 2 / npartLo),
KeyDilution: 10000,
@@ -165,12 +165,12 @@ func BenchmarkBuildVerify(b *testing.B) {
SecKQ: 128,
}
- var parts []Participant
+ var parts []basics.Participant
var partkeys []*crypto.OneTimeSignatureSecrets
var sigs []crypto.OneTimeSignature
for i := 0; i < npart; i++ {
key := crypto.GenerateOneTimeSignatureSecrets(0, 1)
- part := Participant{
+ part := basics.Participant{
PK: key.OneTimeSignatureVerifier,
Weight: uint64(totalWeight / npart),
KeyDilution: 10000,
diff --git a/crypto/compactcert/msgp_gen.go b/crypto/compactcert/msgp_gen.go
index a883b1cfa..2f2301653 100644
--- a/crypto/compactcert/msgp_gen.go
+++ b/crypto/compactcert/msgp_gen.go
@@ -26,14 +26,6 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
-// Participant
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
// Reveal
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -663,158 +655,6 @@ func (z *CompactOneTimeSignature) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
-func (z *Participant) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0001Len := uint32(3)
- var zb0001Mask uint8 /* 4 bits */
- if (*z).KeyDilution == 0 {
- zb0001Len--
- zb0001Mask |= 0x2
- }
- if (*z).PK.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x4
- }
- if (*z).Weight == 0 {
- zb0001Len--
- zb0001Mask |= 0x8
- }
- // variable map header, size zb0001Len
- o = append(o, 0x80|uint8(zb0001Len))
- if zb0001Len != 0 {
- if (zb0001Mask & 0x2) == 0 { // if not empty
- // string "d"
- o = append(o, 0xa1, 0x64)
- o = msgp.AppendUint64(o, (*z).KeyDilution)
- }
- if (zb0001Mask & 0x4) == 0 { // if not empty
- // string "p"
- o = append(o, 0xa1, 0x70)
- o = (*z).PK.MarshalMsg(o)
- }
- if (zb0001Mask & 0x8) == 0 { // if not empty
- // string "w"
- o = append(o, 0xa1, 0x77)
- o = msgp.AppendUint64(o, (*z).Weight)
- }
- }
- return
-}
-
-func (_ *Participant) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*Participant)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *Participant) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0001 int
- var zb0002 bool
- zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).PK.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PK")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- (*z).Weight, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Weight")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- (*z).KeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "KeyDilution")
- return
- }
- }
- if zb0001 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0001)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 {
- (*z) = Participant{}
- }
- for zb0001 > 0 {
- zb0001--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "p":
- bts, err = (*z).PK.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "PK")
- return
- }
- case "w":
- (*z).Weight, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Weight")
- return
- }
- case "d":
- (*z).KeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "KeyDilution")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *Participant) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*Participant)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *Participant) Msgsize() (s int) {
- s = 1 + 2 + (*z).PK.Msgsize() + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *Participant) MsgIsZero() bool {
- return ((*z).PK.MsgIsZero()) && ((*z).Weight == 0) && ((*z).KeyDilution == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
func (z *Reveal) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
diff --git a/crypto/compactcert/msgp_gen_test.go b/crypto/compactcert/msgp_gen_test.go
index 5b292879e..23ebb5a3f 100644
--- a/crypto/compactcert/msgp_gen_test.go
+++ b/crypto/compactcert/msgp_gen_test.go
@@ -132,66 +132,6 @@ func BenchmarkUnmarshalCompactOneTimeSignature(b *testing.B) {
}
}
-func TestMarshalUnmarshalParticipant(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := Participant{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingParticipant(t *testing.T) {
- protocol.RunEncodingTest(t, &Participant{})
-}
-
-func BenchmarkMarshalMsgParticipant(b *testing.B) {
- v := Participant{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgParticipant(b *testing.B) {
- v := Participant{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalParticipant(b *testing.B) {
- v := Participant{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
func TestMarshalUnmarshalReveal(t *testing.T) {
partitiontest.PartitionTest(t)
v := Reveal{}
diff --git a/crypto/compactcert/structs.go b/crypto/compactcert/structs.go
index 69faac522..1e02e4eaf 100644
--- a/crypto/compactcert/structs.go
+++ b/crypto/compactcert/structs.go
@@ -30,34 +30,6 @@ type Params struct {
SecKQ uint64 // Security parameter (k+q) from analysis document
}
-// A Participant corresponds to an account whose AccountData.Status
-// is Online, and for which the expected sigRound satisfies
-// AccountData.VoteFirstValid <= sigRound <= AccountData.VoteLastValid.
-//
-// In the Algorand ledger, it is possible for multiple accounts to have
-// the same PK. Thus, the PK is not necessarily unique among Participants.
-// However, each account will produce a unique Participant struct, to avoid
-// potential DoS attacks where one account claims to have the same VoteID PK
-// as another account.
-type Participant struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
-
- // PK is AccountData.VoteID.
- PK crypto.OneTimeSignatureVerifier `codec:"p"`
-
- // Weight is AccountData.MicroAlgos.
- Weight uint64 `codec:"w"`
-
- // KeyDilution is AccountData.KeyDilution() with the protocol for sigRound
- // as expected by the Builder.
- KeyDilution uint64 `codec:"d"`
-}
-
-// ToBeHashed implements the crypto.Hashable interface.
-func (p Participant) ToBeHashed() (protocol.HashID, []byte) {
- return protocol.CompactCertPart, protocol.Encode(&p)
-}
-
// CompactOneTimeSignature is crypto.OneTimeSignature with omitempty
type CompactOneTimeSignature struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
@@ -87,8 +59,8 @@ func (ssc sigslotCommit) ToBeHashed() (protocol.HashID, []byte) {
type Reveal struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
- SigSlot sigslotCommit `codec:"s"`
- Part Participant `codec:"p"`
+ SigSlot sigslotCommit `codec:"s"`
+ Part basics.Participant `codec:"p"`
}
// maxReveals is a bound on allocation and on numReveals to limit log computation
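
Participant now lives in data/basics, so Reveal above is composed from the shared type instead of a package-local copy. A minimal sketch of the new composition, assuming basics.Participant keeps the PK/Weight/KeyDilution fields and the crypto.Hashable implementation removed from this package:

    package main

    import (
        "fmt"

        "github.com/algorand/go-algorand/crypto"
        "github.com/algorand/go-algorand/crypto/compactcert"
        "github.com/algorand/go-algorand/data/basics"
    )

    func main() {
        // Assumed field set, mirroring the struct removed above.
        part := basics.Participant{Weight: 1000, KeyDilution: 10000}

        // Reveal.Part now refers to the relocated type.
        r := compactcert.Reveal{Part: part}

        // Hashing still goes through crypto.Hashable, as ToBeHashed did here.
        fmt.Println(crypto.HashObj(part), r.Part.Weight)
    }
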
diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json
index 46e15396f..57b719d5a 100644
--- a/daemon/algod/api/algod.oas2.json
+++ b/daemon/algod/api/algod.oas2.json
@@ -462,59 +462,224 @@
}
}
},
- "/v2/register-participation-keys/{address}": {
+ "/v2/participation": {
+
+ "get": {
+
+ "tags": [
+ "private"
+ ],
+
+ "description": "Return a list of participation keys",
+ "produces": [
+ "application/json"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Return a list of participation keys",
+ "operationId": "GetParticipationKeys",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "$ref": "#/responses/ParticipationKeysResponse"
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "404": {
+ "description": "Application Not Found",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+ },
+
+
"post": {
- "description": "Generate (or renew) and register participation keys on the node for a given account address.",
+
"tags": [
"private"
],
- "operationId": "RegisterParticipationKeys",
+
+ "consumes": [
+ "application/msgpack"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Add a participation key to the node",
+ "operationId": "AddParticipationKey",
"parameters": [
{
- "type": "string",
- "description": "The `account-id` to update, or `all` to update all accounts.",
- "name": "address",
- "in": "path",
- "required": true
+ "description": "The participation key to add to the node",
+ "name": "participationkey",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "format": "binary"
+ }
+ }
+ ],
+
+ "responses": {
+ "200": {
+ "$ref": "#/responses/PostParticipationResponse"
},
- {
- "type": "integer",
- "default": 1000,
- "description": "The fee to use when submitting key registration transactions. Defaults to the suggested fee.",
- "name": "fee",
- "in": "query"
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
},
- {
- "type": "integer",
- "description": "value to use for two-level participation key.",
- "name": "key-dilution",
- "in": "query"
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
},
- {
- "type": "integer",
- "description": "The last round for which the generated participation keys will be valid.",
- "name": "round-last-valid",
- "in": "query"
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
},
- {
- "type": "boolean",
- "description": "Don't wait for transaction to commit before returning response.",
- "name": "no-wait",
- "in": "query"
+ "503": {
+ "description": "Service Temporarily Unavailable",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+
+ }
+
+ },
+ "/v2/participation/{participation-id}": {
+ "delete": {
+
+ "tags": [
+ "private"
+ ],
+
+ "description": "Delete a given participation key by id",
+ "produces": [
+ "application/json"
+ ],
+
+ "schemes": [
+ "http"
+ ],
+ "summary": "Delete a given participation key by id",
+ "operationId": "DeleteParticipationKeyByID",
+ "responses": {
+ "200": {
+ "$ref": "#/responses/DeleteParticipationIdResponse"
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
}
+ }
+
+ },
+
+ "get": {
+
+ "tags": [
+ "private"
+ ],
+
+ "description": "Given a participation id, return information about that participation key",
+ "produces": [
+ "application/json"
],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Get participation key info by id",
+ "operationId": "GetParticipationKeyByID",
"responses": {
"200": {
"description": "OK",
- "$ref": "#/responses/PostTransactionsResponse"
+ "$ref": "#/responses/ParticipationKeyResponse"
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "404": {
+ "description": "Application Not Found",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
}
}
},
"parameters": [
{
"type": "string",
- "description": "Account address where keys will be registered.",
- "name": "address",
+ "name": "participation-id",
"in": "path",
"required": true
}
@@ -1496,6 +1661,52 @@
}
}
},
+ "ParticipationKey": {
+ "description": "Represents a participation key used by the node.",
+ "type": "object",
+ "required": [
+ "id",
+ "key",
+ "address"
+ ],
+ "properties": {
+ "id": {
+ "description": "The key's ParticipationID.",
+ "type": "string"
+ },
+ "address": {
+ "description": "Address the key was generated for.",
+ "type": "string",
+ "x-algorand-format": "Address"
+ },
+ "effective-first-valid": {
+ "description": "When registered, this is the first round it may be used.",
+ "type": "integer",
+ "x-algorand-format": "uint64"
+ },
+ "effective-last-valid": {
+ "description": "When registered, this is the last round it may be used.",
+ "type": "integer",
+ "x-algorand-format": "uint64"
+ },
+ "last-vote": {
+ "description": "Round when this key was last used to vote.",
+ "type": "integer"
+ },
+ "last-block-proposal": {
+ "description": "Round when this key was last used to propose a block.",
+ "type": "integer"
+ },
+ "last-state-proof": {
+ "description": "Round when this key was last used to generate a state proof.",
+ "type": "integer"
+ },
+ "key": {
+ "description": "Key information stored on the account.",
+ "$ref": "#/definitions/AccountParticipation"
+ }
+ }
+ },
"TealKeyValueStore": {
"description": "Represents a key-value store for use in an application.",
"type": "array",
@@ -1529,7 +1740,7 @@
],
"properties": {
"type": {
- "description": "\\[tt\\] value type.",
+ "description": "\\[tt\\] value type. Value `1` refers to **bytes**, value `2` refers to **uint**",
"type": "integer"
},
"bytes": {
@@ -1657,7 +1868,7 @@
"$ref": "#/definitions/ApplicationStateSchema"
},
"global-state-schema": {
- "description": "[\\lsch\\] global schema",
+ "description": "[\\gsch\\] global schema",
"$ref": "#/definitions/ApplicationStateSchema"
},
"global-state": {
@@ -2366,6 +2577,51 @@
}
}
},
+
+ "ParticipationKeysResponse": {
+ "description": "A list of participation keys",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/ParticipationKey"
+ }
+ }
+ },
+ "ParticipationKeyResponse": {
+ "description": "A detailed description of a participation id",
+ "schema": {
+ "type": "object",
+ "required": [
+ "participationKey"
+ ],
+ "properties": {
+ "participationKey": {
+ "description": "Detailed description of a participation key",
+ "type": "string"
+ }
+ }
+ }
+ },
+ "DeleteParticipationIdResponse" : {
+ "description": "Participation key got deleted by ID"
+ },
+ "PostParticipationResponse" : {
+ "description": "Participation ID of the submission",
+ "schema": {
+ "type": "object",
+ "required": [
+ "partId"
+ ],
+ "properties": {
+ "partId": {
+ "description": "encoding of the participation id.",
+ "type": "string"
+ }
+ }
+ }
+
+ },
+
"PostTransactionsResponse": {
"description": "Transaction ID of the submission.",
"schema": {
diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml
index 423b23180..a8a4d9ea8 100644
--- a/daemon/algod/api/algod.oas3.yml
+++ b/daemon/algod/api/algod.oas3.yml
@@ -341,6 +341,10 @@
},
"description": "Teal compile Result"
},
+ "DeleteParticipationIdResponse": {
+ "content": {},
+ "description": "Participation key got deleted by ID"
+ },
"DryrunResponse": {
"content": {
"application/json": {
@@ -453,6 +457,38 @@
}
}
},
+ "ParticipationKeyResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "participationKey": {
+ "description": "Detailed description of a participation key",
+ "type": "string"
+ }
+ },
+ "required": [
+ "participationKey"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "A detailed description of a participation id"
+ },
+ "ParticipationKeysResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "items": {
+ "$ref": "#/components/schemas/ParticipationKey"
+ },
+ "type": "array"
+ }
+ }
+ },
+ "description": "A list of participation keys"
+ },
"PendingTransactionsResponse": {
"content": {
"application/json": {
@@ -483,6 +519,25 @@
},
"description": "A potentially truncated list of transactions currently in the node's transaction pool. You can compute whether or not the list is truncated if the number of elements in the **top-transactions** array is fewer than **total-transactions**."
},
+ "PostParticipationResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "partId": {
+ "description": "encoding of the participation id.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "partId"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Participation ID of the submission"
+ },
"PostTransactionsResponse": {
"content": {
"application/json": {
@@ -1270,6 +1325,51 @@
],
"type": "object"
},
+ "ParticipationKey": {
+ "description": "Represents a participation key used by the node.",
+ "properties": {
+ "address": {
+ "description": "Address the key was generated for.",
+ "type": "string",
+ "x-algorand-format": "Address"
+ },
+ "effective-first-valid": {
+ "description": "When registered, this is the first round it may be used.",
+ "type": "integer",
+ "x-algorand-format": "uint64"
+ },
+ "effective-last-valid": {
+ "description": "When registered, this is the last round it may be used.",
+ "type": "integer",
+ "x-algorand-format": "uint64"
+ },
+ "id": {
+ "description": "The key's ParticipationID.",
+ "type": "string"
+ },
+ "key": {
+ "$ref": "#/components/schemas/AccountParticipation"
+ },
+ "last-block-proposal": {
+ "description": "Round when this key was last used to propose a block.",
+ "type": "integer"
+ },
+ "last-state-proof": {
+ "description": "Round when this key was last used to generate a state proof.",
+ "type": "integer"
+ },
+ "last-vote": {
+ "description": "Round when this key was last used to vote.",
+ "type": "integer"
+ }
+ },
+ "required": [
+ "address",
+ "id",
+ "key"
+ ],
+ "type": "object"
+ },
"PendingTransactionResponse": {
"description": "Details about a pending transaction. If the transaction was recently confirmed, includes confirmation details like the round and reward details.",
"properties": {
@@ -1386,7 +1486,7 @@
"type": "string"
},
"type": {
- "description": "\\[tt\\] value type.",
+ "description": "\\[tt\\] value type. Value `1` refers to **bytes**, value `2` refers to **uint**",
"type": "integer"
},
"uint": {
@@ -2440,51 +2540,229 @@
"summary": "Get the current supply reported by the ledger."
}
},
- "/v2/register-participation-keys/{address}": {
+ "/v2/participation": {
+ "get": {
+ "description": "Return a list of participation keys",
+ "operationId": "GetParticipationKeys",
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "items": {
+ "$ref": "#/components/schemas/ParticipationKey"
+ },
+ "type": "array"
+ }
+ }
+ },
+ "description": "A list of participation keys"
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Bad Request"
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "404": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Application Not Found"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Return a list of participation keys",
+ "tags": [
+ "private"
+ ]
+ },
"post": {
- "description": "Generate (or renew) and register participation keys on the node for a given account address.",
- "operationId": "RegisterParticipationKeys",
+ "operationId": "AddParticipationKey",
+ "requestBody": {
+ "content": {
+ "application/msgpack": {
+ "schema": {
+ "format": "binary",
+ "type": "string"
+ }
+ }
+ },
+ "description": "The participation key to add to the node",
+ "required": true
+ },
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "partId": {
+ "description": "encoding of the participation id.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "partId"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Participation ID of the submission"
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Bad Request"
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "503": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Service Temporarily Unavailable"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Add a participation key to the node",
+ "tags": [
+ "private"
+ ],
+ "x-codegen-request-body-name": "participationkey"
+ }
+ },
+ "/v2/participation/{participation-id}": {
+ "delete": {
+ "description": "Delete a given participation key by id",
+ "operationId": "DeleteParticipationKeyByID",
"parameters": [
{
- "description": "The `account-id` to update, or `all` to update all accounts.",
"in": "path",
- "name": "address",
+ "name": "participation-id",
"required": true,
"schema": {
"type": "string"
}
+ }
+ ],
+ "responses": {
+ "200": {
+ "content": {},
+ "description": "Participation key got deleted by ID"
},
- {
- "description": "The fee to use when submitting key registration transactions. Defaults to the suggested fee.",
- "in": "query",
- "name": "fee",
- "schema": {
- "default": 1000,
- "type": "integer"
- }
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Bad Request"
},
- {
- "description": "value to use for two-level participation key.",
- "in": "query",
- "name": "key-dilution",
- "schema": {
- "type": "integer"
- }
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
},
- {
- "description": "The last round for which the generated participation keys will be valid.",
- "in": "query",
- "name": "round-last-valid",
- "schema": {
- "type": "integer"
- }
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
},
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Delete a given participation key by id",
+ "tags": [
+ "private"
+ ]
+ },
+ "get": {
+ "description": "Given a participation id, return information about that participation key",
+ "operationId": "GetParticipationKeyByID",
+ "parameters": [
{
- "description": "Don't wait for transaction to commit before returning response.",
- "in": "query",
- "name": "no-wait",
+ "in": "path",
+ "name": "participation-id",
+ "required": true,
"schema": {
- "type": "boolean"
+ "type": "string"
}
}
],
@@ -2494,21 +2772,66 @@
"application/json": {
"schema": {
"properties": {
- "txId": {
- "description": "encoding of the transaction hash.",
+ "participationKey": {
+ "description": "Detailed description of a participation key",
"type": "string"
}
},
"required": [
- "txId"
+ "participationKey"
],
"type": "object"
}
}
},
- "description": "Transaction ID of the submission."
+ "description": "A detailed description of a participation id"
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Bad Request"
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "404": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Application Not Found"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
}
},
+ "summary": "Get participation key info by id",
"tags": [
"private"
]
diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go
index 626bec0b7..f57c0d0be 100644
--- a/daemon/algod/api/client/restClient.go
+++ b/daemon/algod/api/client/restClient.go
@@ -59,9 +59,10 @@ const (
// rawRequestPaths is a set of paths where the body should not be urlencoded
var rawRequestPaths = map[string]bool{
- "/v1/transactions": true,
- "/v2/teal/dryrun": true,
- "/v2/teal/compile": true,
+ "/v1/transactions": true,
+ "/v2/teal/dryrun": true,
+ "/v2/teal/compile": true,
+ "/v2/participation": true,
}
// unauthorizedRequestError is generated when we receive a 401 error from the server. This error includes the inner error
@@ -604,3 +605,21 @@ func (client RestClient) Proof(txid string, round uint64) (response generatedV2.
err = client.get(&response, fmt.Sprintf("/v2/blocks/%d/transactions/%s/proof", round, txid), nil)
return
}
+
+// PostParticipationKey sends a key file to the node.
+func (client RestClient) PostParticipationKey(file []byte) (response generatedV2.PostParticipationResponse, err error) {
+ err = client.post(&response, "/v2/participation", file)
+ return
+}
+
+// GetParticipationKeys gets all of the participation keys
+func (client RestClient) GetParticipationKeys() (response generatedV2.ParticipationKeysResponse, err error) {
+ err = client.get(&response, "/v2/participation", nil)
+ return
+}
+
+// GetParticipationKeyByID gets a single participation key
+func (client RestClient) GetParticipationKeyByID(participationID string) (response generatedV2.ParticipationKeyResponse, err error) {
+ err = client.get(&response, fmt.Sprintf("/v2/participation/%s", participationID), nil)
+ return
+}
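
A short usage sketch for the three helpers added above, assuming the package's existing MakeRestClient constructor; the node URL, API token, and key-file path are placeholders:

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"
        "net/url"

        "github.com/algorand/go-algorand/daemon/algod/api/client"
    )

    func main() {
        nodeURL, err := url.Parse("http://127.0.0.1:8080") // placeholder node address
        if err != nil {
            log.Fatal(err)
        }
        c := client.MakeRestClient(*nodeURL, "algod-api-token") // placeholder token

        keyBytes, err := ioutil.ReadFile("Wallet.partkey") // placeholder key file
        if err != nil {
            log.Fatal(err)
        }

        // Install the key, then read it back through both new GET endpoints.
        posted, err := c.PostParticipationKey(keyBytes)
        if err != nil {
            log.Fatal(err)
        }
        keys, err := c.GetParticipationKeys()
        if err != nil {
            log.Fatal(err)
        }
        one, err := c.GetParticipationKeyByID(posted.PartId)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(len(keys), one.ParticipationKey)
    }
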
diff --git a/daemon/algod/api/server/v1/handlers/handlers.go b/daemon/algod/api/server/v1/handlers/handlers.go
index 0384e887e..9d2d34934 100644
--- a/daemon/algod/api/server/v1/handlers/handlers.go
+++ b/daemon/algod/api/server/v1/handlers/handlers.go
@@ -1637,8 +1637,7 @@ func GetSupply(ctx lib.ReqContext, context echo.Context) {
w := context.Response().Writer
- latest := ctx.Node.Ledger().Latest()
- totals, err := ctx.Node.Ledger().Totals(latest)
+ latest, totals, err := ctx.Node.Ledger().LatestTotals()
if err != nil {
err = fmt.Errorf("GetSupply(): round %d failed: %v", latest, err)
lib.ErrorResponse(w, http.StatusInternalServerError, err, errInternalFailure, ctx.Log)
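
Reading the round with Latest() and then querying Totals(latest) in a second call leaves a window in which the ledger can advance or evict that round's totals between the two calls; LatestTotals is assumed to return both values from a single critical section. A toy illustration of the pattern, not the real Ledger API:

    package main

    import (
        "fmt"
        "sync"
    )

    // toyLedger stands in for the real Ledger to show why a combined
    // accessor is safer than separate Latest and Totals calls.
    type toyLedger struct {
        mu     sync.Mutex
        round  uint64
        totals uint64
    }

    // LatestTotals reads both values under one lock, so the returned
    // round always matches the returned totals.
    func (l *toyLedger) LatestTotals() (uint64, uint64) {
        l.mu.Lock()
        defer l.mu.Unlock()
        return l.round, l.totals
    }

    func main() {
        l := &toyLedger{round: 100, totals: 42}
        r, t := l.LatestTotals()
        fmt.Println(r, t)
    }
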
diff --git a/daemon/algod/api/server/v2/dryrun.go b/daemon/algod/api/server/v2/dryrun.go
index 3677c4b28..04279ce67 100644
--- a/daemon/algod/api/server/v2/dryrun.go
+++ b/daemon/algod/api/server/v2/dryrun.go
@@ -28,6 +28,7 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger/apply"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/protocol"
@@ -256,7 +257,7 @@ func (dl *dryrunLedger) BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
return bookkeeping.BlockHeader{}, nil
}
-func (dl *dryrunLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledger.TxLease) error {
+func (dl *dryrunLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
return nil
}
diff --git a/daemon/algod/api/server/v2/errors.go b/daemon/algod/api/server/v2/errors.go
index bb1f8bbc1..3123e5d7c 100644
--- a/daemon/algod/api/server/v2/errors.go
+++ b/daemon/algod/api/server/v2/errors.go
@@ -38,4 +38,5 @@ var (
errFailedToAbortCatchup = "failed to abort catchup : %v"
errFailedToStartCatchup = "failed to start catchup : %v"
errOperationNotAvailableDuringCatchup = "operation not available during catchup"
+ errRESTPayloadZeroLength = "payload was of zero length"
)
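
The new message backs a zero-length body check on the participation upload path. A self-contained sketch of that guard; the constant is duplicated here because the real one is unexported, and the surrounding handler wiring is assumed:

    package main

    import (
        "bytes"
        "errors"
        "fmt"
        "io"
    )

    // errRESTPayloadZeroLength mirrors the message added above.
    const errRESTPayloadZeroLength = "payload was of zero length"

    // readPayload drains an upload body and rejects empty submissions,
    // the shape of check a handler can apply before decoding a key file.
    func readPayload(r io.Reader) ([]byte, error) {
        buf := new(bytes.Buffer)
        if _, err := buf.ReadFrom(r); err != nil {
            return nil, err
        }
        if buf.Len() == 0 {
            return nil, errors.New(errRESTPayloadZeroLength)
        }
        return buf.Bytes(), nil
    }

    func main() {
        _, err := readPayload(bytes.NewReader(nil))
        fmt.Println(err) // payload was of zero length
    }
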
diff --git a/daemon/algod/api/server/v2/generated/private/routes.go b/daemon/algod/api/server/v2/generated/private/routes.go
index 3e6a3591a..a061b310c 100644
--- a/daemon/algod/api/server/v2/generated/private/routes.go
+++ b/daemon/algod/api/server/v2/generated/private/routes.go
@@ -23,9 +23,18 @@ type ServerInterface interface {
// Starts a catchpoint catchup.
// (POST /v2/catchup/{catchpoint})
StartCatchup(ctx echo.Context, catchpoint string) error
-
- // (POST /v2/register-participation-keys/{address})
- RegisterParticipationKeys(ctx echo.Context, address string, params RegisterParticipationKeysParams) error
+ // Return a list of participation keys
+ // (GET /v2/participation)
+ GetParticipationKeys(ctx echo.Context) error
+ // Add a participation key to the node
+ // (POST /v2/participation)
+ AddParticipationKey(ctx echo.Context) error
+ // Delete a given participation key by id
+ // (DELETE /v2/participation/{participation-id})
+ DeleteParticipationKeyByID(ctx echo.Context, participationId string) error
+ // Get participation key info by id
+ // (GET /v2/participation/{participation-id})
+ GetParticipationKeyByID(ctx echo.Context, participationId string) error
// (POST /v2/shutdown)
ShutdownNode(ctx echo.Context, params ShutdownNodeParams) error
@@ -96,15 +105,11 @@ func (w *ServerInterfaceWrapper) StartCatchup(ctx echo.Context) error {
return err
}
-// RegisterParticipationKeys converts echo context to params.
-func (w *ServerInterfaceWrapper) RegisterParticipationKeys(ctx echo.Context) error {
+// GetParticipationKeys converts echo context to params.
+func (w *ServerInterfaceWrapper) GetParticipationKeys(ctx echo.Context) error {
validQueryParams := map[string]bool{
- "pretty": true,
- "fee": true,
- "key-dilution": true,
- "round-last-valid": true,
- "no-wait": true,
+ "pretty": true,
}
// Check for unknown query parameters.
@@ -115,60 +120,94 @@ func (w *ServerInterfaceWrapper) RegisterParticipationKeys(ctx echo.Context) err
}
var err error
- // ------------- Path parameter "address" -------------
- var address string
-
- err = runtime.BindStyledParameter("simple", false, "address", ctx.Param("address"), &address)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err))
- }
ctx.Set("api_key.Scopes", []string{""})
- // Parameter object where we will unmarshal all parameters from the context
- var params RegisterParticipationKeysParams
- // ------------- Optional query parameter "fee" -------------
- if paramValue := ctx.QueryParam("fee"); paramValue != "" {
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetParticipationKeys(ctx)
+ return err
+}
+// AddParticipationKey converts echo context to params.
+func (w *ServerInterfaceWrapper) AddParticipationKey(ctx echo.Context) error {
+
+ validQueryParams := map[string]bool{
+ "pretty": true,
}
- err = runtime.BindQueryParameter("form", true, false, "fee", ctx.QueryParams(), &params.Fee)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter fee: %s", err))
+ // Check for unknown query parameters.
+ for name, _ := range ctx.QueryParams() {
+ if _, ok := validQueryParams[name]; !ok {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
+ }
}
- // ------------- Optional query parameter "key-dilution" -------------
- if paramValue := ctx.QueryParam("key-dilution"); paramValue != "" {
+ var err error
- }
+ ctx.Set("api_key.Scopes", []string{""})
- err = runtime.BindQueryParameter("form", true, false, "key-dilution", ctx.QueryParams(), &params.KeyDilution)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter key-dilution: %s", err))
- }
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.AddParticipationKey(ctx)
+ return err
+}
+
+// DeleteParticipationKeyByID converts echo context to params.
+func (w *ServerInterfaceWrapper) DeleteParticipationKeyByID(ctx echo.Context) error {
- // ------------- Optional query parameter "round-last-valid" -------------
- if paramValue := ctx.QueryParam("round-last-valid"); paramValue != "" {
+ validQueryParams := map[string]bool{
+ "pretty": true,
+ }
+ // Check for unknown query parameters.
+ for name, _ := range ctx.QueryParams() {
+ if _, ok := validQueryParams[name]; !ok {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
+ }
}
- err = runtime.BindQueryParameter("form", true, false, "round-last-valid", ctx.QueryParams(), &params.RoundLastValid)
+ var err error
+ // ------------- Path parameter "participation-id" -------------
+ var participationId string
+
+ err = runtime.BindStyledParameter("simple", false, "participation-id", ctx.Param("participation-id"), &participationId)
if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round-last-valid: %s", err))
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter participation-id: %s", err))
}
- // ------------- Optional query parameter "no-wait" -------------
- if paramValue := ctx.QueryParam("no-wait"); paramValue != "" {
+ ctx.Set("api_key.Scopes", []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.DeleteParticipationKeyByID(ctx, participationId)
+ return err
+}
+
+// GetParticipationKeyByID converts echo context to params.
+func (w *ServerInterfaceWrapper) GetParticipationKeyByID(ctx echo.Context) error {
+ validQueryParams := map[string]bool{
+ "pretty": true,
+ }
+
+ // Check for unknown query parameters.
+ for name, _ := range ctx.QueryParams() {
+ if _, ok := validQueryParams[name]; !ok {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
+ }
}
- err = runtime.BindQueryParameter("form", true, false, "no-wait", ctx.QueryParams(), &params.NoWait)
+ var err error
+ // ------------- Path parameter "participation-id" -------------
+ var participationId string
+
+ err = runtime.BindStyledParameter("simple", false, "participation-id", ctx.Param("participation-id"), &participationId)
if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter no-wait: %s", err))
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter participation-id: %s", err))
}
+ ctx.Set("api_key.Scopes", []string{""})
+
// Invoke the callback with all the unmarshalled arguments
- err = w.Handler.RegisterParticipationKeys(ctx, address, params)
+ err = w.Handler.GetParticipationKeyByID(ctx, participationId)
return err
}
@@ -227,7 +266,10 @@ func RegisterHandlers(router interface {
router.DELETE("/v2/catchup/:catchpoint", wrapper.AbortCatchup, m...)
router.POST("/v2/catchup/:catchpoint", wrapper.StartCatchup, m...)
- router.POST("/v2/register-participation-keys/:address", wrapper.RegisterParticipationKeys, m...)
+ router.GET("/v2/participation", wrapper.GetParticipationKeys, m...)
+ router.POST("/v2/participation", wrapper.AddParticipationKey, m...)
+ router.DELETE("/v2/participation/:participation-id", wrapper.DeleteParticipationKeyByID, m...)
+ router.GET("/v2/participation/:participation-id", wrapper.GetParticipationKeyByID, m...)
router.POST("/v2/shutdown", wrapper.ShutdownNode, m...)
}
@@ -235,136 +277,142 @@ func RegisterHandlers(router interface {
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9/XPbtrLov4LROTP5eKJk56On8UznPDdOW7+maSZ2++49cW4LkSsJNQmwAGhJzfX/",
- "fgcLgARJUJI/Tnoz5/yUWAQWi93FYnexWHwcpaIoBQeu1ejo46ikkhagQeJfNE1FxXXCMvNXBiqVrNRM",
- "8NGR/0aUlowvRuMRM7+WVC9H4xGnBTRtTP/xSMLvFZOQjY60rGA8UukSCmoA601pWteQ1slCJA7EsQVx",
- "ejK63vKBZpkEpfpY/sjzDWE8zasMiJaUK5qaT4qsmF4SvWSKuM6EcSI4EDEnetlqTOYM8kxN/CR/r0Bu",
- "glm6wYendN2gmEiRQx/Pl6KYMQ4eK6iRqhlCtCAZzLHRkmpiRjC4+oZaEAVUpksyF3IHqhaJEF/gVTE6",
- "ej9SwDOQyK0U2BX+dy4B/oBEU7kAPfowjk1urkEmmhWRqZ066ktQVa4VwbY4xwW7Ak5Mrwn5oVKazIBQ",
- "Tt5985I8ffr0hZlIQbWGzAnZ4Kya0cM52e6jo1FGNfjPfVmj+UJIyrOkbv/um5c4/pmb4L6tqFIQXyzH",
- "5gs5PRmagO8YESHGNSyQDy3pNz0ii6L5eQZzIWFPntjG98qUcPw/lSsp1emyFIzrCF8IfiX2c1SHBd23",
- "6bAagVb70lBKGqDvD5IXHz4ejg8Prv/y/jj5h/vz+dPrPaf/soa7gwLRhmklJfB0kywkUFwtS8r79Hjn",
- "5EEtRZVnZEmvkPm0QFXv+hLT16rOK5pXRk5YKsVxvhCKUCdGGcxplWviByYVz42aMtCctBOmSCnFFcsg",
- "Gxvtu1qydElSqiwIbEdWLM+NDFYKsiFZi89uy2K6Dkli8LoVPXBC/3uJ0cxrByVgjdogSXOhINFix/bk",
- "dxzKMxJuKM1epW62WZHzJRAc3Hywmy3SjhuZzvMN0cjXjFBFKPFb05iwOdmIiqyQOTm7xP5uNoZqBTFE",
- "Q+a09lGzeIfI1yNGhHgzIXKgHInn112fZHzOFpUERVZL0Eu350lQpeAKiJj9Bqk2bP9/Zz++IUKSH0Ap",
- "uoC3NL0kwFORDfPYDRrbwX9TwjC8UIuSppfx7TpnBYug/ANds6IqCK+KGUjDL78/aEEk6EryIYQsxB1y",
- "VtB1f9BzWfEUmdsM2zLUjCgxVeZ0MyGnc1LQ9VcHY4eOIjTPSQk8Y3xB9JoPGmlm7N3oJVJUPNvDhtGG",
- "YcGuqUpI2ZxBRmooWzBxw+zCh/Gb4dNYVgE6HsggOvUoO9DhsI7IjFm65gsp6QICkZmQn5zmwq9aXAKv",
- "FRyZbfBTKeGKiUrVnQZwxKG3m9dcaEhKCXMWkbEzRw6jPWwbp14LZ+CkgmvKOGRG8yLSQoPVRIM4BQNu",
- "d2b6W/SMKvji2dAG3nzdk/tz0eX6Vo7vxW1slNglGdkXzVe3YONmU6v/Hs5fOLZii8T+3GMkW5ybrWTO",
- "ctxmfjP882SoFCqBFiH8xqPYglNdSTi64I/NXyQhZ5ryjMrM/FLYn36ocs3O2ML8lNufXosFS8/YYoCY",
- "Na5Rbwq7FfYfAy+ujvU66jS8FuKyKsMJpS2vdLYhpydDTLYwbyqYx7UrG3oV52vvady0h17XjBxAcpB2",
- "JTUNL2EjwWBL0zn+s56jPNG5/MP8U5Z5jKZGgN1Gi0EBFyx4534zP5klD9YnMFBYSg1Rp7h9Hn0MEPqr",
- "hPnoaPSXaRMpmdqvaurgmhGvx6PjBs79j9T0tPPrODLNZ8K45Q42HVuf8P7xMVCjmKCh2sHh61ykl7fC",
- "oZSiBKmZ5ePMwOmvFARPlkAzkCSjmk4ap8raWQPyjh2/w37oJYGMbHE/4n9oTsxnswqp9uabMV2ZMkac",
- "CAJNmbH47D5iRzIN0BIVpLBGHjHG2Y2wfNkMbhV0rVHfO7J86EKLcOeVtSsJ9vCTMFNvvMbjmZC3k5eO",
- "IHDS+MKEGqi19Wtm3uYsNq3KxNEnYk/bBh1ATfixr1ZDCnXBx2jVosKZpv8EKigD9T6o0AZ031QQRcly",
- "uIf1uqRq2Z+EMXCePiFn3x0/P3zyy5PnX5gdupRiIWlBZhsNijx0+wpRepPDo/7MUMFXuY5D/+KZ96Da",
- "cHdSCBGuYe+zos7BaAZLMWLjBQa7E7mRFb8HEoKUQkZsXhQdLVKRJ1cgFROR8MVb14K4FkYPWbu787vF",
- "lqyoImZsdMcqnoGcxChv/Czc0jUUatdGYUGfr3lDGweQSkk3PQ7Y+UZm58bdhydt4nvrXpESZKLXnGQw",
- "qxbhHkXmUhSEkgw7okJ8IzI401RX6h60QAOsQcYwIkSBzkSlCSVcZGZBm8Zx/TAQy8QgCsZ+dKhy9NLu",
- "PzMw1nFKq8VSE2NWihhrm44JTS1TEtwr1IDrV/vstpUdzsbJcgk025AZACdi5vwr5/nhJCmGZbQ/cXHa",
- "qUGr9glaeJVSpKAUZIk7XtqJmm9nuay30AkRR4TrUYgSZE7lLZHVQtN8B6LYJoZubU44p7SP9X7Db2Ng",
- "d/CQjVQaH9NKgbFdzOrOQcMQCfekyRVIdM7+qfzzg9yWfVU5cHTiduBzVpjlSzjlQkEqeKaiwHKqdLJr",
- "2ZpGLTPBzCBYKbGVioAHAgSvqdLWRWc8Q5PRqhscB/vgEMMID+4oBvLPfjPpw06NnuSqUvXOoqqyFFJD",
- "FpsDh/WWsd7Auh5LzAPY9falBakU7II8RKUAviOWnYklENUuRlTHsPqTw3C82Qc2UVK2kGgIsQ2RM98q",
- "oG4YPh5AxPgXdU8UHKY6klPHrMcjpUVZmvWnk4rX/YbIdGZbH+ufmrZ94aK60euZADO69jg5zFeWsvbg",
- "YEmNbYeQSUEvzd6ElpqNJfRxNosxUYynkGyTfLMsz0yrcAnsWKQDRrI7mgxG6yyOjvxGhW5QCHZwYWjC",
- "Axb7WxsBPw/i5vdgtUSgGkmjnKDp5uNqZnMIm8CapjrfGJWrl7AhK5BAVDUrmNb2SKNt1GhRJiGAqBO1",
- "ZUTnxtrosTdJ9/GrzxBUML2+cToe2S10O37nnU20RQ63eZdC5JPd0tcjRhSDfYzgY1IKw3XmTtD8MUvO",
- "lO4h6TZUjGHUC/mBapEZZ0D+U1QkpRyNgUpDrZ2ExCWPW4EZwSjTekxmd92GQpBDAdbGwS+PH3cn/vix",
- "4zlTZA4rf+xsGnbJ8fgxWuxvhdJ3XgEd0VyfRpQMupZGY0VShYwDOdnpZiLcvbzLAPTpiR8QF5MyGsVO",
- "XAoxv4fZsmwdO2zIYB2bqeMcGowPjHW1UaAn0Y2wNAhGzhtBXubojYp5RyJJAUZU1JKVBmRzNrLR0Mqr",
- "+K+Hfz96f5z8gyZ/HCQv/s/0w8dn148e9358cv3VV//d/unp9VeP/v7XmPGgNJvFIxffUbU0mDrNsean",
- "3MYe50Jak3PjdjIx/9R4d0TMMNNTPpjSPkL3NsYQxgm1zEaZM4ZKvrmHTcYCIhJKCQpVQmjgK/tVzMO0",
- "Cid5aqM0FH0f2Xb9ZcBCeOf3156UCp4zDkkhOGyimYSMww/4MdbbqqWBzrhBDPXt2h8t/DtotcfZh5l3",
- "pS9yO1BDb+skj3tgfhduJzwSJpSgewd5SShJc4bOn+BKyyrVF5yieRmIayS06o3mYYfjpW8S93AiDogD",
- "dcGpMjSsjc5o2GwOEXfyGwDvd6hqsQClO8bNHOCCu1aMk4ozjWMVhl+JZVgJEuObE9uyoBsypzn6R3+A",
- "FGRW6fZ2j+feShv3xcZqzDBEzC841SQH48r9wPj5GsH542UvMxz0SsjLmgpxnb8ADoqpJK5Iv7VfUZ+6",
- "6S+dbsUkRPvZ65tPvQF43GOnsg7z0xNnCp+eoL3TRGl6uH8y171gPIkK2fkSSME4Jvd0ZIs8NFabF6BH",
- "TbzHcf2C6zU3gnRFc5ZRfTtx6Kq43lq0q6MjNS1GdDwxP9cPsSO0hUhKml7iCcpowfSymk1SUUy9CzBd",
- "iNodmGYUCsHxWzalJZuqEtLp1eEOc+wO+opE1NX1eOS0jrr3s1oHODah7ph1DMT/rQV58O2rczJ1nFIP",
- "bIqGBR2crUe8NndDoBXkNpO3KcY2R+WCX/ATmDPOzPejC55RTaczqliqppUC+TXNKU9hshDkiDiQJ1TT",
- "C95T8YO3ADCB0mFTVrOcpeQy3IqbpWkzO/sQLi7eGwG5uPjQi5j2N043VHSN2gGSFdNLUenEpa4lElZU",
- "ZhHUVZ26hJBt4um2UcfEwbYS6VLjHPy4qqZlqZJcpDRPlKYa4tMvy9xMPxBDRbATnrgTpYX0StBoRosN",
- "8veNcDFjSVc+77FSoMivBS3fM64/kOSiOjh4CuS4LF8bmGcGj1+drjEyuSmh5d/vmSvRAIv59jhxa1DB",
- "WkualHQBKjp9DbRE7uNGXWBYOs8JdgtpUp83IqhmAp4ewwyweNw4GwQnd2Z7+TsI8SngJ2QhtjHaqQkW",
- "3pZfBtR3IjdCdmt2BTCiXKr0MjFrOzorZUTcc6ZOTV4YnewjuIotuFkELot7BiRdQnoJGSaUQlHqzbjV",
- "3R8SuB3Oqw6mbOK1TfrA7EAMhcyAVGVGnQ1A+aabpqVAa5+b9g4uYXMumuTCm+RlXY9HqU2FTozMDC1U",
- "lNRgMzLCGi5bB6PLfHfgZDClZUkWuZi51V2LxVEtF77P8EK2O+Q9LOKYUNRk2CLvJZURQljhHyDBLSZq",
- "4N1J9GPTK6nULGWlnf9+WWhvW30MkF2bS3Q7EfPurtFT6lElZhsnM6riGwiYL4YfZg11z+P8SDaqiDOY",
- "ELy85wR3lqMtUh8F2pVNJRpdftr2NtIQanEpAcmbXd2j0aZIaD4sqfIXEPCehl8we220Q4cW9aGTkSJ/",
- "6oT+XmM5MTNuDld0iP7DWbOnwVFScBmjzon1iq27GMZ1frS9F+lzZ33CrM+SHY1vlPE6Hrnshhg7BEcr",
- "I4McFnbitrEXFIfaAxUwyODx43yeMw4kiZ1KUaVEyuwNkkaXuzHAGKGPCbEBHrI3hJgYB2hjtBwBkzci",
- "XJt8cRMkOTAMr1MPG+Pswd+wO9rcXFB15u1OM7SvO5pFNG4SyC0b+1Go8SiqkoY8hFYrYpvMoOdSxUTU",
- "qKZ+XKYf/VGQA27HSUuzJpexaJ2xKgDF8Mx3C9wG8pDNzSb/KDg0kbBgSkPjN5vV6gNBnzZ2cSU0JHMm",
- "lU7QZY9OzzT6RqEx+I1pGlc/LVIRe8ONZXHtg8NewibJWF7Fue3G/f7EDPum9p9UNbuEDW4yQNMlmeGN",
- "TLMLtYY3bbYMbU9mt074tZ3wa3pv891PlkxTM7AUQnfG+EykqqNPti2miADGhKPPtUGSblEv6PucQK5j",
- "ibeBT4ZerVGYNjN8MGrQW0yZh73N/AqwGNa8FlJ0LoGhu3UWDE/iKM8I08GFxn6W4MAaoGXJsnXHh7dQ",
- "B47t0IC/gaFuLf7IUdSoBraDAoG/HktEkeBjDpalwZ5pr6bycG6TvShjrK+QIIFCCIdiyhdW6BPKiDbe",
- "/t1Fq3Og+few+dm0xemMrseju7n8MVo7iDto/bZmb5TOGMu2LmArgndDktOylOKK5okLjAyJphRXTjSx",
- "uY+jfGJVF3e/z18dv37r0De+Zw5U2lDZ1llhu/KzmZXxiIUcWCD+4raxVr3vbA2xgPn1bZgwmLJagrsk",
- "G9hyRos54bLLqwmUBUvRBVfm8SO1naESF9OzU9wS24OyDu01HrGN7LWjefSKsty7oh7bgeMvnFwTT72x",
- "VggB3DkqGAR3k3tVN73VHV8djXTt0EnhWFuu8Rb2proigncTi4wJiR4uimpBN0aCbHC6r5x4VSRm+SUq",
- "Z2k8bMFnyggHtzFf05hg4wFj1ECs2MARAq9YAMs0U3uclnWQDMaIEhNDSltoNxOuxFDF2e8VEJYB1+aT",
- "xFXZWahmXfoyFf3t1NgO/bEcYFuyogF/FxvDgBqyLhCJ7QZGGGHuoXtSO5x+onVo3PwQBAZvcFAVjtjb",
- "ErccMjn5cNJsT/uX7UhxWBGor/+MYNjb47vLEfmwxdIiOjBGtLzQ4G5xPLxTmN432COaLQHRDTeDsS0+",
- "kisRAVPxFeW2WojpZ2noeiuwMQPTayUkpt0riJ7SM5XMpfgD4p7s3DAqkvvoSInmIvaeRNKZu0q0jso0",
- "daA8fUM8BkV7yJILPpL2QeLACkcpD0LneI/VB7got2JtK5u0jq/jiyNMOZla+M3icDj30nRyuprR2CVf",
- "Y1AZnI6bQ5pWKE4L4jt7LrioYSN7wXlP3ZbZXPUSZJOg3L8XdUvj6PMS+QxSVtA8biVlSP32zZyMLZgt",
- "D1MpCOqPOEC2rpaVIlfDxR6DNaQ5nZODcVDhyHEjY1dMsVkO2OLQtphRhbtWHW6tu5jpAddLhc2f7NF8",
- "WfFMQqaXyhJWCVIbsOjK1bHvGegVACcH2O7wBXmIUX/FruCRoaKzRUZHhy8wLcX+cRDb7FwdqG16JUPF",
- "8v+dYonLMR57WBhmk3JQJ9F7E7Z437AK27KabNd91hK2dFpv91oqKKcLiJ/mFjtwsn2Rmxg07NCFZ7by",
- "lNJSbAjT8fFBU6OfBlLTjPqzaJBUFAXThVlAWhAlCiNPTXERO6gHZ8tYuQv/Hi//EY9YSus2QNdh/rQB",
- "YruXx2aNB2FvaAFtso4JtdeLctZc4HQKcUJO/SVFrIBQFz6wtDFjmamjSWdYiBe9GdfoRFV6nnxJ0iWV",
- "NDXqbzKEbjL74lmk6kP7oje/GeKfnO4SFMirOOnlgNh7a8L1JQ+54ElhNEr2qEkFDVZl9Lq20DSPJ7V4",
- "jd7NadoOel8D1EBJBsWtaokbDTT1nQSPbwF4R1Gs53MjebzxzD65ZFYyLh60Mhz66d1rZ2UUQsaurDfL",
- "3VkcErRkcIX5NXEmGZh35IXM9+LCXbD/c09ZGg+gNsv8Wo45Al9XLM9+blLbO4VzJOXpMnrGMTMdf2kq",
- "fdVTtus4ekN6STmHPArO7pm/+L01svv/JvYdp2B8z7bdgjh2up3JNYi30fRI+QENeZnOzQAhVdu5vnVy",
- "WL4QGcFxmuu4jZT1a/wExUF+r0DpWNVR/GDzKjGWZfwCW5uCAM/Qqp6Qb22l3iWQ1g1NtGZZUeX2th9k",
- "C5AuyFqVuaDZmBg456+OXxM7qu1jKyra2hgLNObas+jEMIK7+/ulOvlSWfE0zP3hbM8LM7NWGi/vKk2L",
- "MpZhb1qc+waYxh/GddHMC6kzISfWwlbefrODGHmYM1kYy7SGZnU8yoT5j9Y0XaLp2tImwyK/f1EXL5Uq",
- "KG5Y14mrr9/jujN4u7outqzLmAjjX6yYsgVa4QraSf31DRfnOvkk//b0ZMW5lZSojt52A+s2ZPfI2cN7",
- "H/qNYtYh/A0NFyUqmcJNa9ycYa/oHeJuwZxeVUN7m7CuKuYLb6eUC85SvMEblIStUXbFXvc5F9njsnM3",
- "LOWXuFuhkcUVLdNTpwc5Kg4W7vGK0BGuH5gNvhqmWumwf2qsKrqkmixAK6fZIBv7UkwuXsK4AldOAev+",
- "BnpSyNZZE2rI6PFlUoe5byhGmOI7YAB/Y769ce4RpuVdMo6GkCObywC0EQ2sRamN9cQ0WQhQbj7tK7nq",
- "vekzwWupGaw/THztSoRhj2rMtO25ZB/UsT+ldKeCpu1L05bgsUzzcyud2A56XJZu0OiN2prDsWJSgwSO",
- "nDYlPtwfELeGH0LbIm5b0wtwPzWCBld4OAkl7sM9wajrcnUK7F3RvLIShS2ITeuJXgNjPILGa8ahqawa",
- "2SDS6JaAjMH1OtBPpZJqawLupdPOgeZ4IhlTaEq7EO1dQXUYjCTBOfoxhtnYlBQbUBx1g8Zwo3xTF3Q1",
- "0h0YEy+xkrQjZL9AGFpVzojKMHGzUzIspjiM4vbF9tobQH8Z9G0i211LalfOTXaioQsvqYjZm6/WkFb2",
- "wF3Y2hC0LEmKN0iD/SIa0WTKOE/FLI/kvp3UH4M6fJhkO9vgv7GKHcMkcSfiN87J8sff2PHGBmsbUs/c",
- "NMKUKLa4JZub/vfK51ws2oh82oDC1jUeikxsdb8yajO8A9mrBWMVa31FEdOQhC/Sik5TfbmmvSZRkUed",
- "0qbe5nanfLhy5hhV/0Ay4rvm9j21u4s9YxhKSUwHM2ipdunxmpLmqnt/YdpylzEINp/Bltm0T1ZE4ytD",
- "OQw2hcF87vXezy7qWZkIeytBfXJMH6HvfeYdKSlzB2jNiu1T1uXo9rOm98neaxjcnYTLfEUgsZn0KykN",
- "C/gJaMpyVdeDrF83CM5bjT3XrceycjdTMHW4dk39HRVQ/jefZW9Hsa9mNFXPMBCwojLzLaI7m980k4EM",
- "kG5OpU1dZXGk5/XIrDk+7acVRq5N4nF5mgvF+CIZyqpon1jW4b4HysZl0YfAElWI1xykq3ao/aMkiRb+",
- "uHUbHttI4Wpi34YIarCqjkVu8G7Tu+byFtaKoPZJGhdzDidIJBTUYCeDK1bDY24j9kv73efR+VoBncoc",
- "EbheXpOdd6T8wTlTPSKGUj8nTuXuzs+7jUnBOLfFZFXsvhU3pAydzVKKrEptrD9cGOBNr72vDG5RJVFD",
- "IO3PsqfTc7xA+zrIdr6EzdTq1XRJeXOTub2sbU1ZO4fgbk6H2/dqbcX3tHxhJ7C4Fzz/TGNpPCqFyJMB",
- "7/K0f22suwYuWXoJGTF7hz9yGqjlRh6iU1OHD1fLja+iWpbAIXs0IcSYW0WpNz6S2K5K0hmcP9Dbxl/j",
- "qFllb3I6O25yweOnpfaRpzvqNw9mu1azrx7ecSgLZPtAes0HVBtdRSob7vtAQCS21zFQAqGyWMSslFte",
- "p9lrffdtuYjoh4nQO4zoy5bhZ+/dd+J5QsI9G4BBIOOGBmA/xXvf6eE8UKtVCvrz3JsBLdoO0H4fwjfe",
- "S5+4w06Hnu3jdMSvL5vu6PVYgvgL9v3V9cl8ltZbAm7cGNd/HjrDsecUA8eFHZpWLM92Mbd1+NsUsMLj",
- "zV/cMfmfUkLrF5t+3F9urprQTaIlXSYgYSJzbQ0eDBUc6+5xouu6Rc5vccNIK8n0Bm8qeK+I/RK9Afot",
- "cPeignugps73dOmG9m00l32wqFs3z1l9K+wTE4XZrzF+prEU66s1Lcoc3Lr46sHsb/D0y2fZwdPDv82+",
- "PHh+kMKz5y8ODuiLZ/TwxdNDePLl82cHcDj/4sXsSfbk2ZPZsyfPvnj+In367HD27IsXf3vg35KyiDbv",
- "NP0H1plLjt+eJucG2YYmtGTfw8ZWljJi7GtW0RRXovEr8tGR/+n/+hU2SUURPH/rfh25VJTRUutSHU2n",
- "q9VqEnaZLtDPSrSo0uXUj9OvfPv2tD4mt+nNyFF7AmpEAZnqROEYv717dXZOjt+eThqBGR2NDiYHk0Ms",
- "DVkCpyUbHY2e4k+4epbI96kTttHRx+vxaLoEmuul+6MALVnqP6kVXSxATlzxLvPT1ZOpP2WbfnQ+5vW2",
- "b+2cahcaCDoEVV6mH1uOehbCxRoo048+3zz4ZOv/Tz+irzX4exuNj3rNsuupr/Lqerg62tOPTWH7a7s6",
- "coidv9h0BhrUwR8bXxjf+1H2V7MgfBYlU+13EGrunmaGq6bXy7rIf/is+ft/0UeAP3TeRHtycPAv9rrT",
- "sxvOeKs924pSRyrrfU0z4jN8cOzDTzf2Kcc79UahEauwr8ej559y9qfciDzNCbYMct/7rP+JX3Kx4r6l",
- "2V2roqBy45exaikF/3QH6nC6UOjdSHZFNYw+oPscO+IaUC74jNaNlQu+DfZv5fKplMvn8Wjakxsu8M9/",
- "xv9Wp5+bOj2z6m5/depMOZtEOrXFwhsLz9en6RdtaVuzQzrZuTrkIZ7mclg9cudPFmykAFCd9CcyGxPx",
- "xWT9hYngnKats985oK1aU9/DRu1S4OdLIL868AnLfsXLbpgCMiZCkl9pnge/YVFQb7ZP4vq+KQqz81Xk",
- "ZoHG0JoD+Kt3mFnv3lgxG9kl+PJBlgat04h+ZmVTenwOgy/j2wrNoQZzInh4cHAQS8nu4uziNxZjjNOv",
- "RJLDFeR9Vg8h0akitO0d6cGXtvrFn0K/OyJ1+M7TDJp6UIPParcrGt0EuxPBH2iyosydmgWRdfv0WsG0",
- "f3Hepmq7i0H1HhF/pTwxIGO4NLeR77p5f35vplxvUXZqWelMrPiw4sJaCjR3lxHxemAdbtCCeAC1ppoQ",
- "/4RwvvFv4BOKSeOi0k08yHT25yudJ7Xq0rULxnEAXOU4ir11S4MzbvfUVV8JnjnM3tiXwTp6L/pCt8Ux",
- "vu5ji/6ustQ3NLbyyheSbP09NSJvzFX78mGCFOqHNDTQfOrShTu/2qS+4Mf280+RX6d1IYvox26gJvbV",
- "xVF8oyZCGkYckVN1rPH9B0NwvBvomNgE0I6mUzz9XQqlpyOjcNrBtfDjh5rGHz3nPa2vP1z/TwAAAP//",
- "+Zih2CWOAAA=",
+ "H4sIAAAAAAAC/+x9/3PbNrL4v4LR3UyafETJSZxe45nOfdwkbf2appnY7b13cV4LkSsJNQmwAGhJzfP/",
+ "/gYLgARJUJK/nPsy158Si8Bisdhd7C4Wi4+jVBSl4MC1Gh19HJVU0gI0SPyLpqmouE5YZv7KQKWSlZoJ",
+ "Pjry34jSkvHFaDxi5teS6uVoPOK0gKaN6T8eSfitYhKy0ZGWFYxHKl1CQQ1gvSlN6xrSOlmIxIE4tiBO",
+ "Xo6utnygWSZBqT6WP/B8QxhP8yoDoiXliqbmkyIrppdEL5kirjNhnAgORMyJXrYakzmDPFMTP8nfKpCb",
+ "YJZu8OEpXTUoJlLk0MfzhShmjIPHCmqk6gUhWpAM5thoSTUxIxhcfUMtiAIq0yWZC7kDVYtEiC/wqhgd",
+ "vR8p4BlIXK0U2CX+dy4BfodEU7kAPfowjk1urkEmmhWRqZ046ktQVa4VwbY4xwW7BE5Mrwn5vlKazIBQ",
+ "Tt59/YI8ffr0uZlIQbWGzDHZ4Kya0cM52e6jo1FGNfjPfV6j+UJIyrOkbv/u6xc4/qmb4L6tqFIQF5Zj",
+ "84WcvByagO8YYSHGNSxwHVrcb3pEhKL5eQZzIWHPNbGN73RRwvH/0FVJqU6XpWBcR9aF4FdiP0d1WNB9",
+ "mw6rEWi1Lw2lpAH6/iB5/uHj4/Hjg6u/vD9O/un+fPb0as/pv6jh7qBAtGFaSQk83SQLCRSlZUl5nx7v",
+ "HD+opajyjCzpJS4+LVDVu77E9LWq85LmleETlkpxnC+EItSxUQZzWuWa+IFJxXOjpgw0x+2EKVJKccky",
+ "yMZG+66WLF2SlCoLAtuRFctzw4OVgmyI1+Kz2yJMVyFJDF43ogdO6P8uMZp57aAErFEbJGkuFCRa7Nie",
+ "/I5DeUbCDaXZq9T1NitytgSCg5sPdrNF2nHD03m+IRrXNSNUEUr81jQmbE42oiIrXJycXWB/NxtDtYIY",
+ "ouHitPZRI7xD5OsRI0K8mRA5UI7E83LXJxmfs0UlQZHVEvTS7XkSVCm4AiJmv0KqzbL/x+kPb4iQ5HtQ",
+ "ii7gLU0vCPBUZMNr7AaN7eC/KmEWvFCLkqYX8e06ZwWLoPw9XbOiKgivihlIs15+f9CCSNCV5EMIWYg7",
+ "+Kyg6/6gZ7LiKS5uM2zLUDOsxFSZ082EnMxJQddfHowdOorQPCcl8IzxBdFrPmikmbF3o5dIUfFsDxtG",
+ "mwULdk1VQsrmDDJSQ9mCiRtmFz6MXw+fxrIK0PFABtGpR9mBDod1hGeM6JovpKQLCFhmQn50mgu/anEB",
+ "vFZwZLbBT6WESyYqVXcawBGH3m5ec6EhKSXMWYTHTh05jPawbZx6LZyBkwquKeOQGc2LSAsNVhMN4hQM",
+ "uN2Z6W/RM6rg88OhDbz5uufqz0V31beu+F6rjY0SK5KRfdF8dQIbN5ta/fdw/sKxFVsk9ufeQrLFmdlK",
+ "5izHbeZXs36eDJVCJdAihN94FFtwqisJR+f8kfmLJORUU55RmZlfCvvT91Wu2SlbmJ9y+9NrsWDpKVsM",
+ "ELPGNepNYbfC/mPgxdWxXkedhtdCXFRlOKG05ZXONuTk5dAiW5jXZczj2pUNvYqztfc0rttDr+uFHEBy",
+ "kHYlNQ0vYCPBYEvTOf6zniM/0bn83fxTlnmMpoaB3UaLQQEXLHjnfjM/GZEH6xMYKCylhqhT3D6PPgYI",
+ "/VXCfHQ0+su0iZRM7Vc1dXDNiFfj0XED5+5Hanra+XUcmeYzYdyuDjYdW5/w7vExUKOYoKHaweGrXKQX",
+ "N8KhlKIEqZldx5mB05cUBE+WQDOQJKOaThqnytpZA/yOHb/FfuglgYxscT/gf2hOzGcjhVR7882YrkwZ",
+ "I04EgabMWHx2H7EjmQZoiQpSWCOPGOPsWli+aAa3CrrWqO8dWT50oUVW55W1Kwn28JMwU2+8xuOZkDfj",
+ "lw4jcNL4woQaqLX1a2beXllsWpWJo0/EnrYNOoCa8GNfrYYU6oKP0apFhVNN/wVUUAbqXVChDeiuqSCK",
+ "kuVwB/K6pGrZn4QxcJ4+IaffHj97/OTnJ88+Nzt0KcVC0oLMNhoU+cztK0TpTQ4P+zNDBV/lOg7980Pv",
+ "QbXh7qQQIlzD3keizsBoBksxYuMFBruXkIOGt1RqlrISqXWShRRtQ2k1JBewIQuhSYZAMrvTI1S5kRW/",
+ "g4UBKYWMWNLIkFqkIk8uQSomIkGRt64FcS2MdrPWfOd3iy1ZUUXM2OjkVTwDOYmtp/He0FDQUKhd248F",
+ "fbbmDcUdQCol3fTW1c43Mjs37j4r3Sa+9xkUKUEmes1JBrNqEe58ZC5FQSjJsCOq2Tcig1NNdaXuQLc0",
+ "wBpkzEKEKNCZqDShhIvMqAnTOK51BiKkGJrBiJIOFZle2l1tBsbmTmm1WGpijFURW9qmY0JTuygJ7kBq",
+ "wKGsIwG2lR3ORt9yCTTbkBkAJ2LmvDbnT+IkKQZ7tD/HcTqvQav2NFp4lVKkoBRkiTu02omab2dXWW+h",
+ "EyKOCNejECXInMobIquFpvkORLFNDN3aSHGubh/r/YbftoDdwcNlpNJ4rpYLjEVkpNuouSES7kmTS5Do",
+ "8v1L188PctPlq8qBAxm3r5+xwogv4ZQLBangmYoCy6nSyS6xNY1axoeZQSApMUlFwANhh9dUaev4M56h",
+ "IWrVDY6DfXCIYYQHdxQD+Se/mfRhp0ZPclWpemdRVVkKqSGLzYHDestYb2BdjyXmAex6+9KCVAp2QR6i",
+ "UgDfEcvOxBKIahd5qiNj/clhkN/sA5soKVtINITYhsipbxVQNwxKDyBivJa6JzIOUx3OqSPh45HSoiyN",
+ "/Omk4nW/ITKd2tbH+sembZ+5qG70eibAjK49Tg7zlaWsPY5YUmMxImRS0AuzN6H9ZyMUfZyNMCaK8RSS",
+ "bZxvxPLUtApFYIeQDpje7sAzGK0jHB3+jTLdIBPsWIWhCQ/4AS2j9DvY3IHdWXZA9mn9EjRlOWQk+BnV",
+ "NSm7NvJOa7432j5m3jHJ9kSBZaMIlW5m2e1l9XaHipi9kenkTOEO1aOfQvTt4clZcORyB6ZpBKpRJ5QT",
+ "RNSHZI0FEDaBNU11vjH7ql7ChqxAAlHVrGBa29OwNjtpUSYhgKj/vWVEFwGxBw9+BfYJyZwiqGB6/aUY",
+ "j6ydtB2/s46l1CKHs9BKIfLJbhXTI0YUg/1EoBRm1Zk7fPUndJ6TWkg6qwnDX7W2fqBaZMYZkP8SFUkp",
+ "R4uv0lBvQUKiXsf93oxgdsx6TGZNq4ZCkEMB1pDFL48edSf+6JFbc6bIHFY+Y8E07JLj0SN0y94KpVvC",
+ "dUeq7iSymWBgwuxMzmjs6pTJXmrtJNtrJdtxhZOXflCUKaUc45rp31oBdCRzvc/cQx5ZUrXcPXeEu1dc",
+ "JgAdm7dddynE/A5my7J17Jgug3Vspo5x0Sl6YDyIjQI9iRp7pUEwclIP8iLHiIuYdwSSFGAkRS1ZaUA2",
+ "p4obDa2MpP/+7O9H74+Tf9Lk94Pk+f+bfvh4ePXwUe/HJ1dffvk/7Z+eXn358O9/jRnISrNZPOb3LVVL",
+ "g6lTnGt+wm3Ufi6kdas2zloT8/vGu8NiZjE95YMp7SVusQVhnFC72MhzxhjPN3ewx1pAREIpQaFGDJ1Y",
+ "Zb+KeZiQ5DhPbZSGoh8Hsl1/HrCC33kbsselgueMQ1IIHrPofsCv3+PHWG+rlQc64/441LdrY7fw76DV",
+ "HmefxbwtfXG1AzX0tk6PuoPF78LthADDVCwMYUBeEkrSnGGAQ3ClZZXqc07RhQrYNXIo4R3DYaf6hW8S",
+ "9+IjTrYDdc6pMjSsHatoaHgOkZDJ1wDet1bVYgFKd2y7OcA5d60YJxVnGscqzHoldsFKkHgyMLEtC7oh",
+ "c5pjDOB3kILMKt22djBjRGnjott4pBmGiPk5p5rkQJUm3zN+tkZwPjHD8wwHvRLyoqZCXOcvgINiKokr",
+ "0m/sV9SnbvpLp1sxfdd+9vrmvjcAj3ssn8FhfvLSeQInL9HcayKRPdzvLTxVMJ5EmexsCaRgHNPiOrxF",
+ "PjNGq2egh01M0636OddrbhjpkuYso/pm7NBVcT1ZtNLR4ZrWQnSiDX6uH2KHzwuRlDS9wLPH0YLpZTWb",
+ "pKKYeg9ouhC1NzTNKBSC47dsSks2VSWk08vHO8yxW+grElFXV+OR0zrqzrMcHODYhLpj1nE+/7cW5ME3",
+ "r87I1K2UemCTmyzoICsl4rS6uzWtgxwzeZucb7O7zvk5fwlzxpn5fnTOM6rpdEYVS9W0UiC/ojnlKUwW",
+ "ghwRB/Il1fSc91T84P0ZTD122JTVLGdpPPAyHtmc6D6E8/P3hkHOzz/0TgX6G6cbKiqjdoBkxfRSVDpx",
+ "SZ+JhBWVWQR1VSf9IWSbsr1t1DFxsC1HuqRSBz+uqmlZqiQXKc0TpamG+PTLMjfTD9hQEeyEuSpEaSG9",
+ "EjSa0WKD6/tGuHMRSVc+Y7hSoMgvBS3fM64/kOS8Ojh4CuS4LF8bmKcGj1+crjE8uSmhFd7YM8uoARYL",
+ "beDErUEFay1pUtIFqOj0NdASVx836gIDaXlOsFtIk/qkHkE1E/D0GF4Ai8e186hwcqe2l7+9E58CfsIl",
+ "xDZGOzUB8ZuulwH1rcgNk914uQIY0VWq9DIxsh2dlTIs7lemTupfGJ3sTykUW3AjBO7+wwxIuoT0AjJM",
+ "xYai1Jtxq7s/CHM7nFcdTNkrCzZdCvNqMRI0A1KVGXU2AOWbboKjAq19Vuc7uIDNmWjScq+T0Xg1HqX2",
+ "EkFieGZIUJFTg83IMGsotg5Gd/HdoarBlJYlWeRi5qS7Zoujmi98n2FBtjvkHQhxjClqMmzh95LKCCEs",
+ "8w+Q4AYTNfBuxfqx6bXCaXvmb7aiZAhk1+YS3U7EvLtr9JR6VInZxsmMqvgGAuaLWQ8jQ90zZz+SDari",
+ "DCYEr706xp3laIvUx91WsqlsRR7tPb4h1OJcApI3u7pHo02R0HxYUuWv7uANJy8we220Qwdz9cGq4SJ/",
+ "sor+XmM5MTNuDpd0iP7D+eYnwXFpcI2pzib3iq0rDOP6ZoG9Ueyzzn2quc8vH42vlSs+HrkMnthyCI5W",
+ "RgY5LOzEbWPPKA61BypYIIPHD/N5zjiQJHbySpUSKbN3rxpd7sYAY4Q+IsQGeMjeEGJsHKCNhwUImLwR",
+ "oWzyxXWQ5MDwdIF62HjMEPwNu6PNzdVuZ97uNEP7uqMRonFz9cIuYz8KNR5FVdKQh9AO79smM+i5VDEW",
+ "NaqpH5fpR38U5IDbcdLSrMlFLFpnrApANjz13QK3gXzG5maTfxicGUlYMKWh8ZuNtPpA0P3GLi6FhmTO",
+ "pNIJuuzR6ZlGXys0Br82TePqp3Omo2wMIK59cNgL2CQZy6v4artxv3tphn1T+0+qml3ABjcZoOmSzPAu",
+ "c/Skd8vQNvtg64Rf2wm/pnc23/14yTQ1A0shdGeMT4SrOvpkmzBFGDDGHP1VGyTpFvWCvs9LyHUsZT3w",
+ "ydCrNQrT3qkYjBr0hCnzsLeZXwEWw5rXQorOJTB0t86C4Ukc5RlhOrgK3M+EHZABWpYsW3d8eAt14NgO",
+ "DfhrGOrW4o8cRY1qYDsoEPjrsWQrCT7mYJc02DPtpW4ezm2yF2WM9RUSJFAI4VBM+ZIkfUIZ1sZ787to",
+ "dQY0/w42P5m2OJ3R1Xh0O5c/RmsHcQet39bLG6UzxrKtC9iK4F2T5LQspbikeeICI0OsKcWlY01s7uMo",
+ "96zq4u732avj128d+sb3zIFKGyrbOitsV34yszIesZADAuJLHhhr1fvO1hALFr++RxYGU1ZLcNfLA1vO",
+ "aDHHXFa8mkBZIIouuDKPH6ntDJW4mJ6d4pbYHpR1aK/xiG1krx3No5eU5d4V9dgOHH/h5Jp46rW1Qgjg",
+ "1lHBILib3Km66Ul3XDoa7tqhk8KxtlyAL2yNB0UE7+ZVGRMSPVxk1YJuDAfZ4HRfOfGqSIz4JSpnaTxs",
+ "wWfKMAe3MV/TmGDjAWPUQKzYwBECr1gAyzRTe5yWdZAMxogSE0NKW2g3E644V8XZbxUQlgHX5pNEqewI",
+ "qpFLX+Clv50a26E/lgNsi7004G9jYxhQQ9YFIrHdwAgjzJFcXe9w+onWoXHzQxAYvMZBVThib0vccsjk",
+ "+MNxsz3tX7YjxWEtrb7+M4xh6y7sLuTlwxZLi+jAGNHCXIO7xfHwTmF6X2OPaLYERDfcDMa2bE+uRARM",
+ "xVeU2zo7pp+loeutwMYMTK+VkHi1REH0lJ6pZC7F7xD3ZOdmoSKpn46UaC5i70kkZb+rROuoTFNBzdM3",
+ "xGOQtYcsueAjaR8kDkg4cnkQOscb4D7ARblla1sTqHV8HReOMOVkauE3wuFw7qXp5HQ1o7Hr8cagMjgd",
+ "N4c0rVCcFsR39qvgooYN7wXnPXVbZu9jlCCb/Oz+3b8bGkefFstnkLKC5nErKUPqt2+fZWzBbGGlSkFQ",
+ "uccBshXpLBe56kf2GKwhzcmcHIyD2mBuNTJ2yRSb5YAtHtsWM6pw16rDrXUXMz3geqmw+ZM9mi8rnknI",
+ "9FJZwipBagMWXbk69j0DvQLg5ADbPX5OPsOov2KX8NBQ0dkio6PHzzEtxf5xENvsXAW1bXolQ8XyD6dY",
+ "4nyMxx4WhtmkHNRJ9G6QLXs5rMK2SJPtuo8sYUun9XbLUkE5XUD8NLfYgZPti6uJQcMOXXhma7YpLcWG",
+ "MB0fHzQ1+mkgNc2oP4sGSUVRMF0YAdKCKFEYfmrK8thBPThbAM6VyvB4+Y94xFJatwG6DvP9BojtXh6b",
+ "NR6EvaEFtMk6JtReoctZc0nZKcQJOfEXcbF2SF0yxNLGjGWmjiadWUIskcC4Rieq0vPkC5IuqaSpUX+T",
+ "IXST2eeHkXop7RIJ/HqI3zvdJSiQl3HSywG299aE60s+44InhdEo2cMmFTSQymhJAqFpHk9q8Rq9m9O0",
+ "HfS+BqiBkgyyW9ViNxpo6lsxHt8C8JasWM/nWvx47ZndO2dWMs4etDIr9OO7187KKISMlWVoxN1ZHBK0",
+ "ZHCJ+TXxRTIwb7kWMt9rFW6D/R97ytJ4ALVZ5mU55gh8VbE8+6lJbe+UnJKUp8voGcfMdPy5qZFXT9nK",
+ "cbQKwJJyDnkUnN0zf/Z7a2T3/1XsO07B+J5tu6Wk7HQ7k2sQb6PpkfIDGvIynZsBQqq2c33r5LB8ITKC",
+ "4zRXzhsu61fHCgrg/FaB0rF6vfjB5lViLMv4Bbb+CgGeoVU9Id/YGtdLIK0LqmjNsqLK7WVHyBYgXZC1",
+ "KnNBszExcM5eHb8mdlTbx9YitfVfFmjMtWfRiWEE9Sn2S3XyRebiaZj7w9meF2ZmrTReUFeaFmUsw960",
+ "OPMNMI0/jOuimRdSZ0JeWgtbefvNDmL4Yc5kYSzTGprV8cgT5j9a03SJpmtLmwyz/P6FizxXqqAsaF1h",
+ "sS4xgXJn8Ha1i2zpojERxr9YMWVLG8MltJP66xsuznXySf7t6cmKc8spUR297QbWTcjukbOH9z70G8Ws",
+ "Q/hrGi5KVDKF69ZxOsVe0SvU3aJQvXqg9jZhXY/Pl6xPKRecpXiBOSimXKPsyiTvcy6yx13vbljKi7iT",
+ "0IhwRUtR1elBjoqDxam8InSE6wdmg69mUS132D811uNdUk0WoJXTbJCNfREzFy9hXIErGYIVswM9KWTr",
+ "rAk1ZPT4MqnD3NdkI0zxHTCAvzbf3jj3CNPyLhhHQ8iRzWUA2ogGVnHVxnpimiwEKDef9pVc9d70meC1",
+ "1AzWHya+6ivCsEc1Ztr2XLIP6tifUrpTQdP2hWlL8Fim+bmVTmwHPS5LN2j0Rm29wrGCaYMEjpw2JT7c",
+ "HxC3hh9C28JuW9MLcD81jAaXeDgJJe7DPcaoa891SlNe0ryyHIUtiE3riV4DYzyCxmvGoalJHNkg0uiW",
+ "gAuD8jrQT6WSamsC7qXTzoDmeCIZU2hKuxDtbUF1FhhJgnP0YwwvY1M2b0Bx1A0aw43yTV0K2XB3YEy8",
+ "wBrsjpD9InhoVTkjKsPEzU5ZvJjiMIrbl6lsbwB9MejbRLa7ltRKznV2oqELL6mI2Zuv1pBW9sBd2NIY",
+ "tCxJijdIg/0iGtFkyjhPxSyPFcGpPwYVLDHJdrbBf2MFS4ZJ4k7Er52T5Y+/seO1DdY2pJ65aZgpUWxx",
+ "w2Vu+t/pOudi0UbkfgMKW2U8ZJmYdL8yanO4xuixV6z1FUVMQxK+vDE6TfXlmrZMoiKPOqVNpdrtTvlw",
+ "zdkxqv6BZMR3ze17ancXe8YwlJKYDmbQUu3S4zUlzVX3vmDaQrExCDafwRaotY+9ROMrQzkMNoXBfO71",
+ "3s8u6lmZCHsrQX1yTB+h73zmHSkpcwdojcT2KetydPtZ0/tk7zUL3J2Ey3xFILGZvN1ZMKzFIb3M5yD3",
+ "3VYmmux/+bU5kMczEyxkuwDuKtm2cxr3zqyazyHV7HJHpvk/jMXaZDGPvU1ri4oHieesztTxbwJd09Ru",
+ "ENqWCL4Vn+CG/a3RGcozvYDNA0Xa9ZRfRuXPMepNrn0hBbD6QGJYRKhY9N864S4gy1TNGUgFf9pmu0NT",
+ "+GWwKGad7hWr87PXWJ4lCXV2Vl1EZ6gOp4hZ8XuNZbrukXjVZG9jSsZQMnq/Stzw7mXrAqq6oHH96E+Q",
+ "TGGctW6xpZW7dob3Auq4k7+ABsr/5q/Q2FHsY1JN2U6M8q2ozHyLqNnqLeJkIL2rmzBt89JZHOl5PTJr",
+ "ciP6OcORO9GYC5PmQjG+SIZSptrpCHUs/4Gyhy4YIMDye4jXHKQr16v9W12JFj6XYhse20jhnoq4CRHU",
+ "YMksi9zgxcV3zc1MLARD7Utt7kApnCCRUFCDnQzuTw6PuY3YL+x3nyTrC4F0yu5E4Hp+TXZegPRZMUz1",
+ "iBhy/Zy43XJ38u1N/AXGua2GrmKXKbkhZRhJKqXIqtRu0KFggPer9r4PvEWVRK38tD/LnsGW4+3418FV",
+ "hgvYTK3RlC4pb8oUtMXaFkW3cwgu3nVW+05dqbjBmi/sBBZ3gucf6QmNR6UQeTIQOjrp3wntysAFSy8g",
+ "I2bv8OfJA3UqyWcYsajPBlbLjS8DXpbAIXs4IcT4UkWpN/6YoF1yqDM4f6C3jb/GUbPKXtN2TtrknMdT",
+ "Iezbh7fUbx7Mdq1mHwO+5VAWyPaB9JoPqDa6ilRt3ffdnEjgvltJs2Eqi0XMSrnhXbm95LvvqEVYP7zl",
+ "sMP/uWh5dbaoRidYLyTcsXcXRCmv6d3172/sOz2cB2q1SkF/nnsvQIu2A7Tfh/BNaKJP3OGIgp7tE1GI",
+ "1yYw3TGkYQmC1TMIokp+efwLkTB3D7E+eoQDPHo0dk1/edL+bLyvR4+iknlvwYzW8zxu3BjH/DR0uGsP",
+ "MAfyCDrrUbE828UYrayQprId5j387PJn/pDaej9bF7kvqq7M2HXCqN1FQMJE5toaPBgqyPfYI9XDdYsk",
+ "duBmk1aS6Q1eYfIeFfs5ejX8mzoI4958qxPBXR6yfW7UpSU1IZvmhchvhH21qTB7PQbWNZaofrWmRZmD",
+ "E5QvH8z+Bk+/OMwOnj7+2+yLg2cHKRw+e35wQJ8f0sfPnz6GJ188OzyAx/PPn8+eZE8On8wOnxx+/ux5",
+ "+vTw8ezw8+d/e+CfZ7SINk8f/icWoEyO354kZwbZhia0ZFja/QrN6bnwxexoipJofJJ8dOR/+v9ewiap",
+ "KIIX5d2vI5ejNlpqXaqj6XS1Wk3CLtMF+miJFlW6nPpx+hXB357U+TP23gOuqE2NMKyAi+pY4Ri/vXt1",
+ "ekaO355MGoYZHY0OJgeTx1gztgROSzY6Gj3Fn1B6lrjuU8dso6OPV+PRdAk010v3RwFastR/Uiu6WICc",
+ "uKp+5qfLJ1N//D796PzTq23f2pctXFgh6BCUf5p+bDn5WQgXiyNNP/qLKMEn+/jN9CP6aYO/t9H4qNcs",
+ "u5r6sJDr4R6RmH5sXnW5stKRQyykY/OcaPAIzNj40fiEnrK/GoHw6dVMtR8Bqlf3JDOranq9qF+4CW7R",
+ "H73/N31X/0PnmdEnBwf/Zg8mHl5zxltt4dbxVaTk5lc0Iz71D8d+fH9jn3CMjBuFRqzCvhqPnt3n7E+4",
+ "YXmaE2wZXIrpL/2P/IKLFfctze5aFQWVGy/GqqUU/LtVqMPpQqFnJNkl1TD6gK537Ox7QLngy5TXVi74",
+ "3OafyuW+lMun8Q7pk2sK+Kc/4z/V6aemTk+tuttfnTpTzmaXT+0rAo2F16teuYBomjsmnNNtTzt1New3",
+ "oHsvVY1uqWL+sEer/r3l5PDg8P4wCCOcb4QmX+NB1CcqrfsJzjYbqOMTZVmPva3iB6W/EtlmC4UKtShd",
+ "LmjEIpkxblDu7yv9yvq9N6QuYEPs4awPwrtHG9uW0NUtpf+Tfe7qz132D5TbZwdP72/4U5CXLAVyBkUp",
+ "JJUs35AfeX175uZOVJZFk83a4tbTI8b2T0UGC+CJUxLJTGQbXyWmBfACbIC2ZxZMP7ZLPdpg02AQyL5E",
+ "Xz8e0Ud6tiEY1W2rtsgD9t/B5qvNycu+fxbxwLoobvXDuvI/4Prc6Mn8P4X9U9uk92bY2D4dtZd9qKK7",
+ "94z91c3Y5Waqoy/O7rSq/1AR+fNZXv8s758uwp8uwk20zzcQkXtUEFv0jtum1bLSmVjZKgLRoCkWE6S5",
+ "q8aD9XHqYzUtiAfQZKuTH9z1jHxDSikuWWY0o2YFGC1VKxnT2ecgdd5Nr99uWTCOA2AhfRzFlp2iQR6o",
+ "e8980g/QOszeWNMmpt1+qwA9FqfeHI6jcStC51YkUuTp1iqtH1C72rZW/iWF1t/TFWU6mQvp0sCRQv2j",
+ "Ow00n7r7sp1f7a224Mf288+RX6d1Jcfox+6BZOyrOy/0jZpMgPBkHVeqPlN//8EQHIvjuEVsDoqPplPM",
+ "kFwKpaejq/HHziFy+PFDTeOP9cbmaH314ep/AwAA//+cgLi/YKAAAA==",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
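
The swaggerSpec chunks above are the JSON spec, gzipped, base64 encoded, and split across string literals. A sketch of the decode path the generated GetSwagger helper performs; the chunk below is a truncated placeholder, so the call reports an error rather than the full spec:

    package main

    import (
        "bytes"
        "compress/gzip"
        "encoding/base64"
        "fmt"
        "io/ioutil"
        "strings"
    )

    // decodeSpec reverses the encoding used for swaggerSpec: join the
    // chunks, base64-decode, then gunzip the JSON document.
    func decodeSpec(chunks []string) ([]byte, error) {
        zipped, err := base64.StdEncoding.DecodeString(strings.Join(chunks, ""))
        if err != nil {
            return nil, err
        }
        zr, err := gzip.NewReader(bytes.NewReader(zipped))
        if err != nil {
            return nil, err
        }
        defer zr.Close()
        return ioutil.ReadAll(zr)
    }

    func main() {
        // Truncated placeholder chunk; pass the real swaggerSpec slice instead.
        spec, err := decodeSpec([]string{"H4sIAAAAAAAC/w=="})
        fmt.Println(len(spec), err)
    }
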
diff --git a/daemon/algod/api/server/v2/generated/private/types.go b/daemon/algod/api/server/v2/generated/private/types.go
index e4c80509b..e8ba9221e 100644
--- a/daemon/algod/api/server/v2/generated/private/types.go
+++ b/daemon/algod/api/server/v2/generated/private/types.go
@@ -340,6 +340,34 @@ type EvalDeltaKeyValue struct {
Value EvalDelta `json:"value"`
}
+// ParticipationKey defines model for ParticipationKey.
+type ParticipationKey struct {
+
+ // Address the key was generated for.
+ Address string `json:"address"`
+
+ // When registered, this is the first round it may be used.
+ EffectiveFirstValid *uint64 `json:"effective-first-valid,omitempty"`
+
+ // When registered, this is the last round it may be used.
+ EffectiveLastValid *uint64 `json:"effective-last-valid,omitempty"`
+
+ // The key's ParticipationID.
+ Id string `json:"id"`
+
+ // AccountParticipation describes the parameters used by this account in the consensus protocol.
+ Key AccountParticipation `json:"key"`
+
+ // Round when this key was last used to propose a block.
+ LastBlockProposal *uint64 `json:"last-block-proposal,omitempty"`
+
+ // Round when this key was last used to generate a state proof.
+ LastStateProof *uint64 `json:"last-state-proof,omitempty"`
+
+ // Round when this key was last used to vote.
+ LastVote *uint64 `json:"last-vote,omitempty"`
+}
+
// PendingTransactionResponse defines model for PendingTransactionResponse.
type PendingTransactionResponse struct {
@@ -406,7 +434,7 @@ type TealValue struct {
// \[tb\] bytes value.
Bytes string `json:"bytes"`
- // \[tt\] value type.
+ // \[tt\] value type. Value `1` refers to **bytes**, value `2` refers to **uint**
Type uint64 `json:"type"`
// \[ui\] uint value.
@@ -588,6 +616,16 @@ type NodeStatusResponse struct {
TimeSinceLastRound uint64 `json:"time-since-last-round"`
}
+// ParticipationKeyResponse defines model for ParticipationKeyResponse.
+type ParticipationKeyResponse struct {
+
+ // Detailed description of a participation key
+ ParticipationKey string `json:"participationKey"`
+}
+
+// ParticipationKeysResponse defines model for ParticipationKeysResponse.
+type ParticipationKeysResponse []ParticipationKey
+
// PendingTransactionsResponse defines model for PendingTransactionsResponse.
type PendingTransactionsResponse struct {
@@ -598,6 +636,13 @@ type PendingTransactionsResponse struct {
TotalTransactions uint64 `json:"total-transactions"`
}
+// PostParticipationResponse defines model for PostParticipationResponse.
+type PostParticipationResponse struct {
+
+ // encoding of the participation id.
+ PartId string `json:"partId"`
+}
+
// PostTransactionsResponse defines model for PostTransactionsResponse.
type PostTransactionsResponse struct {
@@ -661,22 +706,6 @@ type TransactionParametersResponse struct {
// VersionsResponse defines model for VersionsResponse.
type VersionsResponse Version
-// RegisterParticipationKeysParams defines parameters for RegisterParticipationKeys.
-type RegisterParticipationKeysParams struct {
-
- // The fee to use when submitting key registration transactions. Defaults to the suggested fee.
- Fee *uint64 `json:"fee,omitempty"`
-
- // value to use for two-level participation key.
- KeyDilution *uint64 `json:"key-dilution,omitempty"`
-
- // The last round for which the generated participation keys will be valid.
- RoundLastValid *uint64 `json:"round-last-valid,omitempty"`
-
- // Don't wait for transaction to commit before returning response.
- NoWait *bool `json:"no-wait,omitempty"`
-}
-
// ShutdownNodeParams defines parameters for ShutdownNode.
type ShutdownNodeParams struct {
Timeout *uint64 `json:"timeout,omitempty"`
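
A sketch of populating the new generated model; the types are re-declared locally, trimmed to the fields the example touches, so the snippet stands alone, and all values are placeholders:

    package main

    import "fmt"

    // Local trims of the generated models above, for a standalone example.
    type AccountParticipation struct{}

    type ParticipationKey struct {
        Address             string
        Id                  string
        Key                 AccountParticipation
        EffectiveFirstValid *uint64
        LastVote            *uint64
    }

    func uint64Ptr(v uint64) *uint64 { return &v }

    func main() {
        k := ParticipationKey{
            Address:             "EXAMPLE-ADDRESS", // placeholder
            Id:                  "EXAMPLE-ID",      // placeholder
            EffectiveFirstValid: uint64Ptr(1000),
            // LastVote stays nil until the key has voted, which is why the
            // generated optional rounds are *uint64 with omitempty.
        }
        fmt.Println(k.Id, *k.EffectiveFirstValid, k.LastVote == nil)
    }
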
diff --git a/daemon/algod/api/server/v2/generated/routes.go b/daemon/algod/api/server/v2/generated/routes.go
index dbecc6dc9..80a74df41 100644
--- a/daemon/algod/api/server/v2/generated/routes.go
+++ b/daemon/algod/api/server/v2/generated/routes.go
@@ -616,172 +616,179 @@ func RegisterHandlers(router interface {
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9a3PbuJLoX8HVblUeK0rOa86Jq6b2euLMjO9JMqnYM3vuxrmzENmScEwCPABoS5Pr",
- "/76FBkCCJCjJj7xm/SmxiEej0Wj0C90fR6koSsGBazXa/zgqqaQFaJD4F01TUXGdsMz8lYFKJSs1E3y0",
- "778RpSXji9F4xMyvJdXL0XjEaQFNG9N/PJLwz4pJyEb7WlYwHql0CQU1A+t1aVrXI62ShUjcEAd2iKPD",
- "0eWGDzTLJCjVh/IXnq8J42leZUC0pFzR1HxS5ILpJdFLpojrTBgnggMRc6KXrcZkziDP1MQv8p8VyHWw",
- "Sjf58JIuGxATKXLow/lCFDPGwUMFNVD1hhAtSAZzbLSkmpgZDKy+oRZEAZXpksyF3AKqBSKEF3hVjPbf",
- "jxTwDCTuVgrsHP87lwB/QKKpXIAefRjHFjfXIBPNisjSjhz2Jagq14pgW1zjgp0DJ6bXhLyulCYzIJST",
- "dz++IE+ePHluFlJQrSFzRDa4qmb2cE22+2h/lFEN/nOf1mi+EJLyLKnbv/vxBc5/7Ba4ayuqFMQPy4H5",
- "Qo4OhxbgO0ZIiHENC9yHFvWbHpFD0fw8g7mQsOOe2Ma3uinh/F90V1Kq02UpGNeRfSH4ldjPUR4WdN/E",
- "w2oAWu1LgylpBn2/lzz/8PHR+NHe5b+8P0j+0/357Mnljst/UY+7BQPRhmklJfB0nSwkUDwtS8r7+Hjn",
- "6EEtRZVnZEnPcfNpgaze9SWmr2Wd5zSvDJ2wVIqDfCEUoY6MMpjTKtfET0wqnhs2ZUZz1E6YIqUU5yyD",
- "bGy478WSpUuSUmWHwHbkguW5ocFKQTZEa/HVbThMlyFKDFzXwgcu6OtFRrOuLZiAFXKDJM2FgkSLLdeT",
- "v3Eoz0h4oTR3lbraZUVOlkBwcvPBXraIO25oOs/XROO+ZoQqQom/msaEzclaVOQCNydnZ9jfrcZgrSAG",
- "abg5rXvUHN4h9PWQEUHeTIgcKEfk+XPXRxmfs0UlQZGLJeilu/MkqFJwBUTM/gGpNtv+f45/eUOEJK9B",
- "KbqAtzQ9I8BTkQ3vsZs0doP/Qwmz4YValDQ9i1/XOStYBOTXdMWKqiC8KmYgzX75+0ELIkFXkg8BZEfc",
- "QmcFXfUnPZEVT3Fzm2lbgpohJabKnK4n5GhOCrr6fm/swFGE5jkpgWeML4he8UEhzcy9HbxEiopnO8gw",
- "2mxYcGuqElI2Z5CRepQNkLhptsHD+NXgaSSrABw/yCA49SxbwOGwitCMObrmCynpAgKSmZBfHefCr1qc",
- "Aa8ZHJmt8VMp4ZyJStWdBmDEqTeL11xoSEoJcxahsWOHDsM9bBvHXgsn4KSCa8o4ZIbzItBCg+VEgzAF",
- "E25WZvpX9Iwq+O7p0AXefN1x9+eiu+sbd3yn3cZGiT2SkXvRfHUHNi42tfrvoPyFcyu2SOzPvY1kixNz",
- "lcxZjtfMP8z+eTRUCplACxH+4lFswamuJOyf8ofmL5KQY015RmVmfinsT6+rXLNjtjA/5fanV2LB0mO2",
- "GEBmDWtUm8Juhf3HjBdnx3oVVRpeCXFWleGC0pZWOluTo8OhTbZjXpUwD2pVNtQqTlZe07hqD72qN3IA",
- "yEHcldQ0PIO1BAMtTef4z2qO9ETn8g/zT1nmMZwaAnYXLRoFnLHgnfvN/GSOPFidwIzCUmqQOsXrc/9j",
- "ANC/SpiP9kf/Mm0sJVP7VU3duGbGy/HooBnn9mdqetr1dRSZ5jNh3O4ONh1bnfD24TGjRiFBQbUDww+5",
- "SM+uBUMpRQlSM7uPMzNO/6Tg8GQJNANJMqrppFGqrJw1QO/Y8Wfsh1oSyMgV9wv+h+bEfDankGovvhnR",
- "lSkjxInA0JQZic/eI3Ym0wAlUUEKK+QRI5xdCcoXzeSWQdcc9b1Dy4fuaJHdeWnlSoI9/CLM0hut8WAm",
- "5PXopUMInDS6MKFm1Fr6NStv7yw2rcrE4SciT9sGnYEa82OfrYYY6g4fw1ULC8eafgIsKDPqbWChPdBt",
- "Y0EUJcvhFs7rkqplfxFGwHnymBz/fPDs0ePfHz/7ztzQpRQLSQsyW2tQ5L67V4jS6xwe9FeGDL7KdXz0",
- "7556Dao97lYMIcD12LucqBMwnMFijFh7gYHuUK5lxW8BhSClkBGZF0lHi1TkyTlIxUTEfPHWtSCuheFD",
- "Vu7u/G6hJRdUETM3qmMVz0BOYpg3ehZe6RoKte2isEOfrHiDGzcglZKueztg1xtZnZt3lz1pI99L94qU",
- "IBO94iSDWbUI7ygyl6IglGTYERniG5HBsaa6UrfABZrBGmDMRoQg0JmoNKGEi8wcaNM4zh8GbJloREHb",
- "jw5Zjl7a+2cGRjpOabVYamLEShHb2qZjQlO7KQneFWpA9at1dtvKTmftZLkEmq3JDIATMXP6ldP8cJEU",
- "zTLae1wcd2rAqnWCFlylFCkoBVni3EtbQfPt7C7rDXhCwBHgehaiBJlTeU1gtdA03wIotomBW4sTTint",
- "Q73b9Js2sDt5uI1UGh3TUoGRXczpzkHDEAp3xMk5SFTOPun++Umuu31VOeA6cTfwCSvM8SWccqEgFTxT",
- "0cFyqnSy7diaRi0xwawgOCmxk4oDDxgIXlGlrYrOeIYio2U3OA/2wSmGAR68UczIv/nLpD92avgkV5Wq",
- "bxZVlaWQGrLYGjisNsz1Blb1XGIejF1fX1qQSsG2kYewFIzvkGVXYhFEtbMR1Tas/uLQHG/ugXUUlS0g",
- "GkRsAuTYtwqwG5qPBwAx+kXdEwmHqQ7l1Dbr8UhpUZbm/Omk4nW/ITQd29YH+tembZ+4qG74eibAzK49",
- "TA7yC4tZ6zhYUiPb4cikoGfmbkJJzdoS+jCbw5goxlNINlG+OZbHplV4BLYc0gEh2bkmg9k6h6NDv1Gi",
- "GySCLbswtOABif2ttYCfBHbzW5BaIqMaSqOcoOjm7WrmcgibwIqmOl8blquXsCYXIIGoalYwra1Loy3U",
- "aFEm4QBRJWrDjE6NtdZjL5Luolcf41DB8vrC6Xhkr9DN8J10LtEWOtzlXQqRT7ZTXw8ZUQh2EYIPSCnM",
- "rjPnQfNulpwp3QPSXahow6gP8j3VQjOugPxfUZGUchQGKg01dxISjzxeBWYGw0zrOZm9dRsMQQ4FWBkH",
- "vzx82F34w4duz5kic7jwbmfTsIuOhw9RYn8rlL7xCeiQ5uoowmRQtTQcKxIqZBTIyVY1E8fdSbsMhj46",
- "9BPiYVKGo9iFSyHmt7Balq1izoYMVrGVup1DgfGeka7WCvQkehGWBsCIvxHkWY7aqJh3KJIUYEhFLVlp",
- "hmx8I2sNrbiK/3f/3/ffHyT/SZM/9pLn/zb98PHp5YOHvR8fX37//f9v//Tk8vsH//6vMeFBaTaLWy5+",
- "pmppIHWcY8WPuLU9zoW0Iufa3WRi/rnh7pCY2UyP+WBJuxDd29iGME6o3WykOSOo5OtbuGTsQERCKUEh",
- "SwgFfGW/inkYVuEoT62VhqKvI9uuvw9ICO/8/dqjUsFzxiEpBId1NJKQcXiNH2O9LVsa6IwXxFDfrvzR",
- "gr8DVnueXTbzpvjF3Q7Y0Ns6yOMWNr87bsc8EgaUoHoHeUkoSXOGyp/gSssq1aecongZkGvEtOqF5mGF",
- "44VvEtdwIgqIG+qUU2VwWAudUbPZHCLq5I8AXu9Q1WIBSneEmznAKXetGCcVZxrnKsx+JXbDSpBo35zY",
- "lgVdkznNUT/6A6Qgs0q3r3v0eytt1BdrqzHTEDE/5VSTHIwq95rxkxUO593LnmY46Ashz2osxHn+Ajgo",
- "ppI4I/3JfkV+6pa/dLwVgxDtZ89vPvcF4GGPeWUd5EeHThQ+OkR5p7HS9GD/bKp7wXgSJbKTJZCCcQzu",
- "6dAWuW+kNk9ADxp7j9v1U65X3BDSOc1ZRvX1yKHL4npn0Z6ODtW0NqKjifm1foi50BYiKWl6hh6U0YLp",
- "ZTWbpKKYehVguhC1OjDNKBSC47dsSks2VSWk0/NHW8SxG/ArEmFXl+OR4zrq1n21buDYgrpz1jYQ/7cW",
- "5N5PL0/I1O2UumdDNOzQgW89orW5FwItI7dZvA0xtjEqp/yUH8KccWa+75/yjGo6nVHFUjWtFMgfaE55",
- "CpOFIPvEDXlINT3lPRY/+AoAAygdNGU1y1lKzsKruDmaNrKzP8Lp6XtDIKenH3oW0/7F6aaKnlE7QXLB",
- "9FJUOnGha4mECyqzCOiqDl3CkW3g6aZZx8SNbSnShca58eOsmpalSnKR0jxRmmqIL78sc7P8gAwVwU7o",
- "cSdKC+mZoOGMFhrc3zfC2YwlvfBxj5UCRf6roOV7xvUHkpxWe3tPgByU5Ssz5rGB478crzE0uS6hpd/v",
- "GCvRDBbT7XHhVqCClZY0KekCVHT5GmiJu48XdYFm6Twn2C3ESe1vxKGaBXh8DG+AhePK0SC4uGPby79B",
- "iC8BP+EWYhvDnRpj4XX3ywz1s8gNkV17u4IxortU6WViznZ0VcqQuN+ZOjR5YXiyt+AqtuDmELgo7hmQ",
- "dAnpGWQYUApFqdfjVnfvJHA3nGcdTNnAaxv0gdGBaAqZAanKjDoZgPJ1N0xLgdY+Nu0dnMH6RDTBhVeJ",
- "y7ocj1IbCp0Ymhk6qEipwWVkiDU8tm6M7uY7h5OBlJYlWeRi5k53TRb7NV34PsMH2d6Qt3CIY0RRo2ED",
- "vZdURhBhiX8ABddYqBnvRqQfW15JpWYpK+36d4tCe9vqYwbZdrlErxMx794aPaYeZWK2cTKjKn6BgPli",
- "9sOcoa4/zs9krYq4ggnBx3uOcGc5yiK1K9CebCpR6PLLtq+RhkCLUwlI3tzqHow2RkLxYUmVf4CA7zT8",
- "gdnpoh1yWtROJ0NF3uuE+l4jOTEzbw7ndAj/w1GzR4ErKXiMUcfEesbWPQzjOj7avov0sbM+YNZHyY7G",
- "V4p4HY9cdENsOwRHKSODHBZ24baxJxQH2j0VbJCB45f5PGccSBLzSlGlRMrsC5KGl7s5wAihDwmxBh6y",
- "8wgxMg7ARms5DkzeiPBs8sVVgOTA0LxO/dhoZw/+hu3W5uaBqhNvt4qhfd7RHKJxE0But7FvhRqPoixp",
- "SENotSK2yQx6KlWMRA1r6ttl+tYfBTngdZy0OGtyFrPWGakCkAyPfbdAbSD32dxc8g8Cp4mEBVMaGr3Z",
- "nFZvCPq8totzoSGZM6l0gip7dHmm0Y8KhcEfTdM4+2mhitgXbiyLcx+c9gzWScbyKr7bbt6/HZpp39T6",
- "k6pmZ7DGSwZouiQzfJFpbqHW9KbNhqmtZ3bjgl/ZBb+it7be3WjJNDUTSyF0Z45vhKo6/GTTYYoQYIw4",
- "+rs2iNIN7AV1n0PIdSzwNtDJUKs1DNNGhg9aDXqHKfNjbxK/AiiGOa8dKbqWQNDduAqGnjjKM8J08KCx",
- "HyU4cAZoWbJs1dHh7agDbjsU4K8gqFuJP+KKGtWDbcFAoK/HAlEkeJuD3dLgzrRPU3m4tslOmDHSV4iQ",
- "gCGEUzHlEyv0EWVIG1//bsPVCdD8b7D+zbTF5Ywux6ObqfwxXLsRt+D6bb29UTyjLduqgC0L3hVRTstS",
- "inOaJ84wMkSaUpw70sTm3o7ymVldXP0+eXnw6q0D3+ieOVBpTWUbV4Xtym9mVUYjFnLggPiH20Za9bqz",
- "FcSCza9fw4TGlIsluEeygSxnuJgjLnu8GkNZcBSdcWUed6ltNZU4m55d4gbbHpS1aa/RiK1lr23No+eU",
- "5V4V9dAOuL9wcY099cpcIRzgxlbBwLib3Cq76Z3u+OloqGsLTwrn2vCMt7Av1RURvBtYZERI1HCRVAu6",
- "NhRkjdN95sSrIjHHL1E5S+NmCz5Thji4tfmaxgQbDwijZsSKDbgQeMWCsUwztYO3rANkMEcUmWhS2oC7",
- "mXAphirO/lkBYRlwbT5JPJWdg2rOpU9T0b9OjezQn8sNbFNWNMPfRMYwQw1JFwjEZgEjtDD3wD2sFU6/",
- "0No0bn4IDINXcFSFM/auxA1OJkcfjpqtt3/ZthSHGYH6/M8Qhn09vj0dkTdbLC2gA3NE0wsN3hYHwzeF",
- "6X2FO6K5EhDc8DIY2+QjuRKRYSp+QbnNFmL6WRy63gqszcD0uhASw+4VRL30TCVzKf6AuCY7NxsViX10",
- "qERxEXtPIuHMXSZaW2WaPFAevyEcg6Q9JMkFH0nbkThwwpHKA9M5vmP1Bi7KLVnbzCYt93X8cIQhJ1M7",
- "fnM4HMy9MJ2cXsxo7JGvEagMTAeNk6ZlitOC+M5+F5zVsKG9wN9Tt2U2Vr0E2QQo999FXVM4+rZIPoOU",
- "FTSPS0kZYr/9MidjC2bTw1QKgvwjbiCbV8tSkcvhYt1gDWqO5mRvHGQ4cruRsXOm2CwHbPHItphRhbdW",
- "bW6tu5jlAddLhc0f79B8WfFMQqaXyiJWCVILsKjK1bbvGegLAE72sN2j5+Q+Wv0VO4cHBotOFhntP3qO",
- "YSn2j73YZefyQG3iKxkylv9wjCVOx+j2sGOYS8qNOom+m7DJ+4ZZ2IbTZLvucpawpeN6289SQTldQNyb",
- "W2yByfbF3USjYQcvPLOZp5SWYk2Yjs8Pmhr+NBCaZtifBYOkoiiYLswB0oIoURh6apKL2En9cDaNlXvw",
- "7+HyH9HFUlq1AboK8+c1ENu7PLZqdIS9oQW00Tom1D4vylnzgNMxxAk58o8UMQNCnfjA4sbMZZaOIp3Z",
- "QnzozbhGJarS8+SvJF1SSVPD/iZD4Caz755Gsj60H3rzqwH+2fEuQYE8j6NeDpC9lyZcX3KfC54UhqNk",
- "D5pQ0OBURp9rC03zeFCL5+jdmKbNQ+8qgJpRkkFyq1rkRgNOfSPC4xsGvCEp1uu5Ej1eeWWfnTIrGScP",
- "Wpkd+vXdKydlFELGnqw3x91JHBK0ZHCO8TXxTTJj3nAvZL7TLtwE+i/rZWk0gFos82c5pgj8ULE8+60J",
- "be8kzpGUp8uoj2NmOv7eZPqql2zPcfSF9JJyDnl0OHtn/u7v1sjt/w+x6zwF4zu27SbEscvtLK4BvA2m",
- "B8pPaNDLdG4mCLHajvWtg8PyhcgIztM8x22orJ/jJ0gO8s8KlI5lHcUPNq4SbVlGL7C5KQjwDKXqCfnJ",
- "ZupdAmm90ERplhVVbl/7QbYA6YysVZkLmo2JGefk5cErYme1fWxGRZsbY4HCXHsVHRtG8HZ/t1Annyor",
- "Hoa5+zib48LMqpXGx7tK06KMRdibFie+AYbxh3ZdFPNC7EzIoZWwlZff7CSGHuZMFkYyrUezPB5pwvxH",
- "a5ouUXRtcZNhkt89qYunShUkN6zzxNXP7/HcGbhdXheb1mVMhNEvLpiyCVrhHNpB/fULF6c6+SD/9vJk",
- "xbmllCiP3vQC6zpo98BZ5703/UYh6yD+ioKLEpVM4ao5bo6xV/QNcTdhTi+roX1NWGcV84m3U8oFZym+",
- "4A1SwtYgu2Svu/hFdnjs3DVL+SPuTmjkcEXT9NThQQ6Lg4l7PCN0iOsbZoOvZlMtddg/NWYVXVJNFqCV",
- "42yQjX0qJmcvYVyBS6eAeX8DPilky9eEHDLqvkxqM/cVyQhDfAcE4B/NtzdOPcKwvDPGURByaHMRgNai",
- "gbkotZGemCYLAcqtp/0kV703fSb4LDWD1YeJz12JY1hXjVm29Uv2hzrwXkrnFTRtX5i2BN0yzc+tcGI7",
- "6UFZukmjL2rrHY4lkxpEcMTblHhzf4DcevxwtA3ktjG8AO9TQ2hwjs5JKPEe7hFGnZerk2DvnOaVpShs",
- "QWxYT/QZGOMRMF4xDk1m1cgFkUavBNwYPK8D/VQqqbYi4E487QRojh7JGENT2plobzpUZ4MRJbhGP8fw",
- "NjYpxQYYR92gEdwoX9cJXQ11B8LEC8wk7RDZTxCGUpUTojIM3OykDIsxDsO4fbK99gXQPwZ9mch215La",
- "k3OVm2jowUsqYvLmyxWklXW4C5sbgpYlSfEFaXBfRC2aTBnlqZjlkdi3w/pjkIcPg2xna/w3lrFjGCXO",
- "I37lmCzv/saOVxZY2yP1xE1DTIlii2tuc9P/Vvc5F4s2IJ/XoLDxjIckEzvdLw3bDN9A9nLBWMZaP1HE",
- "MCThk7Si0lQ/rmmfSWTkUaW0ybe5WSkfzpw5RtY/EIz4rnl9T+3tYn0MQyGJ6WAELdUuPF5T0jx17x9M",
- "m+4yNoKNZ7BpNm3Jiqh9ZSiGwYYwmM+93rvJRT0pE8feiFAfHNMH6G8+8o6UlDkHWnNi+5h1Mbr9qOld",
- "oveaDe4uwkW+4iCxlfQzKQ0T+CFoynJV54OsqxsE/lYjz3XzsVy4lykYOlyrpv6NCij/m4+yt7PYqhlN",
- "1jM0BFxQmfkW0ZvNX5rJQARIN6bShq6yONDzembWuE/7YYWRZ5PoLk9zoRhfJENRFW2PZW3uu6esXRZ1",
- "CExRhXDNQbpsh9oXJUm08O7WTXBsQoXLiX0dJKjBrDoWuMG3Te+ax1uYK4LakjTO5hwukEgoqIFOBk+s",
- "hufchOwX9ruPo/O5AjqZOSLjenpNtr6R8o5zpnpIDKl+ThzL3R6fdx2RgnFuk8mq2HsrblAZKpulFFmV",
- "Wlt/eDDAi147PxncwEqigkDaX2WPp+f4gPZVEO18Buup5avpkvLmJXP7WNucsnYNwduczm7fqrQVv9Py",
- "hV3A4lbg/JLC0nhUCpEnA9rlUf/ZWPcMnLH0DDJi7g7vchrI5Ubuo1JTmw8vlmufRbUsgUP2YEKIEbeK",
- "Uq+9JbGdlaQzOb+nN82/wlmzyr7kdHLc5JTHvaW2yNMN+ZsfZjNXs1UPbziVHWTzRHrFB1gbvYhkNty1",
- "QEDEttcRUAKislDEpJRrPqfZ6Xz3ZbkI6YeB0FuE6LOW4Gff3XfseULCLQuAgSHjigJgP8R71+XhOpCr",
- "VQr669x5A1q4HcD9LohvtJc+coeVDj3bRemIP1823VHrsQjxD+z7p+uz6SytWgJu3tiu/zbkw7F+igF3",
- "YQenFcuzbZvbcv42CazQvfm7c5N/kRRav9vw4/5xc9mErmIt6W4CIiay1tbkwVSBW3cHj67rFvHf4oWR",
- "VpLpNb5U8FoR+z36AvQn4K6igitQU8d7unBDWxvNRR8s6tZNOaufhC0xUZj7Gu1nGlOxvlzRoszBnYvv",
- "783+Ak/++jTbe/LoL7O/7j3bS+Hps+d7e/T5U/ro+ZNH8Pivz57uwaP5d89nj7PHTx/Pnj5++t2z5+mT",
- "p49mT797/pd7vpaUBbSp0/R3zDOXHLw9Sk4MsA1OaMn+BmubWcqQsc9ZRVM8iUavyEf7/qf/7U/YJBVF",
- "UP7W/TpyoSijpdal2p9OLy4uJmGX6QL1rESLKl1O/Tz9zLdvj2o3uQ1vxh21HlBDCripjhQO8Nu7l8cn",
- "5ODt0aQhmNH+aG+yN3mEqSFL4LRko/3RE/wJT88S933qiG20//FyPJougeZ66f4oQEuW+k/qgi4WICcu",
- "eZf56fzx1HvZph+djnlpRl3E3nBYh3/g5e3ntBpbiQttt75EYpA2QblsCmMys68ViBMBeYZ+WKu2GdZW",
- "I+soC4ptB1Wdxq1a4e+/ofKXsezSseRgsYLm9Xve4YJ2Qc1fX+f32V8vI+E+HzpFyh7v7X2CwmTj1ige",
- "L9escPb0FkFs23lvDGh3uB5XeE1zQzdQF60d4YIefbMLOuL4ct6wLWLZ8uV49Owb3qEjbg4OzQm2DALm",
- "+6zwV37GxQX3Lc2VXBUFlWu8cIOUXaFodTnIcttPVZzFdZgPQ5DnPEiX1LL4zNaezsZE1YUZSsmEERyw",
- "xHMGqQSK17yQGJXTZEx32j3YShSvD/6ONt/XB38n35Oh8rfB9FarbjPxn0BHMvr/sG5KOG7k6F+KTY6/",
- "2orB386dd9Or5q4uxDdbF2IHpn23u3dVP77Zqh/ftki6qp8ZUsIFTzimjzsHEpi17mTUr1pGfbb35Jtd",
- "zTHIc5YCOYGiFJJKlq/Jr7yOy76ZCF7znIoHkfIb+U/PRdVI0YH4HqSynX5sRSNk240nrbCEbEyYbiTD",
- "eBHtIMune5MzbhL6UJ7ZeFof4KbGPrENWuusT9Xux7iX9mYSE9IDV8sP66PDXeTy1pqCfBsx2byFr6uV",
- "5v+kFotrFzj/lDdAD44faEb8w51PzJt3Y6ZP955+PgjCXXgjNPkRgzU+MUv/pHaCOFkFzAbTRU8/+tQc",
- "OzAYl/amzVq6VfFjTMWc0LF7i+sK89QeesNPLCO0mYf6XMPMsCu/6GfmiXGKJhvJ18IjbLrsCF120XvH",
- "F+74wo34QpegGo5gKyRPP2I0WsgOekcSS8P9iRwlQZ5yKQqfKFOQOeh0aesddX3ZEbbin4cN85RNSVRu",
- "zF863nXcov4jclyL89dico8dI3Gw48/WfXo5HqUgI8T3i49VN5/ZHOOp6qd/PlcQPpivK4DXL+ddfhGm",
- "iCFQLYiLSCdmF68E5Ytm8r5vHdFyPWvSHYJvguAeU3vpEhnY4+UW8a0bPoLbkiTkDYpDeMD9y7c/o9nj",
- "U97In3pBbwQHAiumsH6BpcU7d2MtLtSFcOvw47DG2YDo0HY6ftQrll1O61K5Q0LFW1fRdaNQ0dzUjAdl",
- "7EPzCi1LoFJd+5Le7g476cx4dBgm3Bd1qBOhTcHcCCgGL1f0JP7bLm7EP6+37q6q811V5+tVdf6sKnMT",
- "kGNZlfcTyQ7X+KL6tP4i+vQbwRO8bYFrL/m10PLldGt8RNCqfOVTxXBh60kLiUJCyAfUZKfrFQZdCS2m",
- "giGdw2TsLtuU6nRZldOP+B8MBr1swi5tXqSpNbNtum9t/ezRrQZQ3NU8/wZqnn95E96NxNHOaiWUdRAa",
- "euuR/pvT4msN9QvwtCOTXXO1rHQmLoI45qam2+BJsi1u9SS9ERnYcdux/P08fxSDG1z8c/8A1Twi/j7L",
- "Y7NpZ5/KMeUeF6a0Wiy1zfEaTSBdd0xoagk/serAthfLtpV/mXcOhOYSaLYmMwBOxMwsutlXXGSnKp3j",
- "hPGHtw1cpRQpKAVZEiZ32wRaHVWO9kC9AU8IOAJcz0KUIHMqrwmsZQmbAe1mNa3Bra0+7tT3od5t+k0b",
- "2J083EYqoSm0rgVG1eTgiu5GULgjTlBUZZ94//wk192+qsT8YZGn4/brCSvwmRunXChIBc9UdDAsHbbt",
- "2GLV/GAtCmzKbH9SPmd1flvrbOhFmBn5t/o9WG/spsZhndnPSlqQRZMmw2rDXG9gVc8l5pH6iS6h+7aR",
- "h7AUjF/n+gtSTOjAImGGiyzuguU5+mbjckcLiAYRmwA59q0C7IZq/wAgTDWIrt+NtyknSLautChLc/50",
- "UvG63xCajm3rA/1r07ZPXC4QHPl6JkCFYraD/MJi1qbxXFJFHBykoGdOQl+4eOw+zOYwJorx1FXjG8q/",
- "wAo4Nq3CI7DlkHaFvPD4t85Z53B06DdKdINEsGUXhhYcEyu/CiHwqlpe137wCc2ebbE6EK8asdL+Pb2g",
- "TCdzIe2NmWChiIgHtT37f1CmXXkSpwNr4cyWrtSEZShunCCJrQqDWV39YJ8ugRWRqCsz1Y9C7uSwbWyr",
- "WhCzMFJxzfxzO6wz72XMr8/7eSc930nPd9LznfR8Jz3fSc930vOd9PyppecvE4FJksTzaf+8Jva4hoy+",
- "SQn/G3q/8jkfnDRCfy3yo5JgRHRzjjdGZmig+dSljkcXejRRsg3xDtPQp2Y6xkmZU6xBt9L+oTGWnwsK",
- "0fj8xzYHkuE1psGTx+T454Nnjx7//vjZd4b7LG0tnLDtfV8YSul1Dg9cBFud4MSHsgGnmGgZI9mo135S",
- "H+Vgpfk5y4Eog6yX2PwQziE3orz1dRKjjPTVoxOg+QuHHMuVQOkfRLbuEI5Z/xRR0SaZxmHOOJWRZOh9",
- "QukhWQssiOCy+/c0qMtbjZmIxwn0N2zbXg3UAYuS9yZ62RoX4OrYuLF38ZGZPfXoJC6R+hdl2QQhcmTW",
- "sKevJpK+m6XXHRxsa6QKd/6+1ah3j/jowcNjO/ZZTAkWJbYUt0pMowXwxLGFZCaytS8Y7OoytLisTZg/",
- "zGRtNnpw5T7cMbivHhg2ixhd6ZapJ1qwKCju1aRY/TKM06Zq38g3r08d7UpSN46Z7A7X5xpB0MV9IclC",
- "iqp8YEvT8jWqxEVJ+dqbwYysiKWoMOc0xnnfLqeuE6X2+OzulZRCfQUf7Xd/t2jB9KqujFJm6yjFsxh2",
- "q/1sx3hTy2Jb1jufwzNSd2egyk5/E/0uu0DH2vRX2ozGkeoXnVoXd4+r/kdcCW+lOGdGcY5y2H4UVsMQ",
- "JltvBhmwLLwaOqk2/N3Q5qfv6MVJqyLJbjx1lTjB88ZS6RJQIKultEheEnNfSkGzlCp8P+IKlH1iiVWv",
- "jiJ2BwQT80v1I33NBT7ZKljiuDvJk+1IbzchJoBRNpHml5Uum2jTA/dcp4WNO1PAn8UU8IM/fIpQzKvd",
- "OZxB0cAd2BS90Cse5VJT9BIOR7wFB+KtbXmrvrve8G0XXuPCdC4IyEtCSZozdFAIrrSsUn3KKZpAO0nH",
- "O+49b9gdFqVe+CZxK3zESO6GOuUUC03XhtGoSDWHWAk9AC+xqWqxAKU7nHgOcMpdK8abotaYwz2xcZ/m",
- "ujYcfWJbFnRN5lj4SpA/QAoyM1pEmLMEDYpKszx3/kQzDRHzU041ycEw/dfMCHRmOG9zqn3krlilx8JA",
- "bQqbUXagPv1P9is+WnDL93YjNG/Zzz4aevxl8j4nLBuE/OjQ5RM7OsQUMY0nsQf7Z3MvFYwnUSIzN77z",
- "yHdpi9w3Mp4noAeNT9Lt+ik3wrQWBBk91dcjh64boHcW7enoUE1rIzreAr/WD7G3rAuRGJURi2GNFkwv",
- "qxlmXvZvXKcLUb93nWYUCsHxWzalJZuqEtLp+aMt8sEN+BWJsKu7m/vPY8QP6cCclnrjsahQd+8H7uVb",
- "SN/6deds3RqidJch9S5D6l0OzbsMqXe7e5ch9S5/6F3+0P+p+UMnGyVEl3Nja0a/1ktjrNFPm0qrNQMP",
- "m7Vy//XdkkxPCDnBOpbU3AFwDpLmJKXKCkbcRsoVbLHURFVpCpDtn/KkBUkqCjfx/ea/Vs09rfb2ngDZ",
- "e9DtY+0WAeft90VRFT/ZMsvfk9PR6ag3koRCnIPLBBbW9bO9tg77v+pxf+mVCEUrDBpXfCVCoqr5nKXM",
- "ojwXRhlYiE58Hxf4BaQBziaaIEzbpKuIT4yLdNE57fKDbaG7f79fofDNQYdc7pKafPpqN5tqot6UB24c",
- "u8cQ71jG52AZX5xp/Inyr92lWvvKFhQ6Ulu5VG8gSdUV42LF5J2M1FRkDCsc4g1X1zZ8/8HwcQXy3F9+",
- "TcG+/ekUs50vhdLTkbma2sX8wo/mfqALO4K7XErJzjFT4ofL/w4AAP//80cbi5XqAAA=",
+ "H4sIAAAAAAAC/+y9e3fbOJIo/lXw0+45eawoOa+eic/psz93nO72nSSdE7tn526c2w2RJQljEuAAoC11",
+ "rr/7PSgAJEiCkvzIq9d/JRbxKBQKhUI9P45SUZSCA9dqtP9xVFJJC9Ag8S+apqLiOmGZ+SsDlUpWaib4",
+ "aN9/I0pLxhej8YiZX0uql6PxiNMCmjam/3gk4V8Vk5CN9rWsYDxS6RIKagbW69K0rkdaJQuRuCEO7BBH",
+ "h6PLDR9olklQqg/lLzxfE8bTvMqAaEm5oqn5pMgF00uil0wR15kwTgQHIuZEL1uNyZxBnqmJX+S/KpDr",
+ "YJVu8uElXTYgJlLk0IfzhShmjIOHCmqg6g0hWpAM5thoSTUxMxhYfUMtiAIq0yWZC7kFVAtECC/wqhjt",
+ "vx8p4BlI3K0U2Dn+dy4B/oBEU7kAPfowji1urkEmmhWRpR057EtQVa4Vwba4xgU7B05Mrwl5XSlNZkAo",
+ "J+9+fEGePHny3CykoFpD5ohscFXN7OGabPfR/iijGvznPq3RfCEk5VlSt3/34wuc/9gtcNdWVCmIH5YD",
+ "84UcHQ4twHeMkBDjGha4Dy3qNz0ih6L5eQZzIWHHPbGNb3VTwvm/6K6kVKfLUjCuI/tC8Cuxn6M8LOi+",
+ "iYfVALTalwZT0gz6fi95/uHjo/Gjvct/e3+Q/Lf789mTyx2X/6IedwsGog3TSkrg6TpZSKB4WpaU9/Hx",
+ "ztGDWooqz8iSnuPm0wJZvetLTF/LOs9pXhk6YakUB/lCKEIdGWUwp1WuiZ+YVDw3bMqM5qidMEVKKc5Z",
+ "BtnYcN+LJUuXJKXKDoHtyAXLc0ODlYJsiNbiq9twmC5DlBi4roUPXNDXi4xmXVswASvkBkmaCwWJFluu",
+ "J3/jUJ6R8EJp7ip1tcuKnCyB4OTmg71sEXfc0HSer4nGfc0IVYQSfzWNCZuTtajIBW5Ozs6wv1uNwVpB",
+ "DNJwc1r3qDm8Q+jrISOCvJkQOVCOyPPnro8yPmeLSoIiF0vQS3fnSVCl4AqImP0TUm22/X8d//KGCEle",
+ "g1J0AW9pekaApyIb3mM3aewG/6cSZsMLtShpeha/rnNWsAjIr+mKFVVBeFXMQJr98veDFkSCriQfAsiO",
+ "uIXOCrrqT3oiK57i5jbTtgQ1Q0pMlTldT8jRnBR09f3e2IGjCM1zUgLPGF8QveKDQpqZezt4iRQVz3aQ",
+ "YbTZsODWVCWkbM4gI/UoGyBx02yDh/GrwdNIVgE4fpBBcOpZtoDDYRWhGXN0zRdS0gUEJDMhvzrOhV+1",
+ "OANeMzgyW+OnUsI5E5WqOw3AiFNvFq+50JCUEuYsQmPHDh2Ge9g2jr0WTsBJBdeUccgM50WghQbLiQZh",
+ "Cibc/JjpX9EzquC7p0MXePN1x92fi+6ub9zxnXYbGyX2SEbuRfPVHdi42NTqv8PjL5xbsUVif+5tJFuc",
+ "mKtkznK8Zv5p9s+joVLIBFqI8BePYgtOdSVh/5Q/NH+RhBxryjMqM/NLYX96XeWaHbOF+Sm3P70SC5Ye",
+ "s8UAMmtYo68p7FbYf8x4cXasV9FHwyshzqoyXFDaepXO1uTocGiT7ZhXJcyD+ikbvipOVv6lcdUeelVv",
+ "5ACQg7grqWl4BmsJBlqazvGf1Rzpic7lH+afssxjODUE7C5aVAo4ZcE795v5yRx5sG8CMwpLqUHqFK/P",
+ "/Y8BQP8uYT7aH/3btNGUTO1XNXXjmhkvx6ODZpzbn6npadfXecg0nwnjdnew6di+CW8fHjNqFBIUVDsw",
+ "/JCL9OxaMJRSlCA1s/s4M+P0TwoOT5ZAM5Ako5pOmkeVlbMG6B07/oz98JUEMnLF/YL/oTkxn80ppNqL",
+ "b0Z0ZcoIcSJQNGVG4rP3iJ3JNEBJVJDCCnnECGdXgvJFM7ll0DVHfe/Q8qE7WmR3Xlq5kmAPvwiz9ObV",
+ "eDAT8nr00iEETpq3MKFm1Fr6NStv7yw2rcrE4SciT9sGnYEa9WOfrYYY6g4fw1ULC8eafgIsKDPqbWCh",
+ "PdBtY0EUJcvhFs7rkqplfxFGwHnymBz/fPDs0ePfHj/7ztzQpRQLSQsyW2tQ5L67V4jS6xwe9FeGDL7K",
+ "dXz07576F1R73K0YQoDrsXc5USdgOIPFGLH6AgPdIeSg4S2VmqWsRGwdZSFG26O0GpIzWJOF0CTDQTJ7",
+ "0+Ooci0rfgsbA1IKGZGkkSC1SEWenINUTESUIm9dC+JaGO5mpfnO7xZackEVMXPjI6/iGchJbD/N6w0F",
+ "BQ2F2nb92KFPVrzBuBuQSknXvX21642szs27y063ke/fDIqUIBO94iSDWbUIbz4yl6IglGTYEdnsG5HB",
+ "saa6UrfAW5rBGmDMRoQg0JmoNKGEi8ywCdM4znUGNKSomkGNkg4ZmV7aW20GRuZOabVYamKEVRHb2qZj",
+ "QlO7KQneQGrgQVlrAmwrO53VvuUSaLYmMwBOxMy92tx7EhdJUdmjvR3H8bwGrPql0YKrlCIFpSBLnNFq",
+ "K2i+nd1lvQFPCDgCXM9ClCBzKq8JrBaa5lsAxTYxcGshxT11+1DvNv2mDexOHm4jleblaqnASETmdBs2",
+ "N4TCHXFyDhKffJ90//wk192+qhwwyLh7/YQV5vgSTrlQkAqeqehgOVU62XZsTaOW8GFWEJyU2EnFgQfU",
+ "Dq+o0vbhz3iGgqhlNzgP9sEphgEevFHMyH/3l0l/7NTwSa4qVd8sqipLITVksTVwWG2Y6w2s6rnEPBi7",
+ "vr60IJWCbSMPYSkY3yHLrsQiiGqneao1Y/3FoZLf3APrKCpbQDSI2ATIsW8VYDdUSg8AYl4tdU8kHKY6",
+ "lFNrwscjpUVZmvOnk4rX/YbQdGxbH+hfm7Z94qK64euZADO79jA5yC8sZq05YkmNxIgjk4KembsJ5T+r",
+ "oejDbA5johhPIdlE+eZYHptW4RHYckgHRG9n8Axm6xyODv1GiW6QCLbswtCCB94BLaH0b7C+Bbmz7AzZ",
+ "x/UhaMpyyEjwM7JrUnZl5K3SfG+2XcS8A5LtCALLRhEsXU+y20nq7U4VEXsjy8mZwhuqhz+F4FvjyUlg",
+ "crkF0TQyqmEnlBME1KtkjQQQNoEVTXW+NveqXsKaXIAEoqpZwbS21rA2OWlRJuEA0ff3hhmdBsQaHvwO",
+ "7KKSOcahguX1t2I8snLSZvhOOpJSCx1OQiuFyCfbWUwPGVEIdjsCpTC7zpzx1VvoPCW1gHRSE6q/am59",
+ "T7XQjCsg/1tUJKUcJb5KQ30FCYl8He97M4O5Mes5mRWtGgxBDgVYQRa/PHzYXfjDh27PmSJzuPAeC6Zh",
+ "Fx0PH+Kz7K1QunW4bonVHUUuE1RMmJvJCY1dnjLZia0dZTvtZFuvcHToJ8UzpZQjXLP8GzOAzslc7bL2",
+ "kEaWVC23rx3H3UkvEwwdW7fddynE/BZWy7JVzEyXwSq2Uke4+Ci6Z14QawV6EhX2SgNgxFIP8ixHjYuY",
+ "dw4kKcCcFLVkpRmysSquNbQ8kv7P/f/cf3+Q/DdN/thLnv/H9MPHp5cPHvZ+fHz5/ff/t/3Tk8vvH/zn",
+ "v8cEZKXZLK7z+5mqpYHUMc4VP+JWaz8X0j6r1k5aE/PPDXeHxMxmeswHS9rpuMU2hHFC7WYjzRlhPF/f",
+ "wh1rByISSgkKOWL4iFX2q5iHDkmO8tRaaSj6eiDb9bcBKfidlyF7VCp4zjgkheAxie4X/PoaP8Z6W648",
+ "0Bnvx6G+XRm7BX8HrPY8u2zmTfGLux2wobe1e9QtbH533I4KMHTFQhUG5CWhJM0ZKjgEV1pWqT7lFJ9Q",
+ "AblGjBL+YTj8qH7hm8Rf8ZFHthvqlFNlcFg/rKKq4TlEVCY/Avi3taoWC1C6I9vNAU65a8U4qTjTOFdh",
+ "9iuxG1aCRMvAxLYs6JrMaY46gD9ACjKrdFvaQY8Rpc0T3eojzTREzE851SQHqjR5zfjJCofzjhmeZjjo",
+ "CyHPaizEef4COCimkjgj/cl+RX7qlr90vBXdd+1nz28+9wXgYY/5MzjIjw7dS+DoEMW9RhPZg/2zqacK",
+ "xpMokZ0sgRSMo1tch7bIfSO0egJ60Og03a6fcr3ihpDOac4yqq9HDl0W1zuL9nR0qKa1ER1tg1/rh5jx",
+ "eSGSkqZnaHscLZheVrNJKoqpfwFNF6J+DU0zCoXg+C2b0pJNVQnp9PzRFnHsBvyKRNjV5XjkuI66dS8H",
+ "N3BsQd05az2f/1sLcu+nlydk6nZK3bPOTXbowCsl8mh1sTUtQ45ZvHXOt95dp/yUH8KccWa+75/yjGo6",
+ "nVHFUjWtFMgfaE55CpOFIPvEDXlINT3lPRY/GD+DrscOmrKa5SyNK17GI+sT3R/h9PS9IZDT0w89q0D/",
+ "4nRTRc+onSC5YHopKp04p89EwgWVWQR0VTv94cjWZXvTrGPixrYU6ZxK3fhxVk3LUiW5SGmeKE01xJdf",
+ "lrlZfkCGimAn9FUhSgvpmaDhjBYa3N83wtlFJL3wHsOVAkV+L2j5nnH9gSSn1d7eEyAHZfnKjHls4Pjd",
+ "8RpDk+sSWuqNHb2MmsFiqg1cuBWoYKUlTUq6ABVdvgZa4u7jRV2gIi3PCXYLcVJb6nGoZgEeH8MbYOG4",
+ "sh8VLu7Y9vLRO/El4CfcQmxjuFOjEL/ufpmhfha5IbJrb1cwRnSXKr1MzNmOrkoZEvc7Uzv1LwxP9lYK",
+ "xRbcHAIX/zADki4hPYMMXbGhKPV63OruDWHuhvOsgykbsmDdpdCvFjVBMyBVmVEnA1C+7jo4KtDae3W+",
+ "gzNYn4jGLfcqHo2X41FqgwgSQzNDBxUpNbiMDLGGx9aN0d18Z1Q1kNKyJItczNzprsliv6YL32f4INsb",
+ "8hYOcYwoajRsoPeSyggiLPEPoOAaCzXj3Yj0Y8trqdN29N9saclwkG2XS/Q6EfPurdFj6lEmZhsnM6ri",
+ "FwiYL2Y/zBnq2pz9TFapiiuYEAx7dYQ7y1EWqc3d9mRT2dI82ji+IdDiVAKSN7e6B6ONkVB8WFLlQ3cw",
+ "wskfmJ0u2iHDXG1YNVTkLav43mskJ2bmzeGcDuF/2N/8KDCXBmFMtTe5Z2zdwzCuIwtsRLH3Oveu5t6/",
+ "fDS+kq/4eOQ8eGLbIThKGRnksLALt409oTjQ7qlggwwcv8znOeNAkpjllSolUmZjrxpe7uYAI4Q+JMQq",
+ "eMjOI8TIOAAbjQU4MHkjwrPJF1cBkgND6wL1Y6OZIfgbtmubm9BuJ95uFUP7vKM5ROMm9MJuY18LNR5F",
+ "WdLQC6Gt3rdNZtB7UsVI1LCmvl6mr/1RkANex0mLsyZnMW2dkSoAyfDYdwueDeQ+m5tL/kFgM5KwYEpD",
+ "8242p9Urgj6v7uJcaEjmTCqd4JM9ujzT6EeFwuCPpmmc/XRsOsrqAOLcB6c9g3WSsbyK77ab92+HZto3",
+ "9ftJVbMzWOMlAzRdkhnGMkctvRumtt4HGxf8yi74Fb219e5GS6apmVgKoTtzfCNU1eEnmw5ThABjxNHf",
+ "tUGUbmAv+PY5hFzHXNaDNxm+ag3DtDEVg1qD3mHK/NibxK8AimHOa0eKriUQdDeugqEljvKMMB2EAvc9",
+ "YQfOAC1Llq06b3g76oDZDgX4KwjqVuKPmKJG9WBbMBC812POVhK8zsFuaXBn2qBuHq5tshNmjPQVIiRg",
+ "COFUTPmUJH1EGdLGuPltuDoBmv8N1n83bXE5o8vx6GZP/hiu3YhbcP223t4onlGXbZ+ALQ3eFVFOy1KK",
+ "c5onTjEyRJpSnDvSxOZej/KZWV38+X3y8uDVWwe+eXvmQKVVlW1cFbYrv5lVmRexkAMHxKc8MNKqfztb",
+ "QSzY/DqOLFSmXCzBhZcHspzhYo647PFqFGXBUXTKlXncpLZVVeJ0enaJG3R7UNaqveZFbDV7bW0ePacs",
+ "909RD+2A+QsX1+hTr8wVwgFurBUMlLvJrbKb3umOn46GurbwpHCuDQHwhc3xoIjgXb8qI0LiCxdJtaBr",
+ "Q0FWOd1nTrwqEnP8EpWzNK624DNliINbna9pTLDxgDBqRqzYgAmBVywYyzRTO1jLOkAGc0SRiSqlDbib",
+ "CZecq+LsXxUQlgHX5pPEU9k5qOZc+gQv/evUyA79udzANtlLM/xNZAwz1JB0gUBsFjBCDXPEV9c/OP1C",
+ "a9W4+SFQDF7BUBXO2LsSNxiZHH04arbW/mVbUxzm0urzP0MYNu/C9kReXm2xtIAOzBFNzDV4WxwM3xSm",
+ "9xXuiOZKQHDDy2Bs0/bkSkSGqfgF5TbPjulnceh6K7A6A9PrQkgMLVEQtdIzlcyl+APiL9m52aiI66dD",
+ "JYqL2HsScdnvMtFaK9NkUPP4DeEYJO0hSS74SNqGxIETjlQeqM4xAtwruCi3ZG1zArXM1/HDEbqcTO34",
+ "zeFwMPfcdHJ6MaOx8HgjUBmYDhojTUsVpwXxnf0uOK1hQ3uBvaduy2w8Rgmy8c/ux/5dUzj6tkg+g5QV",
+ "NI9LSRlivx19lrEFs4mVKgVB5h43kM1IZ6nIZT+yZrAGNUdzsjcOcoO53cjYOVNslgO2eGRbzKjCW6tW",
+ "t9ZdzPKA66XC5o93aL6seCYh00tlEasEqQVYfMrVuu8Z6AsATvaw3aPn5D5q/RU7hwcGi04WGe0/eo5u",
+ "KfaPvdhl5zKobeIrGTKW/3KMJU7HaPawY5hLyo06icYG2bSXwyxsw2myXXc5S9jScb3tZ6mgnC4gbs0t",
+ "tsBk++JuotKwgxee2ZxtSkuxJkzH5wdNDX8acE0z7M+CQVJRFEwX5gBpQZQoDD01aXnspH44mwDOpcrw",
+ "cPmPaGIp7bMBug/mz6sgtnd5bNVoCHtDC2ijdUyoDaHLWROk7BjihBz5QFzMHVKnDLG4MXOZpaNIZ7YQ",
+ "UyQwrvERVel58leSLqmkqWF/kyFwk9l3TyP5UtopEvjVAP/seJegQJ7HUS8HyN5LE64vuc8FTwrDUbIH",
+ "jStocCqjKQmEpnncqcVz9K5P0+ahdxVAzSjJILlVLXKjAae+EeHxDQPekBTr9VyJHq+8ss9OmZWMkwet",
+ "zA79+u6VkzIKIWNpGZrj7iQOCVoyOEf/mvgmmTFvuBcy32kXbgL9l7WyNC+AWizzZzn2EPihYnn298a1",
+ "vZNySlKeLqM2jpnp+FuTI69esj3H0SwAS8o55NHh7J35m79bI7f/P8Wu8xSM79i2m0rKLrezuAbwNpge",
+ "KD+hQS/TuZkgxGrb17d2DssXIiM4TxNy3lBZPztWkADnXxUoHcvXix+sXyXqssy7wOZfIcAzlKon5Ceb",
+ "43oJpBWgitIsK6rcBjtCtgDplKxVmQuajYkZ5+TlwStiZ7V9bC5Sm/9lgcJcexUdHUaQn2I3VyefZC7u",
+ "hrn7OJv9wsyqlcYAdaVpUcY87E2LE98A3fhDvS6KeSF2JuTQStjKy292EkMPcyYLI5nWo1kejzRh/qM1",
+ "TZcoura4yTDJ7564yFOlCtKC1hkW6xQTeO4M3C53kU1dNCbCvC8umLKpjeEc2k79dYSLezp5J//28mTF",
+ "uaWUKI/eFIF1HbR74Kzx3qt+o5B1EH9FwUWJSqZw1TxOx9grGkLdTQrVywdqownrfHw+ZX1KueAsxQDm",
+ "IJlyDbJLk7yLXWSHWO+uWsofcXdCI4crmoqqdg9yWBxMTuUZoUNcXzEbfDWbaqnD/qkxH++SarIArRxn",
+ "g2zsk5g5fQnjClzKEMyYHfBJIVu2JuSQUfNlUqu5r0hG6OI7IAD/aL69cc8jdMs7YxwFIYc25wFoNRqY",
+ "xVUb6YlpshCg3HraIbnqvekzwbDUDFYfJj7rK45hTTVm2dYu2R/qwFspnVXQtH1h2hI0yzQ/t9yJ7aQH",
+ "ZekmjUbU1jscS5g2iOCItSnx6v4AufX44WgbyG2jewHep4bQ4ByNk1DiPdwjjDr3XCc15TnNK0tR2IJY",
+ "t55oGBjjETBeMQ5NTuLIBZFGrwTcGDyvA/1UKqm2IuBOPO0EaI4WyRhDU9qpaG86VGeDESW4Rj/H8DY2",
+ "afMGGEfdoBHcKF/XqZANdQfCxAvMwe4Q2U+Ch1KVE6IydNzspMWLMQ7DuH2ayvYF0D8GfZnIdteS2pNz",
+ "lZtoKOAlFTF58+UK0soa3IVNjUHLkqQYQRrcF1GNJlPm8VTM8lgSnPpjkMESnWxna/w3lrBkGCXOIn5l",
+ "nyxv/saOVxZY2yP1xE1DTIlii2tuc9P/Vvc5F4s2IJ9XobDxjIckEzvdLw3bHM4xeuAZax2iiG5Iwqc3",
+ "xkdTHVzTPpPIyKOP0iZT7eZH+XDO2TGy/gFnxHdN9D21t4u1MQy5JKaDHrRUO/d4TUkT6t4/mDZRbGwE",
+ "689gE9TaYi9R/cqQD4N1YTCfe713k4t6UiaOvRGh3jmmD9DfvOcdKSlzBrTmxPYx63x0+17Tu3jvNRvc",
+ "XYTzfMVBYit5uzVhWItCep7Pge+7zUw02T34tTHIo80EE9kugLtMtm2fxp09q+ZzSDU73+Jp/l9GYm28",
+ "mMdeprVJxQPHc1Z76viaQFcUtRuANjmCb4QniLC/MThDfqZnsL6nSDuf8mH0/DlCvU7YF2IAsw8khkSE",
+ "imn/7SPcKWSZqikDseCtbbY7NIlfBpNi1u5esTw/O83lSZJQJ2fVSXSG8nCKmBS/01ym6w6OV433Nrpk",
+ "DDmj97PEDd9eNi+gqhMa10V/AmcK81jrJlu6cGFnGBdQ6518ABoo/5sPobGz2GJSTdpO1PJdUJn5FlGx",
+ "1UvEyYB7V9dh2vqlszjQ83pm1vhG9H2GIzHR6AuT5kIxvkiGXKba7gi1Lv+eskYXVBBg+j2Eaw7SpevV",
+ "vlZXooX3pdgExyZUuFIR10GCGkyZZYEbDFx810RmYiIYaiu1OYNSuEAioaAGOhnETw7PuQnZL+x37yTr",
+ "E4F00u5ExvX0mmwNgPReMUz1kBhS/Zy423K78+113guMc5sNXcWCKblBZahJKqXIqtRe0OHBAP+u2jke",
+ "eAMriUr5aX+VPYEtx+j4V0Eowxmsp1ZoSpeUN2kK2sfaJkW3awgC7zq7fatPqbjAmi/sAha3AueXfAmN",
+ "R6UQeTKgOjrqx4R2z8AZS88gI+bu8PbkgTyV5D5qLGrbwMVy7dOAlyVwyB5MCDFvqaLUa28maKcc6kzO",
+ "7+lN869w1qyyYdrukTY55XFXCFv78Ib8zQ+zmavZYsA3nMoOsnkiveIDrI1eRLK27lo3J6K472bSbIjK",
+ "QhGTUq4ZK7fT+e4/1CKkH0Y5bHn/nLVedTapRkdZLyTc8usu0FJe8XXXj9/YdXm4DuRqlYL+OnfegBZu",
+ "B3C/C+Ib1UQfucMaBT3bRaMQz01guqNKwyIEs2cQBJX8/uh3ImHuCrE+fIgTPHw4dk1/f9z+bF5fDx9G",
+ "T+ZnU2a0yvO4eWMU8/ch4641YA74EXT2o2J5to0wWl4hTWY79Hv4zfnPfJHcer/ZJ3L/qLo0Y1dRo3Y3",
+ "ARETWWtr8mCqwN9jB1cP1y3i2IGXTVpJptcYwuRfVOy3aGj4T7USxtV8qx3BnR+yLTfq3JIalU1TIfIn",
+ "Yas2FeauR8W6xhTVL1e0KHNwB+X7e7O/wJO/Ps32njz6y+yve8/2Unj67PneHn3+lD56/uQRPP7rs6d7",
+ "8Gj+3fPZ4+zx08ezp4+ffvfsefrk6aPZ0++e/+WeL89oAW1KH/4DE1AmB2+PkhMDbIMTWjJM7X6J4vRc",
+ "+GR2NMWTaN4k+Wjf//T/+xM2SUURVJR3v46cj9poqXWp9qfTi4uLSdhlusA3WqJFlS6nfp5+RvC3R7X/",
+ "jI17wB21rhGGFHBTHSkc4Ld3L49PyMHbo0lDMKP90d5kb/IIc8aWwGnJRvujJ/gTnp4l7vvUEdto/+Pl",
+ "eDRdAs310v1RgJYs9Z/UBV0sQE5cVj/z0/njqTe/Tz+69+mlGXURC+6ynkCB+0c/2Z3TdaFRx1cdDvKp",
+ "KJdmZUxmNoyJOPGRZ+igYZ98hrXVyDrKmgweR0GhRBeJZUPT999/QxWlY1n3Y1kDI4ViG1XRcI3YoIy+",
+ "L53/7K+XET/AD526n4/39j5Brc9xaxSPl2sWDX16iyC2DUA3BrQ7XI8rvKa5oRuo68CPcEGPvtkFHXHU",
+ "fxu2RSxbvhyPnn3DO3TEzcGhOcGWQSRNnxX+ys+4uOC+pbmSq6Kgco0XbpDLLxStLgdZbjuGzWlrh/kw",
+ "BPUfgjxqLW3RbO3pbExUXZWolEwYwWFsXgEZpBIoXvNCorteU0nCaQbAlmF6ffAP1Be/PvgH+Z4MVZQP",
+ "prcv8jYT/wl0pNLJD+umKvJGjv6l2OT4qy3C/+3ceTe9au7q5Xyz9XJ2YNp3u3tXDembrYb0bYukqzr+",
+ "mBIueMIxr+Q5kECtdSejftUy6rO9J9/sao5BnrMUyAkUpZBUsnxNfuV1wMbNRPCa51Q8CKHZyH965q1G",
+ "ig7E9yDH9fRjy5Mh2648abk0ZGPCdCMZtrwdgpy8dfpfF6w3bjJ9UZ5ZR3vv+arGPuMVauusPdbux7iX",
+ "D2sSE9IDM80P66PDXeTy1pqCRDwx2byFr40ieu/S+qQaizDgK3KvxffmU98APTh+oBnxEX2fmDfvxkyf",
+ "7j39fBCEu/BGaPIjOnp8Ypb+SfUEcbIKmA3mkZ9+9Dl7dmAwLh9Wm7U476GNTMWc0LEL0ncVu2rrvuEn",
+ "lhHalGR9rmFm2JVf9FN2xThFk6boa+ERNo9+hC676L3jC3d84UZ8oUtQDUdAH1k1/YiebCE76B1JrBn5",
+ "JzKUBAUMpCh8Bl1B5qDTpfUd7tqyI2zFx40O85RN2ZVuzF861nXcon52CVyLs9di1p8dvXiw48/WfHo5",
+ "HqUgI8T3iw9iMZ/ZHH2x6phgn0QMM2kwn1ejTqnhEg8xRQyBakFcqAoxu3glKF80k/dt64iW62mT7hB8",
+ "EwT3mNpLl+HEHi+3iG9d8RHcliQhb1AcwgPuQ2L/jGqPT3kjf+oFvREcCKyYwsImlhbvzI21uFBXyK5d",
+ "l8PihwOiQ9vo+FGvWHY5rWNrhoSKt67U80ahormpWZPpvq1eoWUJVKprX9LbzWEnnRmPDsNKHK1QoDoI",
+ "KAKKwcsVLYn/sYsZ8c9rrbsr935X7v165d4/65O5ccixrMrbiWSHa3zR97T+Iu/pN4IneNsC117ya6Hl",
+ "y72tMQChVRLP55DiwhaaFxKFhJAPqMlO1ysMmhJaTAVdOofJ2F22KdXpsiqnH/E/6Ax62bhd2oRpU6tm",
+ "23Tf2sL6o1t1oLhpsf5+Om3b9bdNqbiiPFxgNbykEDzmumxr5b3Gj9FQGDTKDnRG8/hQ324SxBb8HbDa",
+ "8+zC6m6K38nXocK7kTjaWa2EsnZCQ2s90n9zWrqVSGM/Tz+2y3ZZbbhrqZaVzsRF0Lcp/zh4tmyLWz1b",
+ "b0QGdty2d38/JShFdwfnEd0/UjXXiEd7efw27WzgHVMuVDGl1WKpbTroaK75umNCU3sUbDi/2hb/bFv5",
+ "OL9zIDSXQLM1mQFwImZm0e08Et0Clo43xsN4G7hKKVJQCrIkzAO5CbTazxw1hHoDnhBwBLiehShB5lRe",
+ "E1jLJDYD2k2AXINb64EcH+hDvdv0mzawO3m4jVSal4elAixxIIoyB1efO4LCHXGCwiv7xPvnJ7nu9lUl",
+ "phqMBKLbryeswKA5TrlQkAqeqeF0EduOLSaICNaiwGbX9yclmsHNDDxwtb6iSrtMl62o2iDNiJliQ36L",
+ "oRgxM/Lf6wix3thNOdQ6CaiVvSCL5leH1Ya53sCqnkvMI6VWXe2HbSMPYSkYv04LGiSs0IGOwgwXWdwF",
+ "y3O01sYlkRYQDSI2AXLsWwXYDRUBA4Aw1SC6jkJvU05Ql0FpUZbm/Omk4nW/ITQd29YH+tembZ+4nGs4",
+ "8vVMgAoFbwf5hcWszfi7pIo4OEhBz5zMvnAe2n2YzWFMFOOpy7IzlM2BFXBsWoVHYMsh7Yp94fFvnbPO",
+ "4ejQb5ToBolgyy4MLTgmaH4VYuFV331djcInVIS2Be1AvGoETfv39IIyncyFdBmMsKZMxKbaSexEmXaV",
+ "jNyrWAunyHRVaSxDceME+a5V6N7qSo375AusiPhhmal+FHInE26jbdWCmIWRimvmA/DMeatlzK/PHnon",
+ "Pd9Jz3fS8530fCc930nPd9LznfT8qaXnL+OTSZLE82kfcBMLtyGjb1LC/4YiWj5nCEoj9NciPz4SjIhu",
+ "zvFGXw0NNJ+6KhNoVI/mVLdO32HFitRMxzgpc4rlKlfahx5jpcqgZpVPlW4zKhleYxo8eUyOfz549ujx",
+ "b4+ffWe4z9KWzQrb3vfJfpVe5/DA+bTVKU+8cxtwijnZ0beN+tdP6v0erDQ/ZzkQZZD1EpsfwjnkRpS3",
+ "1k9iHiP959EJ0PyFQ47lSqD0DyJbdwjHrH+KqGiTTGNCZ5zKSN2EPqH0kKwF1k5xhUB6L6jLW/WiiHsO",
+ "9Dds214NlAyMkvcmetnqKeBKXrmxd7GamT316CSu5sIXZdkEIXJk1rCnr8a3vpvz1x0cbGukCnf+vlU/",
+ "eI/46MHDYzv2OVEJ1i+3FLdKTKMF8MSxhWQmsrWvLe5KuLS4rK2tMcxkbeEKcJWB3DG4rx4YNosYXemW",
+ "qida2yyoA9gkbP0yjNNWddjIN69PHe2iczf2ouwO1+cagRvGfSHJQoqqfGCrWPM1PomLkvK1V4MZWRGr",
+ "1mEGa/T8vl1OXadd7fHZ3Yuuhe8VDOPv/m7RgslaXcW1zJZci+dE7BYG247xpuzNtjx4PiNopETXQEGu",
+ "/ib6XXauj7Xqr7T5kSOFcjplce7Crf5HXAlvpThn5uEc5bB9v6yGIUy23gwyYFl4NXSSb/i7oc1P39GL",
+ "k1bxot146ipxgueNpdIloEBWS2mRTCXmvpSCZilVGFHiahl+YolVr44iegcEEzNO9X1/zQU+2SpY4rg7",
+ "yZNt3283IaaEUTa15peVLhv/0wMXwNPCxp0q4M+iCvjBHz5FKGbp7hzOoL7oDmyKXugVj3KpKVoJhz3e",
+ "ggPx1ra8Vdtdb/i2Ca8xYToTBOQloSTNGRooBFdaVqk+5RRVoJ0U5h3znlfsDotSL3yTuBY+oiR3Q51y",
+ "ijXpa8VoVKSaQ6zaJoCX2FS1WIDSHU48BzjlrhXjTf17zAifWE9Qc10bjj6xLQu6JnOskSfIHyAFmZlX",
+ "RJjFBBWKSrM8d/ZEMw0R81NONcnBMP3XzAh0Zjivc6pt5K6urcfCQKULm2M2iWshfrJfMYzBLd/rjVC9",
+ "ZT83xX2+SCboJFYsyUF+dOgyjB0dYtKYxpLYg/2zmZcKxpMokZkb31nku7RF7hsZzxPQg8Ym6Xb9lBth",
+ "WguCjJ7q65FD1wzQO4v2dHSoprURHWuBX+uHWHTrQiTmyYh180YLppfVDHMx+6jX6ULUEbDTjEIhOH7L",
+ "prRkU1VCOj1/tEU+uAG/IhF2dXdz/3mU+CEdmNNSbzyWKOru/cC9fAsJXb/uLK5bXZTucqbe5Uy9y6p5",
+ "lzP1bnfvcqbeZRS9yyj6PzWj6GSjhOiycGzN8deKPc7Q9bOp21oz8LBZKxtg3yzJ9ISQE6yKSc0dAOcg",
+ "aU5Sqqxg5MrcFmyx1ERVaQqQ7Z/ypAVJKgo38f3mv/aZe1rt7T0Bsveg28fqLQLO2++Loip+shXZvyen",
+ "o9NRbyQJhTgHlxssrBJoe20d9v+rx/2lV3AUtTCoXPF1DYmq5nOWMovyXJjHwEJ0/Pu4wC8gDXA29QRh",
+ "2qZhRXyiX6TzzmkXM2wL3f37/QqlcA465HKX5uTT17/ZVGH1pjxw49g9hnjHMj4Hy/jiTONPlJHtLvna",
+ "V7ag0JDayq56A0mqriEXK03vZKSmRmNY8xBvuLra4fsPho8rkOf+8mtK+O1Pp5j/fCmUno7M1dQu7xd+",
+ "NPcDXdgR3OVSSnaOuRM/XP6/AAAA//9Gwo6X+vEAAA==",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
diff --git a/daemon/algod/api/server/v2/generated/types.go b/daemon/algod/api/server/v2/generated/types.go
index 2ee2e91cd..6e10c1f78 100644
--- a/daemon/algod/api/server/v2/generated/types.go
+++ b/daemon/algod/api/server/v2/generated/types.go
@@ -340,6 +340,34 @@ type EvalDeltaKeyValue struct {
Value EvalDelta `json:"value"`
}
+// ParticipationKey defines model for ParticipationKey.
+type ParticipationKey struct {
+
+ // Address the key was generated for.
+ Address string `json:"address"`
+
+ // When registered, this is the first round it may be used.
+ EffectiveFirstValid *uint64 `json:"effective-first-valid,omitempty"`
+
+ // When registered, this is the last round it may be used.
+ EffectiveLastValid *uint64 `json:"effective-last-valid,omitempty"`
+
+ // The key's ParticipationID.
+ Id string `json:"id"`
+
+ // AccountParticipation describes the parameters used by this account in the consensus protocol.
+ Key AccountParticipation `json:"key"`
+
+ // Round when this key was last used to propose a block.
+ LastBlockProposal *uint64 `json:"last-block-proposal,omitempty"`
+
+ // Round when this key was last used to generate a state proof.
+ LastStateProof *uint64 `json:"last-state-proof,omitempty"`
+
+ // Round when this key was last used to vote.
+ LastVote *uint64 `json:"last-vote,omitempty"`
+}
+
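For reference, a minimal sketch (not part of this change) of the JSON these generated types marshal to; the field values below are placeholders:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
)

func main() {
	first := uint64(100)
	key := generated.ParticipationKey{
		Id:                  "PLACEHOLDER-PARTICIPATION-ID",
		Address:             "PLACEHOLDER-ADDRESS",
		EffectiveFirstValid: &first,
		Key: generated.AccountParticipation{
			VoteFirstValid:  100,
			VoteLastValid:   3000,
			VoteKeyDilution: 10000,
		},
	}
	// Pointer fields left nil (e.g. last-vote) are dropped via omitempty.
	out, _ := json.MarshalIndent(key, "", "  ")
	fmt.Println(string(out))
}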
// PendingTransactionResponse defines model for PendingTransactionResponse.
type PendingTransactionResponse struct {
@@ -406,7 +434,7 @@ type TealValue struct {
// \[tb\] bytes value.
Bytes string `json:"bytes"`
- // \[tt\] value type.
+ // \[tt\] value type. Value `1` refers to **bytes**, value `2` refers to **uint**.
Type uint64 `json:"type"`
// \[ui\] uint value.
@@ -588,6 +616,16 @@ type NodeStatusResponse struct {
TimeSinceLastRound uint64 `json:"time-since-last-round"`
}
+// ParticipationKeyResponse defines model for ParticipationKeyResponse.
+type ParticipationKeyResponse struct {
+
+ // Detailed description of a participation key
+ ParticipationKey string `json:"participationKey"`
+}
+
+// ParticipationKeysResponse defines model for ParticipationKeysResponse.
+type ParticipationKeysResponse []ParticipationKey
+
// PendingTransactionsResponse defines model for PendingTransactionsResponse.
type PendingTransactionsResponse struct {
@@ -598,6 +636,13 @@ type PendingTransactionsResponse struct {
TotalTransactions uint64 `json:"total-transactions"`
}
+// PostParticipationResponse defines model for PostParticipationResponse.
+type PostParticipationResponse struct {
+
+ // encoding of the participation id.
+ PartId string `json:"partId"`
+}
+
// PostTransactionsResponse defines model for PostTransactionsResponse.
type PostTransactionsResponse struct {
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go
index f71def6df..da47ce350 100644
--- a/daemon/algod/api/server/v2/handlers.go
+++ b/daemon/algod/api/server/v2/handlers.go
@@ -24,6 +24,7 @@ import (
"io"
"math"
"net/http"
+ "strings"
"time"
"github.com/labstack/echo/v4"
@@ -33,6 +34,7 @@ import (
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/private"
"github.com/algorand/go-algorand/data"
+ "github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
@@ -67,13 +69,147 @@ type NodeInterface interface {
StartCatchup(catchpoint string) error
AbortCatchup(catchpoint string) error
Config() config.Local
+ InstallParticipationKey(partKeyBinary []byte) (account.ParticipationID, error)
+ ListParticipationKeys() ([]account.ParticipationRecord, error)
+ GetParticipationKey(account.ParticipationID) (account.ParticipationRecord, error)
+ RemoveParticipationKey(account.ParticipationID) error
}
-// RegisterParticipationKeys registers participation keys.
-// (POST /v2/register-participation-keys/{address})
-func (v2 *Handlers) RegisterParticipationKeys(ctx echo.Context, address string, params private.RegisterParticipationKeysParams) error {
- // TODO: register participation keys endpoint
- return ctx.String(http.StatusNotImplemented, "Endpoint not implemented.")
+func roundToPtrOrNil(value basics.Round) *uint64 {
+ if value == 0 {
+ return nil
+ }
+ result := uint64(value)
+ return &result
+}
+
+func convertParticipationRecord(record account.ParticipationRecord) generated.ParticipationKey {
+ participationKey := generated.ParticipationKey{
+ Id: record.ParticipationID.String(),
+ Address: record.Account.String(),
+ Key: generated.AccountParticipation{
+ VoteFirstValid: uint64(record.FirstValid),
+ VoteLastValid: uint64(record.LastValid),
+ VoteKeyDilution: record.KeyDilution,
+ },
+ }
+
+ // These are pointers but should always be present.
+ if record.Voting != nil {
+ participationKey.Key.VoteParticipationKey = record.Voting.OneTimeSignatureVerifier[:]
+ }
+ if record.VRF != nil {
+ participationKey.Key.SelectionParticipationKey = record.VRF.PK[:]
+ }
+
+ // Optional fields.
+ if record.EffectiveLast != 0 && record.EffectiveFirst == 0 {
+ // Special case: an effective first valid of round 0 is a real value, so return a pointer to zero rather than nil
+ zero := uint64(0)
+ participationKey.EffectiveFirstValid = &zero
+ } else {
+ participationKey.EffectiveFirstValid = roundToPtrOrNil(record.EffectiveFirst)
+ }
+ participationKey.EffectiveLastValid = roundToPtrOrNil(record.EffectiveLast)
+ participationKey.LastVote = roundToPtrOrNil(record.LastVote)
+ participationKey.LastBlockProposal = roundToPtrOrNil(record.LastBlockProposal)
+ participationKey.LastStateProof = roundToPtrOrNil(record.LastStateProof)
+
+ return participationKey
+}
+
+// GetParticipationKeys returns a list of participation keys.
+// (GET /v2/participation)
+func (v2 *Handlers) GetParticipationKeys(ctx echo.Context) error {
+ partKeys, err := v2.Node.ListParticipationKeys()
+
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+
+ var response []generated.ParticipationKey
+
+ for _, participationRecord := range partKeys {
+ response = append(response, convertParticipationRecord(participationRecord))
+ }
+
+ return ctx.JSON(http.StatusOK, response)
+}
+
+// AddParticipationKey adds a participation key to the node.
+// (POST /v2/participation)
+func (v2 *Handlers) AddParticipationKey(ctx echo.Context) error {
+
+ buf := new(bytes.Buffer)
+ _, err := buf.ReadFrom(ctx.Request().Body)
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+ partKeyBinary := buf.Bytes()
+
+ if len(partKeyBinary) == 0 {
+ err := fmt.Errorf(errRESTPayloadZeroLength)
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+
+ partID, err := v2.Node.InstallParticipationKey(partKeyBinary)
+
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+
+ response := generated.PostParticipationResponse{PartId: partID.String()}
+ return ctx.JSON(http.StatusOK, response)
+
+}
+
+// DeleteParticipationKeyByID deletes a given participation key by ID.
+// (DELETE /v2/participation/{participation-id})
+func (v2 *Handlers) DeleteParticipationKeyByID(ctx echo.Context, participationID string) error {
+
+ decodedParticipationID, err := account.ParseParticipationID(participationID)
+
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+
+ err = v2.Node.RemoveParticipationKey(decodedParticipationID)
+
+ if err != nil {
+ if errors.Is(err, account.ErrParticipationIDNotFound) {
+ return notFound(ctx, account.ErrParticipationIDNotFound, "participation id not found", v2.Log)
+ }
+
+ return internalError(ctx, err, err.Error(), v2.Log)
+ }
+
+ return ctx.NoContent(http.StatusOK)
+}
+
+// GetParticipationKeyByID returns participation key info by ID.
+// (GET /v2/participation/{participation-id})
+func (v2 *Handlers) GetParticipationKeyByID(ctx echo.Context, participationID string) error {
+
+ decodedParticipationID, err := account.ParseParticipationID(participationID)
+
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+
+ participationRecord, err := v2.Node.GetParticipationKey(decodedParticipationID)
+
+ if err != nil {
+ return internalError(ctx, err, err.Error(), v2.Log)
+ }
+
+ if participationRecord.IsZero() {
+ return notFound(ctx, account.ErrParticipationIDNotFound, account.ErrParticipationIDNotFound.Error(), v2.Log)
+ }
+
+ response := convertParticipationRecord(participationRecord)
+
+ return ctx.JSON(http.StatusOK, response)
}
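A hedged sketch of how a client could drive the new participation endpoints; the node address, API token, and partkey file name below are assumptions for illustration:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	const node = "http://127.0.0.1:8080" // assumed local algod address
	const token = "your-api-token"       // algod REST API token

	// Hypothetical participation key file generated elsewhere.
	partKeyBinary, err := ioutil.ReadFile("myaccount.partkey")
	if err != nil {
		panic(err)
	}

	// POST /v2/participation installs the key; the response carries its partId.
	req, _ := http.NewRequest("POST", node+"/v2/participation", bytes.NewReader(partKeyBinary))
	req.Header.Set("X-Algo-API-Token", token)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))

	// GET /v2/participation lists installed keys the same way.
}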
// ShutdownNode shuts down the node.
@@ -244,8 +380,7 @@ func (v2 *Handlers) GetProof(ctx echo.Context, round uint64, txid string, params
// GetSupply gets the current supply reported by the ledger.
// (GET /v2/ledger/supply)
func (v2 *Handlers) GetSupply(ctx echo.Context) error {
- latest := v2.Node.Ledger().Latest()
- totals, err := v2.Node.Ledger().Totals(latest)
+ latest, totals, err := v2.Node.Ledger().LatestTotals()
if err != nil {
err = fmt.Errorf("GetSupply(): round %d, failed: %v", latest, err)
return internalError(ctx, err, errInternalFailure, v2.Log)
@@ -716,7 +851,7 @@ func (v2 *Handlers) GetAssetByID(ctx echo.Context, assetID uint64) error {
}
lastRound := ledger.Latest()
- record, err := ledger.Lookup(lastRound, creator)
+ record, _, err := ledger.LookupWithoutRewards(lastRound, creator)
if err != nil {
return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
}
@@ -762,7 +897,9 @@ func (v2 *Handlers) TealCompile(ctx echo.Context) error {
source := buf.String()
ops, err := logic.AssembleString(source)
if err != nil {
- return badRequest(ctx, err, err.Error(), v2.Log)
+ sb := strings.Builder{}
+ ops.ReportProblems("", &sb)
+ return badRequest(ctx, err, sb.String(), v2.Log)
}
pd := logic.HashProgram(ops.Program)
addr := basics.Address(pd)
diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go
index dd89a23ef..e80ca258d 100644
--- a/daemon/algod/api/server/v2/test/handlers_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_test.go
@@ -163,8 +163,9 @@ func TestGetBlockJsonEncoding(t *testing.T) {
backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
defer backlogPool.Shutdown()
- totals, err := l.Totals(l.Latest())
+ totalsRound, totals, err := l.LatestTotals()
require.NoError(t, err)
+ require.Equal(t, l.Latest(), totalsRound)
totalRewardUnits := totals.RewardUnits()
poolBal, err := l.Lookup(l.Latest(), poolAddr)
require.NoError(t, err)
diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go
index b248e40d9..31e6053a6 100644
--- a/daemon/algod/api/server/v2/test/helpers.go
+++ b/daemon/algod/api/server/v2/test/helpers.go
@@ -21,7 +21,6 @@ import (
"math/rand"
"strconv"
"testing"
- "time"
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
@@ -87,6 +86,22 @@ type mockNode struct {
err error
}
+func (m mockNode) InstallParticipationKey(partKeyBinary []byte) (account.ParticipationID, error) {
+ panic("implement me")
+}
+
+func (m mockNode) ListParticipationKeys() ([]account.ParticipationRecord, error) {
+ panic("implement me")
+}
+
+func (m mockNode) GetParticipationKey(id account.ParticipationID) (account.ParticipationRecord, error) {
+ panic("implement me")
+}
+
+func (m mockNode) RemoveParticipationKey(id account.ParticipationID) error {
+ panic("implement me")
+}
+
func makeMockNode(ledger *data.Ledger, genesisID string, nodeError error) mockNode {
return mockNode{
ledger: ledger,
@@ -171,7 +186,7 @@ func (m mockNode) GetTransactionByID(txid transactions.Txid, rnd basics.Round) (
return node.TxnWithStatus{}, fmt.Errorf("get transaction by id not implemented")
}
-func (m mockNode) AssembleBlock(round basics.Round, deadline time.Time) (agreement.ValidatedBlock, error) {
+func (m mockNode) AssembleBlock(round basics.Round) (agreement.ValidatedBlock, error) {
return nil, fmt.Errorf("assemble block not implemented")
}
diff --git a/data/abi/abi_encode.go b/data/abi/abi_encode.go
new file mode 100644
index 000000000..397e66856
--- /dev/null
+++ b/data/abi/abi_encode.go
@@ -0,0 +1,562 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package abi
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math/big"
+ "reflect"
+ "strings"
+)
+
+// typeCastToTuple casts an array-like ABI type into an ABI tuple type.
+func (t Type) typeCastToTuple(tupLen ...int) (Type, error) {
+ var childT []Type
+
+ switch t.abiTypeID {
+ case String:
+ if len(tupLen) != 1 {
+ return Type{}, fmt.Errorf("string type conversion to tuple need 1 length argument")
+ }
+ childT = make([]Type, tupLen[0])
+ for i := 0; i < tupLen[0]; i++ {
+ childT[i] = byteType
+ }
+ case Address:
+ childT = make([]Type, addressByteSize)
+ for i := 0; i < addressByteSize; i++ {
+ childT[i] = byteType
+ }
+ case ArrayStatic:
+ childT = make([]Type, t.staticLength)
+ for i := 0; i < int(t.staticLength); i++ {
+ childT[i] = t.childTypes[0]
+ }
+ case ArrayDynamic:
+ if len(tupLen) != 1 {
+ return Type{}, fmt.Errorf("dynamic array type conversion to tuple need 1 length argument")
+ }
+ childT = make([]Type, tupLen[0])
+ for i := 0; i < tupLen[0]; i++ {
+ childT[i] = t.childTypes[0]
+ }
+ default:
+ return Type{}, fmt.Errorf("type cannot support conversion to tuple")
+ }
+
+ tuple, err := MakeTupleType(childT)
+ if err != nil {
+ return Type{}, err
+ }
+ return tuple, nil
+}
+
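Since typeCastToTuple is unexported, here is a sketch through the exported API of the equivalence it implements: a string encodes as the matching byte tuple behind a 2-byte length prefix.

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/abi"
)

func main() {
	strType, _ := abi.TypeOf("string")
	tupType, _ := abi.TypeOf("(byte,byte,byte)")

	strEnc, _ := strType.Encode("abc")
	tupEnc, _ := tupType.Encode([]interface{}{byte('a'), byte('b'), byte('c')})

	fmt.Printf("%x\n", strEnc) // 0003616263: length prefix, then the tuple bytes
	fmt.Printf("%x\n", tupEnc) // 616263
}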
+// Encode is an ABI type method to encode Go values into bytes following ABI encoding rules
+func (t Type) Encode(value interface{}) ([]byte, error) {
+ switch t.abiTypeID {
+ case Uint, Ufixed:
+ return encodeInt(value, t.bitSize)
+ case Bool:
+ boolValue, ok := value.(bool)
+ if !ok {
+ return nil, fmt.Errorf("cannot cast value to bool in bool encoding")
+ }
+ if boolValue {
+ return []byte{0x80}, nil
+ }
+ return []byte{0x00}, nil
+ case Byte:
+ byteValue, ok := value.(byte)
+ if !ok {
+ return nil, fmt.Errorf("cannot cast value to byte in byte encoding")
+ }
+ return []byte{byteValue}, nil
+ case ArrayStatic, Address:
+ castedType, err := t.typeCastToTuple()
+ if err != nil {
+ return nil, err
+ }
+ return castedType.Encode(value)
+ case ArrayDynamic:
+ dynamicArray, err := inferToSlice(value)
+ if err != nil {
+ return nil, err
+ }
+ castedType, err := t.typeCastToTuple(len(dynamicArray))
+ if err != nil {
+ return nil, err
+ }
+ lengthEncode := make([]byte, lengthEncodeByteSize)
+ binary.BigEndian.PutUint16(lengthEncode, uint16(len(dynamicArray)))
+ encoded, err := castedType.Encode(value)
+ if err != nil {
+ return nil, err
+ }
+ encoded = append(lengthEncode, encoded...)
+ return encoded, nil
+ case String:
+ stringValue, okString := value.(string)
+ if !okString {
+ return nil, fmt.Errorf("cannot cast value to string or array dynamic in encoding")
+ }
+ byteValue := []byte(stringValue)
+ castedType, err := t.typeCastToTuple(len(byteValue))
+ if err != nil {
+ return nil, err
+ }
+ lengthEncode := make([]byte, lengthEncodeByteSize)
+ binary.BigEndian.PutUint16(lengthEncode, uint16(len(byteValue)))
+ encoded, err := castedType.Encode(byteValue)
+ if err != nil {
+ return nil, err
+ }
+ encoded = append(lengthEncode, encoded...)
+ return encoded, nil
+ case Tuple:
+ return encodeTuple(value, t.childTypes)
+ default:
+ return nil, fmt.Errorf("cannot infer type for encoding")
+ }
+}
+
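A minimal usage sketch of Encode, using the package's TypeOf parser (seen later in this diff):

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/abi"
)

func main() {
	uintType, _ := abi.TypeOf("uint64")
	encoded, _ := uintType.Encode(uint64(42))
	fmt.Printf("%x\n", encoded) // 000000000000002a: 8 big-endian bytes

	boolType, _ := abi.TypeOf("bool")
	encoded, _ = boolType.Encode(true)
	fmt.Printf("%x\n", encoded) // 80: true sets the high bit
}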
+// encodeInt encodes int-like Go values to bytes, following ABI encoding rules
+func encodeInt(intValue interface{}, bitSize uint16) ([]byte, error) {
+ var bigInt *big.Int
+
+ switch intValue := intValue.(type) {
+ case int8:
+ bigInt = big.NewInt(int64(intValue))
+ case uint8:
+ bigInt = new(big.Int).SetUint64(uint64(intValue))
+ case int16:
+ bigInt = big.NewInt(int64(intValue))
+ case uint16:
+ bigInt = new(big.Int).SetUint64(uint64(intValue))
+ case int32:
+ bigInt = big.NewInt(int64(intValue))
+ case uint32:
+ bigInt = new(big.Int).SetUint64(uint64(intValue))
+ case int64:
+ bigInt = big.NewInt(intValue)
+ case uint64:
+ bigInt = new(big.Int).SetUint64(intValue)
+ case uint:
+ bigInt = new(big.Int).SetUint64(uint64(intValue))
+ case int:
+ bigInt = big.NewInt(int64(intValue))
+ case *big.Int:
+ bigInt = intValue
+ default:
+ return nil, fmt.Errorf("cannot infer go type for uint encode")
+ }
+
+ if bigInt.Sign() < 0 {
+ return nil, fmt.Errorf("passed in numeric value should be non negative")
+ }
+
+ bytes := bigInt.Bytes()
+ if len(bytes) > int(bitSize/8) {
+ return nil, fmt.Errorf("input value bit size %d > abi type bit size %d", len(bytes)*8, bitSize)
+ }
+
+ zeroPadding := make([]byte, bitSize/8-uint16(len(bytes)))
+ buffer := append(zeroPadding, bytes...)
+ return buffer, nil
+}
+
+// inferToSlice converts an interface value into a slice of interface{} elements; it errors if the value is not a slice or array
+func inferToSlice(value interface{}) ([]interface{}, error) {
+ reflectVal := reflect.ValueOf(value)
+ if reflectVal.Kind() != reflect.Slice && reflectVal.Kind() != reflect.Array {
+ return nil, fmt.Errorf("cannot infer an interface value as a slice of interface element")
+ }
+ // reflect.Value.IsNil panics for array kinds, so consult it only for slices;
+ // a nil slice simply infers to a nil value slice, and arrays can never be nil
+ if reflectVal.Kind() == reflect.Slice && reflectVal.IsNil() {
+ return nil, nil
+ }
+ values := make([]interface{}, reflectVal.Len())
+ for i := 0; i < reflectVal.Len(); i++ {
+ values[i] = reflectVal.Index(i).Interface()
+ }
+ return values, nil
+}
+
+// encodeTuple encodes a slice of Go values to bytes, following ABI encoding rules
+func encodeTuple(value interface{}, childT []Type) ([]byte, error) {
+ if len(childT) >= (1 << 16) {
+ return nil, fmt.Errorf("abi child type number exceeds uint16 maximum")
+ }
+ values, err := inferToSlice(value)
+ if err != nil {
+ return nil, err
+ }
+ if len(values) != len(childT) {
+ return nil, fmt.Errorf("cannot encode abi tuple: value slice length != child type number")
+ }
+
+ // for each tuple element value, it has a head/tail component
+ // we create slots for head/tail bytes now, store them and concat them later
+ heads := make([][]byte, len(childT))
+ tails := make([][]byte, len(childT))
+ isDynamicIndex := make(map[int]bool)
+
+ for i := 0; i < len(childT); i++ {
+ if childT[i].IsDynamic() {
+ // if it is a dynamic value, the head component is not pre-determined
+ // we store an empty placeholder first, since we will need it in byte length calculation
+ headsPlaceholder := []byte{0x00, 0x00}
+ heads[i] = headsPlaceholder
+ // we keep track that the index points to a dynamic value
+ isDynamicIndex[i] = true
+ tailEncoding, err := childT[i].Encode(values[i])
+ if err != nil {
+ return nil, err
+ }
+ tails[i] = tailEncoding
+ } else if childT[i].abiTypeID == Bool {
+ // search previous bool
+ before := findBoolLR(childT, i, -1)
+ // search after bool
+ after := findBoolLR(childT, i, 1)
+ // append to heads and tails
+ if before%8 != 0 {
+ return nil, fmt.Errorf("cannot encode abi tuple: expected before has number of bool mod 8 == 0")
+ }
+ if after > 7 {
+ after = 7
+ }
+ compressed, err := compressBools(values[i : i+after+1])
+ if err != nil {
+ return nil, err
+ }
+ heads[i] = []byte{compressed}
+ i += after
+ isDynamicIndex[i] = false
+ } else {
+ encodeTi, err := childT[i].Encode(values[i])
+ if err != nil {
+ return nil, err
+ }
+ heads[i] = encodeTi
+ isDynamicIndex[i] = false
+ }
+ }
+
+ // adjust heads for dynamic types
+ // the head size is pre-determined: static values plus 2-byte offsets for dynamic values,
+ // so we accumulate the total head size first
+ // (note that although the head size is pre-determined, head values for dynamic types are not)
+ headLength := 0
+ for _, headTi := range heads {
+ headLength += len(headTi)
+ }
+
+ // when we iterate through the heads (byte slice), we need to find heads for dynamic values
+ // the head should correspond to the start index: len( head(x[1]) ... head(x[N]) tail(x[1]) ... tail(x[i-1]) ).
+ tailCurrLength := 0
+ for i := 0; i < len(heads); i++ {
+ if isDynamicIndex[i] {
+ // calculate where the index of dynamic value encoding byte start
+ headValue := headLength + tailCurrLength
+ if headValue >= (1 << 16) {
+ return nil, fmt.Errorf("cannot encode abi tuple: encode length exceeds uint16 maximum")
+ }
+ binary.BigEndian.PutUint16(heads[i], uint16(headValue))
+ }
+ // accumulate the current tailing dynamic encoding bytes length.
+ tailCurrLength += len(tails[i])
+ }
+
+ // concat everything as the abi encoded bytes
+ encoded := make([]byte, 0, headLength+tailCurrLength)
+ for _, head := range heads {
+ encoded = append(encoded, head...)
+ }
+ for _, tail := range tails {
+ encoded = append(encoded, tail...)
+ }
+ return encoded, nil
+}
+
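A worked sketch of the head/tail layout for a tuple with one static and one dynamic element: the string's head slot holds the byte offset where its tail starts.

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/abi"
)

func main() {
	tupType, _ := abi.TypeOf("(uint16,string)")
	encoded, _ := tupType.Encode([]interface{}{uint16(1), "AB"})
	// heads: 0001 (uint16 value) 0004 (offset where the string tail begins)
	// tail:  0002 4142 (length prefix + "AB")
	fmt.Printf("%x\n", encoded) // 0001000400024142
}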
+// compressBools takes a slice of up to 8 interface{} values (each castable to bool)
+// and compresses the bool values into a uint8 integer, most significant bit first
+func compressBools(boolSlice []interface{}) (uint8, error) {
+ var res uint8 = 0
+ if len(boolSlice) > 8 {
+ return 0, fmt.Errorf("compressBools: cannot have slice length > 8")
+ }
+ for i := 0; i < len(boolSlice); i++ {
+ temp, ok := boolSlice[i].(bool)
+ if !ok {
+ return 0, fmt.Errorf("compressBools: cannot cast slice element to bool")
+ }
+ if temp {
+ res |= 1 << uint(7-i)
+ }
+ }
+ return res, nil
+}
+
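compressBools is unexported, but the packing is observable through a static bool array; a sketch:

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/abi"
)

func main() {
	boolArr, _ := abi.TypeOf("bool[3]")
	encoded, _ := boolArr.Encode([]interface{}{true, false, true})
	// true,false,true from the most significant bit: 0b10100000
	fmt.Printf("%x\n", encoded) // a0
}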
+// decodeUint decodes a byte slice into a Go unsigned integer or *big.Int, sized by bitSize
+func decodeUint(encoded []byte, bitSize uint16) (interface{}, error) {
+ if len(encoded) != int(bitSize)/8 {
+ return nil,
+ fmt.Errorf("uint/ufixed decode: expected byte length %d, but got byte length %d", bitSize/8, len(encoded))
+ }
+ switch bitSize / 8 {
+ case 1:
+ return encoded[0], nil
+ case 2:
+ return uint16(new(big.Int).SetBytes(encoded).Uint64()), nil
+ case 3, 4:
+ return uint32(new(big.Int).SetBytes(encoded).Uint64()), nil
+ case 5, 6, 7, 8:
+ return new(big.Int).SetBytes(encoded).Uint64(), nil
+ default:
+ return new(big.Int).SetBytes(encoded), nil
+ }
+}
+
+// Decode is an ABI type method to decode bytes into Go values following ABI encoding rules
+func (t Type) Decode(encoded []byte) (interface{}, error) {
+ switch t.abiTypeID {
+ case Uint, Ufixed:
+ return decodeUint(encoded, t.bitSize)
+ case Bool:
+ if len(encoded) != 1 {
+ return nil, fmt.Errorf("boolean byte should be length 1 byte")
+ }
+ if encoded[0] == 0x00 {
+ return false, nil
+ } else if encoded[0] == 0x80 {
+ return true, nil
+ }
+ return nil, fmt.Errorf("single boolean encoded byte should be of form 0x80 or 0x00")
+ case Byte:
+ if len(encoded) != 1 {
+ return nil, fmt.Errorf("byte should be length 1")
+ }
+ return encoded[0], nil
+ case ArrayStatic:
+ castedType, err := t.typeCastToTuple()
+ if err != nil {
+ return nil, err
+ }
+ return castedType.Decode(encoded)
+ case Address:
+ if len(encoded) != addressByteSize {
+ return nil, fmt.Errorf("address should be length 32")
+ }
+ return encoded, nil
+ case ArrayDynamic:
+ if len(encoded) < lengthEncodeByteSize {
+ return nil, fmt.Errorf("dynamic array format corrupted")
+ }
+ dynamicLen := binary.BigEndian.Uint16(encoded[:lengthEncodeByteSize])
+ castedType, err := t.typeCastToTuple(int(dynamicLen))
+ if err != nil {
+ return nil, err
+ }
+ return castedType.Decode(encoded[lengthEncodeByteSize:])
+ case String:
+ if len(encoded) < lengthEncodeByteSize {
+ return nil, fmt.Errorf("string format corrupted")
+ }
+ stringLenBytes := encoded[:lengthEncodeByteSize]
+ byteLen := binary.BigEndian.Uint16(stringLenBytes)
+ if len(encoded[lengthEncodeByteSize:]) != int(byteLen) {
+ return nil, fmt.Errorf("string representation in byte: length not matching")
+ }
+ return string(encoded[lengthEncodeByteSize:]), nil
+ case Tuple:
+ return decodeTuple(encoded, t.childTypes)
+ default:
+ return nil, fmt.Errorf("cannot infer type for decoding")
+ }
+}
+
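A round-trip sketch: values encoded by Encode decode back through Decode, typed per decodeUint's size switch.

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/abi"
)

func main() {
	strType, _ := abi.TypeOf("string")
	encoded, _ := strType.Encode("hello")
	decoded, _ := strType.Decode(encoded)
	fmt.Println(decoded) // hello

	u16Type, _ := abi.TypeOf("uint16")
	encoded, _ = u16Type.Encode(uint16(300))
	decoded, _ = u16Type.Decode(encoded)
	fmt.Println(decoded) // 300, returned as a uint16
}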
+// decodeTuple decodes a byte slice against a slice of ABI child types, producing a slice of
+// Go interface values, following ABI encoding rules
+func decodeTuple(encoded []byte, childT []Type) ([]interface{}, error) {
+ dynamicSegments := make([]int, 0, len(childT)+1)
+ valuePartition := make([][]byte, 0, len(childT))
+ iterIndex := 0
+
+ for i := 0; i < len(childT); i++ {
+ if childT[i].IsDynamic() {
+ if len(encoded[iterIndex:]) < lengthEncodeByteSize {
+ return nil, fmt.Errorf("ill formed tuple dynamic typed value encoding")
+ }
+ dynamicIndex := binary.BigEndian.Uint16(encoded[iterIndex : iterIndex+lengthEncodeByteSize])
+ dynamicSegments = append(dynamicSegments, int(dynamicIndex))
+ valuePartition = append(valuePartition, nil)
+ iterIndex += lengthEncodeByteSize
+ } else if childT[i].abiTypeID == Bool {
+ // search previous bool
+ before := findBoolLR(childT, i, -1)
+ // search after bool
+ after := findBoolLR(childT, i, 1)
+ if before%8 == 0 {
+ if after > 7 {
+ after = 7
+ }
+ // unpack the bools packed into this byte into individual single-byte encodings
+ for boolIndex := uint(0); boolIndex <= uint(after); boolIndex++ {
+ boolMask := 0x80 >> boolIndex
+ if encoded[iterIndex]&byte(boolMask) > 0 {
+ valuePartition = append(valuePartition, []byte{0x80})
+ } else {
+ valuePartition = append(valuePartition, []byte{0x00})
+ }
+ }
+ i += after
+ iterIndex++
+ } else {
+ return nil, fmt.Errorf("expected before bool number mod 8 == 0")
+ }
+ } else {
+ // static non-bool type: consume its fixed byte length
+ currLen, err := childT[i].ByteLen()
+ if err != nil {
+ return nil, err
+ }
+ valuePartition = append(valuePartition, encoded[iterIndex:iterIndex+currLen])
+ iterIndex += currLen
+ }
+ if i != len(childT)-1 && iterIndex >= len(encoded) {
+ return nil, fmt.Errorf("input byte not enough to decode")
+ }
+ }
+
+ if len(dynamicSegments) > 0 {
+ dynamicSegments = append(dynamicSegments, len(encoded))
+ iterIndex = len(encoded)
+ }
+ if iterIndex < len(encoded) {
+ return nil, fmt.Errorf("input byte not fully consumed")
+ }
+ for i := 0; i < len(dynamicSegments)-1; i++ {
+ if dynamicSegments[i] > dynamicSegments[i+1] {
+ return nil, fmt.Errorf("dynamic segment should display a [l, r] space with l <= r")
+ }
+ }
+
+ segIndex := 0
+ for i := 0; i < len(childT); i++ {
+ if childT[i].IsDynamic() {
+ valuePartition[i] = encoded[dynamicSegments[segIndex]:dynamicSegments[segIndex+1]]
+ segIndex++
+ }
+ }
+
+ values := make([]interface{}, len(childT))
+ for i := 0; i < len(childT); i++ {
+ var err error
+ values[i], err = childT[i].Decode(valuePartition[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return values, nil
+}
+
+// ParseArgJSONtoByteSlice converts input method arguments (as JSON strings) to ABI encoded bytes:
+// it parses each entry of argTypes into an ABI type and encodes the corresponding JSON argument.
+// If there are 15 or more inputs, the trailing inputs are compacted into a single tuple.
+func ParseArgJSONtoByteSlice(argTypes []string, jsonArgs []string, applicationArgs *[][]byte) error {
+ abiTypes := make([]Type, len(argTypes))
+ for i, typeString := range argTypes {
+ abiType, err := TypeOf(typeString)
+ if err != nil {
+ return err
+ }
+ abiTypes[i] = abiType
+ }
+
+ if len(abiTypes) != len(jsonArgs) {
+ return fmt.Errorf("input argument number %d != method argument number %d", len(jsonArgs), len(abiTypes))
+ }
+
+ // change the input args to be 1 - 14 + 15 (compacting everything together)
+ if len(jsonArgs) > 14 {
+ compactedType, err := MakeTupleType(abiTypes[14:])
+ if err != nil {
+ return err
+ }
+ abiTypes = append(abiTypes[:14], compactedType)
+
+ remainingJSON := "[" + strings.Join(jsonArgs[14:], ",") + "]"
+ jsonArgs = append(jsonArgs[:14], remainingJSON)
+ }
+
+ // parse JSON value to ABI encoded bytes
+ for i := 0; i < len(jsonArgs); i++ {
+ interfaceVal, err := abiTypes[i].UnmarshalFromJSON([]byte(jsonArgs[i]))
+ if err != nil {
+ return err
+ }
+ abiEncoded, err := abiTypes[i].Encode(interfaceVal)
+ if err != nil {
+ return err
+ }
+ *applicationArgs = append(*applicationArgs, abiEncoded)
+ }
+ return nil
+}
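+
+// Illustrative usage sketch (argument values are arbitrary):
+//
+//	var appArgs [][]byte
+//	err := ParseArgJSONtoByteSlice(
+//		[]string{"uint64", "string"},
+//		[]string{"42", `"hello"`},
+//		&appArgs,
+//	)
+//	// on success, appArgs[0] is the 8-byte big-endian encoding of 42,
+//	// and appArgs[1] is 0x00, 0x05 followed by the bytes of "hello"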
+
+// ParseMethodSignature parses a method of format `method(argType1,argType2,...)retType`
+// into `method` {`argType1`,`argType2`,..} and `retType`
+func ParseMethodSignature(methodSig string) (name string, argTypes []string, returnType string, err error) {
+ argsStart := strings.Index(methodSig, "(")
+ if argsStart == -1 {
+ err = fmt.Errorf("Invalid method signature: %s", methodSig)
+ return
+ }
+
+ argsEnd := -1
+ depth := 0
+argsLoop:
+ for index, char := range methodSig {
+ switch char {
+ case '(':
+ depth++
+ case ')':
+ if depth == 0 {
+ err = fmt.Errorf("Unpaired parenthesis in method signature: %s", methodSig)
+ return
+ }
+ depth--
+ if depth == 0 {
+ argsEnd = index
+ // a labeled break is required here: a plain break would only exit
+ // the switch, and a parenthesized (tuple) return type would then
+ // overwrite argsEnd with its own closing parenthesis
+ break argsLoop
+ }
+ }
+ }
+
+ if argsEnd == -1 {
+ err = fmt.Errorf("Invalid method signature: %s", methodSig)
+ return
+ }
+
+ name = methodSig[:argsStart]
+ argTypes, err = parseTupleContent(methodSig[argsStart+1 : argsEnd])
+ returnType = methodSig[argsEnd+1:]
+ return
+}
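+
+// Illustrative example, including a parenthesized (tuple) return type:
+//
+//	name, argTypes, retType, _ := ParseMethodSignature("getPair(uint64)(uint64,uint64)")
+//	// name == "getPair", argTypes == []string{"uint64"}, retType == "(uint64,uint64)"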
diff --git a/data/abi/abi_encode_test.go b/data/abi/abi_encode_test.go
new file mode 100644
index 000000000..c585564c6
--- /dev/null
+++ b/data/abi/abi_encode_test.go
@@ -0,0 +1,1003 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package abi
+
+import (
+ "crypto/rand"
+ "encoding/binary"
+ "math/big"
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/chrismcguire/gobberish"
+ "github.com/stretchr/testify/require"
+)
+
+func TestEncodeValid(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // encoding test for uint type, iterating through all uint sizes
+ // randomly pick 1000 valid uint values and check that each encoded value matches the expected bytes
+ for intSize := 8; intSize <= 512; intSize += 8 {
+ upperLimit := big.NewInt(0).Lsh(big.NewInt(1), uint(intSize))
+ uintType, err := makeUintType(intSize)
+ require.NoError(t, err, "make uint type fail")
+
+ for i := 0; i < 1000; i++ {
+ randomInt, err := rand.Int(rand.Reader, upperLimit)
+ require.NoError(t, err, "cryptographic random int init fail")
+
+ randomIntByte := randomInt.Bytes()
+ expected := make([]byte, intSize/8-len(randomIntByte))
+ expected = append(expected, randomIntByte...)
+
+ uintEncode, err := uintType.Encode(randomInt)
+ require.NoError(t, err, "encoding from uint type fail")
+
+ require.Equal(t, expected, uintEncode, "encode uint not match with expected")
+ }
+ // 2^[bitSize] - 1 test
+ // check if uint<bitSize> can contain max uint value (2^bitSize - 1)
+ largest := big.NewInt(0).Add(
+ upperLimit,
+ big.NewInt(1).Neg(big.NewInt(1)),
+ )
+ encoded, err := uintType.Encode(largest)
+ require.NoError(t, err, "largest uint encode error")
+ require.Equal(t, largest.Bytes(), encoded, "encode uint largest do not match with expected")
+ }
+
+ // encoding test for ufixed, iterating through all the valid ufixed bitSize and precision
+ // randomly generate 10 big int values for the ufixed numerator and check that each encoded value matches the expected bytes
+ // also check that ufixed can fit the max numerator (2^bitSize - 1) for the given bitSize
+ for size := 8; size <= 512; size += 8 {
+ upperLimit := big.NewInt(0).Lsh(big.NewInt(1), uint(size))
+ largest := big.NewInt(0).Add(
+ upperLimit,
+ big.NewInt(1).Neg(big.NewInt(1)),
+ )
+ for precision := 1; precision <= 160; precision++ {
+ typeUfixed, err := makeUfixedType(size, precision)
+ require.NoError(t, err, "make ufixed type fail")
+
+ for i := 0; i < 10; i++ {
+ randomInt, err := rand.Int(rand.Reader, upperLimit)
+ require.NoError(t, err, "cryptographic random int init fail")
+
+ encodedUfixed, err := typeUfixed.Encode(randomInt)
+ require.NoError(t, err, "ufixed encode fail")
+
+ randomBytes := randomInt.Bytes()
+ buffer := make([]byte, size/8-len(randomBytes))
+ buffer = append(buffer, randomBytes...)
+ require.Equal(t, buffer, encodedUfixed, "encode ufixed not match with expected")
+ }
+ // (2^[bitSize] - 1) / (10^[precision]) test
+ ufixedLargestEncode, err := typeUfixed.Encode(largest)
+ require.NoError(t, err, "largest ufixed encode error")
+ require.Equal(t, largest.Bytes(), ufixedLargestEncode,
+ "encode ufixed largest do not match with expected")
+ }
+ }
+
+ // encoding test for address; since an address is 32 bytes, it can be treated as a 256-bit uint
+ // randomly generate 1000 uint256 values as address values, and check that each encoded value matches the expected bytes
+ upperLimit := big.NewInt(0).Lsh(big.NewInt(1), 256)
+ for i := 0; i < 1000; i++ {
+ randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
+ require.NoError(t, err, "cryptographic random int init fail")
+
+ rand256Bytes := randomAddrInt.Bytes()
+ addrBytesExpected := make([]byte, 32-len(rand256Bytes))
+ addrBytesExpected = append(addrBytesExpected, rand256Bytes...)
+
+ addrBytesActual, err := addressType.Encode(addrBytesExpected)
+ require.NoError(t, err, "address encode fail")
+ require.Equal(t, addrBytesExpected, addrBytesActual, "encode addr not match with expected")
+ }
+
+ // encoding test for bool values
+ for i := 0; i < 2; i++ {
+ boolEncode, err := boolType.Encode(i == 1)
+ require.NoError(t, err, "bool encode fail")
+ expected := []byte{0x00}
+ if i == 1 {
+ expected = []byte{0x80}
+ }
+ require.Equal(t, expected, boolEncode, "encode bool not match with expected")
+ }
+
+ // encoding test for byte values
+ for i := 0; i < (1 << 8); i++ {
+ byteEncode, err := byteType.Encode(byte(i))
+ require.NoError(t, err, "byte encode fail")
+ expected := []byte{byte(i)}
+ require.Equal(t, expected, byteEncode, "encode byte not match with expected")
+ }
+
+ // encoding test for string values; since ABI strings contain utf-8 symbols,
+ // we use `gobberish` to generate random utf-8 strings
+ // for each length from 1 to 100, draw 10 random strings
+ // and check that the encoded ABI string matches the expected bytes
+ for length := 1; length <= 100; length++ {
+ for i := 0; i < 10; i++ {
+ // generate utf8 strings from `gobberish` at some length
+ utf8Str := gobberish.GenerateString(length)
+ // since string is just a type alias of `byte[]`, the byte length must be stored in the encoding
+ utf8ByteLen := len([]byte(utf8Str))
+ lengthBytes := make([]byte, 2)
+ binary.BigEndian.PutUint16(lengthBytes, uint16(utf8ByteLen))
+ expected := append(lengthBytes, []byte(utf8Str)...)
+
+ strEncode, err := stringType.Encode(utf8Str)
+ require.NoError(t, err, "string encode fail")
+ require.Equal(t, expected, strEncode, "encode string not match with expected")
+ }
+ }
+
+ // encoding test for static bool array, the expected behavior of encoding is to
+ // compress multiple bool into a single byte.
+ // input: {T, F, F, T, T}, encode expected: {0b10011000}
+ staticBoolArrType := makeStaticArrayType(boolType, 5)
+ t.Run("static bool array encoding", func(t *testing.T) {
+ inputBase := []bool{true, false, false, true, true}
+ expected := []byte{
+ 0b10011000,
+ }
+ boolArrEncode, err := staticBoolArrType.Encode(inputBase)
+ require.NoError(t, err, "static bool array encoding should not return error")
+ require.Equal(t, expected, boolArrEncode, "static bool array encode not match expected")
+ })
+
+ // encoding test for static bool array
+ // input: {F, F, F, T, T, F, T, F, T, F, T}, encode expected: {0b00011010, 0b10100000}
+ staticBoolArrType = makeStaticArrayType(boolType, 11)
+ t.Run("static bool array encoding", func(t *testing.T) {
+ inputBase := []bool{false, false, false, true, true, false, true, false, true, false, true}
+ expected := []byte{
+ 0b00011010, 0b10100000,
+ }
+ boolArrEncode, err := staticBoolArrType.Encode(inputBase)
+ require.NoError(t, err, "static bool array encoding should not return error")
+ require.Equal(t, expected, boolArrEncode, "static bool array encode not match expected")
+ })
+
+ // encoding test for dynamic bool array
+ // input: {F, T, F, T, F, T, F, T, F, T}, encode expected: {0b01010101, 0b01000000}
+ dynamicBoolArrayType := makeDynamicArrayType(boolType)
+ t.Run("dynamic bool array encoding", func(t *testing.T) {
+ inputBase := []bool{false, true, false, true, false, true, false, true, false, true}
+ expected := []byte{
+ 0x00, 0x0A, 0b01010101, 0b01000000,
+ }
+ boolArrEncode, err := dynamicBoolArrayType.Encode(inputBase)
+ require.NoError(t, err, "dynamic bool array encoding should not return error")
+ require.Equal(t, expected, boolArrEncode, "dynamic bool array encode not match expected")
+ })
+
+ // encoding test for dynamic tuple values
+ // input type: (string, bool, bool, bool, bool, string)
+ // input value: ("ABC", T, F, T, F, "DEF")
+ /*
+ encode expected:
+ 0x00, 0x05 (first string start at 5th byte)
+ 0b10100000 (4 bool tuple element compacted together)
+ 0x00, 0x0A (second string start at 10th byte)
+ 0x00, 0x03 (first string byte length 3)
+ byte('A'), byte('B'), byte('C') (first string encoded bytes)
+ 0x00, 0x03 (second string byte length 3)
+ byte('D'), byte('E'), byte('F') (second string encoded bytes)
+ */
+ tupleType, err := TypeOf("(string,bool,bool,bool,bool,string)")
+ require.NoError(t, err, "type from string for dynamic tuple type should not return error")
+ t.Run("dynamic tuple encoding", func(t *testing.T) {
+ inputBase := []interface{}{
+ "ABC", true, false, true, false, "DEF",
+ }
+ expected := []byte{
+ 0x00, 0x05, 0b10100000, 0x00, 0x0A,
+ 0x00, 0x03, byte('A'), byte('B'), byte('C'),
+ 0x00, 0x03, byte('D'), byte('E'), byte('F'),
+ }
+ stringTupleEncode, err := tupleType.Encode(inputBase)
+ require.NoError(t, err, "string tuple encoding should not return error")
+ require.Equal(t, expected, stringTupleEncode, "string tuple encoding not match expected")
+ })
+
+ // encoding test for tuples with static bool arrays
+ // input type: {bool[2], bool[2]}
+ // input value: ({T, T}, {T, T})
+ /*
+ encode expected:
+ 0b11000000 (first static bool array)
+ 0b11000000 (second static bool array)
+ */
+ tupleType, err = TypeOf("(bool[2],bool[2])")
+ require.NoError(t, err, "type from string for tuple type should not return error")
+ t.Run("static bool array tuple encoding", func(t *testing.T) {
+ expected := []byte{
+ 0b11000000,
+ 0b11000000,
+ }
+ actual, err := tupleType.Encode([]interface{}{
+ []bool{true, true},
+ []bool{true, true},
+ })
+ require.NoError(t, err, "encode tuple value should not return error")
+ require.Equal(t, expected, actual, "encode static bool tuple should be equal")
+ })
+
+ // encoding test for tuples with static and dynamic bool arrays
+ // input type: (bool[2], bool[])
+ // input value: ({T, T}, {T, T})
+ /*
+ encode expected:
+ 0b11000000 (first static bool array)
+ 0x00, 0x03 (second dynamic bool array starts at 3rd byte)
+ 0x00, 0x02 (dynamic bool array length 2)
+ 0b11000000 (second static bool array)
+ */
+ tupleType, err = TypeOf("(bool[2],bool[])")
+ require.NoError(t, err, "type from string for tuple type should not return error")
+ t.Run("static/dynamic bool array tuple encoding", func(t *testing.T) {
+ expected := []byte{
+ 0b11000000,
+ 0x00, 0x03,
+ 0x00, 0x02, 0b11000000,
+ }
+ actual, err := tupleType.Encode([]interface{}{
+ []bool{true, true},
+ []bool{true, true},
+ })
+ require.NoError(t, err, "tuple value encoding should not return error")
+ require.Equal(t, expected, actual, "encode static/dynamic bool array tuple should not return error")
+ })
+
+ // encoding test for tuples with all dynamic bool arrays
+ // input type: (bool[], bool[])
+ // input values: ({}, {})
+ /*
+ encode expected:
+ 0x00, 0x04 (first dynamic bool array starts at 4th byte)
+ 0x00, 0x06 (second dynamic bool array starts at 6th byte)
+ 0x00, 0x00 (first dynamic bool array length 0)
+ 0x00, 0x00 (second dynamic bool array length 0)
+ */
+ tupleType, err = TypeOf("(bool[],bool[])")
+ require.NoError(t, err, "type from string for tuple type should not return error")
+ t.Run("empty dynamic array tuple encoding", func(t *testing.T) {
+ expected := []byte{
+ 0x00, 0x04, 0x00, 0x06,
+ 0x00, 0x00, 0x00, 0x00,
+ }
+ actual, err := tupleType.Encode([]interface{}{
+ []bool{}, []bool{},
+ })
+ require.NoError(t, err, "encode empty dynamic array tuple should not return error")
+ require.Equal(t, expected, actual, "encode empty dynamic array tuple does not match with expected")
+ })
+
+ // encoding test for empty tuple
+ // input: (), expected encoding: ""
+ tupleType, err = TypeOf("()")
+ require.NoError(t, err, "type from string for tuple type should not return error")
+ t.Run("empty tuple encoding", func(t *testing.T) {
+ expected := make([]byte, 0)
+ actual, err := tupleType.Encode([]interface{}{})
+ require.NoError(t, err, "encode empty tuple should not return error")
+ require.Equal(t, expected, actual, "empty tuple encode should not return error")
+ })
+}
+
+func TestDecodeValid(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // decoding test for uint, iterating through all valid uint bitSize
+ // randomly take 1000 tests on each valid bitSize
+ // generate bytes from random uint values and decode bytes with additional type information
+ for intSize := 8; intSize <= 512; intSize += 8 {
+ upperLimit := big.NewInt(0).Lsh(big.NewInt(1), uint(intSize))
+ uintType, err := makeUintType(intSize)
+ require.NoError(t, err, "make uint type failure")
+ for i := 0; i < 1000; i++ {
+ randBig, err := rand.Int(rand.Reader, upperLimit)
+ require.NoError(t, err, "cryptographic random int init fail")
+
+ var expected interface{}
+ if intSize <= 64 && intSize > 32 {
+ expected = randBig.Uint64()
+ } else if intSize <= 32 && intSize > 16 {
+ expected = uint32(randBig.Uint64())
+ } else if intSize == 16 {
+ expected = uint16(randBig.Uint64())
+ } else if intSize == 8 {
+ expected = uint8(randBig.Uint64())
+ } else {
+ expected = randBig
+ }
+
+ encodedUint, err := uintType.Encode(expected)
+ require.NoError(t, err, "uint encode fail")
+
+ actual, err := uintType.Decode(encodedUint)
+ require.NoError(t, err, "decoding uint should not return error")
+ require.Equal(t, expected, actual, "decode uint fail to match expected value")
+ }
+ }
+
+ // decoding test for ufixed, iterating through all valid ufixed bitSize and precision
+ // randomly take 10 tests on each valid setting
+ // generate ufixed bytes and try to decode back with additional type information
+ for size := 8; size <= 512; size += 8 {
+ upperLimit := big.NewInt(0).Lsh(big.NewInt(1), uint(size))
+ for precision := 1; precision <= 160; precision++ {
+ ufixedType, err := makeUfixedType(size, precision)
+ require.NoError(t, err, "make ufixed type failure")
+ for i := 0; i < 10; i++ {
+ randBig, err := rand.Int(rand.Reader, upperLimit)
+ require.NoError(t, err, "cryptographic random int init fail")
+
+ var expected interface{}
+ if size <= 64 && size > 32 {
+ expected = randBig.Uint64()
+ } else if size <= 32 && size > 16 {
+ expected = uint32(randBig.Uint64())
+ } else if size == 16 {
+ expected = uint16(randBig.Uint64())
+ } else if size == 8 {
+ expected = uint8(randBig.Uint64())
+ } else {
+ expected = randBig
+ }
+
+ encodedUfixed, err := ufixedType.Encode(expected)
+ require.NoError(t, err, "ufixed encode fail")
+ require.NoError(t, err, "cast big integer to expected value should not return error")
+
+ actual, err := ufixedType.Decode(encodedUfixed)
+ require.NoError(t, err, "decoding ufixed should not return error")
+ require.Equal(t, expected, actual, "decode ufixed fail to match expected value")
+ }
+ }
+ }
+
+ // decoding test for address, randomly take 1000 tests
+ // address is type alias of byte[32], we generate address value with random 256 bit big int values
+ // we make the expected address value and decode the encoding of expected, check if they match
+ upperLimit := big.NewInt(0).Lsh(big.NewInt(1), 256)
+ for i := 0; i < 1000; i++ {
+ randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
+ require.NoError(t, err, "cryptographic random int init fail")
+
+ addressBytes := randomAddrInt.Bytes()
+ expected := make([]byte, 32-len(addressBytes))
+ expected = append(expected, addressBytes...)
+
+ actual, err := addressType.Decode(expected)
+ require.NoError(t, err, "decoding address should not return error")
+ require.Equal(t, expected, actual, "decode addr not match with expected")
+ }
+
+ // bool value decoding test
+ for i := 0; i < 2; i++ {
+ boolEncode, err := boolType.Encode(i == 1)
+ require.NoError(t, err, "bool encode fail")
+ actual, err := boolType.Decode(boolEncode)
+ require.NoError(t, err, "decoding bool should not return error")
+ require.Equal(t, i == 1, actual, "decode bool not match with expected")
+ }
+
+ // byte value decoding test, iterating through 256 valid byte value
+ for i := 0; i < (1 << 8); i++ {
+ byteEncode, err := byteType.Encode(byte(i))
+ require.NoError(t, err, "byte encode fail")
+ actual, err := byteType.Decode(byteEncode)
+ require.NoError(t, err, "decoding byte should not return error")
+ require.Equal(t, byte(i), actual, "decode byte not match with expected")
+ }
+
+ // string value decoding test, test from utf string length 1 to 100
+ // randomly take 10 utf-8 strings to make ABI string values
+ // decode the encoded expected value and check if they match
+ for length := 1; length <= 100; length++ {
+ for i := 0; i < 10; i++ {
+ expected := gobberish.GenerateString(length)
+ strEncode, err := stringType.Encode(expected)
+ require.NoError(t, err, "string encode fail")
+ actual, err := stringType.Decode(strEncode)
+ require.NoError(t, err, "decoding string should not return error")
+ require.Equal(t, expected, actual, "encode string not match with expected")
+ }
+ }
+
+ // decoding test for static bool array
+ // expected value: bool[5]: {T, F, F, T, T}
+ // input: 0b10011000
+ t.Run("static bool array decode", func(t *testing.T) {
+ staticBoolArrT, err := TypeOf("bool[5]")
+ require.NoError(t, err, "make static bool array type failure")
+ expected := []interface{}{true, false, false, true, true}
+ actual, err := staticBoolArrT.Decode([]byte{0b10011000})
+ require.NoError(t, err, "decoding static bool array should not return error")
+ require.Equal(t, expected, actual, "static bool array decode do not match expected")
+ })
+
+ // decoding test for static bool array
+ // expected value: bool[11]: {F, F, F, T, T, F, T, F, T, F, T}
+ // input: 0b00011010, 0b10100000
+ t.Run("static bool array decode", func(t *testing.T) {
+ staticBoolArrT, err := TypeOf("bool[11]")
+ require.NoError(t, err, "make static bool array type failure")
+ expected := []interface{}{false, false, false, true, true, false, true, false, true, false, true}
+ actual, err := staticBoolArrT.Decode([]byte{0b00011010, 0b10100000})
+ require.NoError(t, err, "decoding static bool array should not return error")
+ require.Equal(t, expected, actual, "static bool array decode do not match expected")
+ })
+
+ // decoding test for static uint array
+ // expected input: uint64[8]: {1, 2, 3, 4, 5, 6, 7, 8}
+ /*
+ input: 0, 0, 0, 0, 0, 0, 0, 1 (encoding for uint64 1)
+ 0, 0, 0, 0, 0, 0, 0, 2 (encoding for uint64 2)
+ 0, 0, 0, 0, 0, 0, 0, 3 (encoding for uint64 3)
+ 0, 0, 0, 0, 0, 0, 0, 4 (encoding for uint64 4)
+ 0, 0, 0, 0, 0, 0, 0, 5 (encoding for uint64 5)
+ 0, 0, 0, 0, 0, 0, 0, 6 (encoding for uint64 6)
+ 0, 0, 0, 0, 0, 0, 0, 7 (encoding for uint64 7)
+ 0, 0, 0, 0, 0, 0, 0, 8 (encoding for uint64 8)
+ */
+ t.Run("static uint array decode", func(t *testing.T) {
+ staticUintArrT, err := TypeOf("uint64[8]")
+ require.NoError(t, err, "make static uint array type failure")
+ expected := []interface{}{
+ uint64(1), uint64(2),
+ uint64(3), uint64(4),
+ uint64(5), uint64(6),
+ uint64(7), uint64(8),
+ }
+ arrayEncoded, err := staticUintArrT.Encode(expected)
+ require.NoError(t, err, "uint64 static array encode should not return error")
+ actual, err := staticUintArrT.Decode(arrayEncoded)
+ require.NoError(t, err, "uint64 static array decode should not return error")
+ require.Equal(t, expected, actual, "uint64 static array decode do not match with expected value")
+ })
+
+ // decoding test for dynamic bool array
+ // expected value: bool[]: {F, T, F, T, F, T, F, T, F, T}
+ /*
+ input bytes: 0x00, 0x0A (dynamic bool array length 10)
+ 0b01010101, 0b01000000 (dynamic bool array encoding)
+ */
+ t.Run("dynamic bool array decode", func(t *testing.T) {
+ dynamicBoolArrT, err := TypeOf("bool[]")
+ require.NoError(t, err, "make dynamic bool array type failure")
+ expected := []interface{}{false, true, false, true, false, true, false, true, false, true}
+ inputEncoded := []byte{
+ 0x00, 0x0A, 0b01010101, 0b01000000,
+ }
+ actual, err := dynamicBoolArrT.Decode(inputEncoded)
+ require.NoError(t, err, "decode dynamic array should not return error")
+ require.Equal(t, expected, actual, "decode dynamic array do not match expected")
+ })
+
+ // decoding test for dynamic tuple values
+ // expected value type: (string, bool, bool, bool, bool, string)
+ // expected value: ("ABC", T, F, T, F, "DEF")
+ /*
+ input bytes:
+ 0x00, 0x05 (first string start at 5th byte)
+ 0b10100000 (4 bool tuple element compacted together)
+ 0x00, 0x0A (second string start at 10th byte)
+ 0x00, 0x03 (first string byte length 3)
+ byte('A'), byte('B'), byte('C') (first string encoded bytes)
+ 0x00, 0x03 (second string byte length 3)
+ byte('D'), byte('E'), byte('F') (second string encoded bytes)
+ */
+ t.Run("dynamic tuple decoding", func(t *testing.T) {
+ tupleT, err := TypeOf("(string,bool,bool,bool,bool,string)")
+ require.NoError(t, err, "make tuple type failure")
+ inputEncode := []byte{
+ 0x00, 0x05, 0b10100000, 0x00, 0x0A,
+ 0x00, 0x03, byte('A'), byte('B'), byte('C'),
+ 0x00, 0x03, byte('D'), byte('E'), byte('F'),
+ }
+ expected := []interface{}{
+ "ABC", true, false, true, false, "DEF",
+ }
+ actual, err := tupleT.Decode(inputEncode)
+ require.NoError(t, err, "decoding dynamic tuple should not return error")
+ require.Equal(t, expected, actual, "dynamic tuple not match with expected")
+ })
+
+ // decoding test for tuple with static bool array
+ // expected type: (bool[2], bool[2])
+ // expected value: ({T, T}, {T, T})
+ /*
+ input bytes:
+ 0b11000000 (first static bool array)
+ 0b11000000 (second static bool array)
+ */
+ t.Run("static bool array tuple decoding", func(t *testing.T) {
+ tupleT, err := TypeOf("(bool[2],bool[2])")
+ require.NoError(t, err, "make tuple type failure")
+ expected := []interface{}{
+ []interface{}{true, true},
+ []interface{}{true, true},
+ }
+ encodedInput := []byte{
+ 0b11000000,
+ 0b11000000,
+ }
+ actual, err := tupleT.Decode(encodedInput)
+ require.NoError(t, err, "decode tuple value should not return error")
+ require.Equal(t, expected, actual, "decoded tuple value do not match with expected")
+ })
+
+ // decoding test for tuple with static and dynamic bool array
+ // expected type: (bool[2], bool[])
+ // expected value: ({T, T}, {T, T})
+ /*
+ input bytes:
+ 0b11000000 (first static bool array)
+ 0x00, 0x03 (second dynamic bool array starts at 3rd byte)
+ 0x00, 0x02 (dynamic bool array length 2)
+ 0b11000000 (second static bool array)
+ */
+ t.Run("static/dynamic bool array tuple decoding", func(t *testing.T) {
+ tupleT, err := TypeOf("(bool[2],bool[])")
+ require.NoError(t, err, "make tuple type failure")
+ expected := []interface{}{
+ []interface{}{true, true},
+ []interface{}{true, true},
+ }
+ encodedInput := []byte{
+ 0b11000000,
+ 0x00, 0x03,
+ 0x00, 0x02, 0b11000000,
+ }
+ actual, err := tupleT.Decode(encodedInput)
+ require.NoError(t, err, "decode tuple for static/dynamic bool array should not return error")
+ require.Equal(t, expected, actual, "decoded tuple value do not match with expected")
+ })
+
+ // decoding test for tuple with all dynamic bool array
+ // expected type: (bool[], bool[])
+ // expected value: ({}, {})
+ /*
+ input bytes:
+ 0x00, 0x04 (first dynamic bool array starts at 4th byte)
+ 0x00, 0x06 (second dynamic bool array starts at 6th byte)
+ 0x00, 0x00 (first dynamic bool array length 0)
+ 0x00, 0x00 (second dynamic bool array length 0)
+ */
+ t.Run("empty dynamic array tuple decoding", func(t *testing.T) {
+ tupleT, err := TypeOf("(bool[],bool[])")
+ require.NoError(t, err, "make tuple type failure")
+ expected := []interface{}{
+ []interface{}{}, []interface{}{},
+ }
+ encodedInput := []byte{
+ 0x00, 0x04, 0x00, 0x06,
+ 0x00, 0x00, 0x00, 0x00,
+ }
+ actual, err := tupleT.Decode(encodedInput)
+ require.NoError(t, err, "decode tuple for empty dynamic array should not return error")
+ require.Equal(t, expected, actual, "decoded tuple value do not match with expected")
+ })
+
+ // decoding test for empty tuple
+ // expected value: ()
+ // byte input: ""
+ t.Run("empty tuple decoding", func(t *testing.T) {
+ tupleT, err := TypeOf("()")
+ require.NoError(t, err, "make empty tuple type should not return error")
+ actual, err := tupleT.Decode([]byte{})
+ require.NoError(t, err, "decode empty tuple should not return error")
+ require.Equal(t, []interface{}{}, actual, "empty tuple encode should not return error")
+ })
+}
+
+func TestDecodeInvalid(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // decoding test for *corrupted* static bool array
+ // expected 9 elements for static bool array
+ // encoded bytes have only 8 bool values
+ // should throw error
+ t.Run("corrupted static bool array decode", func(t *testing.T) {
+ inputBase := []byte{0b11111111}
+ arrayType := makeStaticArrayType(boolType, 9)
+ _, err := arrayType.Decode(inputBase)
+ require.Error(t, err, "decoding corrupted static bool array should return error")
+ })
+
+ // decoding test for *corrupted* static bool array
+ // expected 8 elements for static bool array
+ // encoded bytes have 1 byte more (0b00000000)
+ // should throw error
+ t.Run("corrupted static bool array decode", func(t *testing.T) {
+ inputBase := []byte{0b01001011, 0b00000000}
+ arrayType := makeStaticArrayType(boolType, 8)
+ _, err := arrayType.Decode(inputBase)
+ require.Error(t, err, "decoding corrupted static bool array should return error")
+ })
+
+ // decoding test for *corrupted* static uint array
+ // expected 8 uint elements in static uint64[8] array
+ // encoded bytes provide only 7 uint64 encoding
+ // should throw error
+ t.Run("static uint array decode", func(t *testing.T) {
+ inputBase := []byte{
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1,
+ 0, 0, 0, 0, 0, 0, 0, 2,
+ 0, 0, 0, 0, 0, 0, 0, 3,
+ 0, 0, 0, 0, 0, 0, 0, 4,
+ 0, 0, 0, 0, 0, 0, 0, 5,
+ 0, 0, 0, 0, 0, 0, 0, 6,
+ }
+ uintTArray, err := TypeOf("uint64[8]")
+ require.NoError(t, err, "make uint64 static array type should not return error")
+ _, err = uintTArray.Decode(inputBase)
+ require.Error(t, err, "corrupted uint64 static array decode should return error")
+ })
+
+ // decoding test for *corrupted* static uint array
+ // expected 7 uint elements in static uint64[7] array
+ // encoded bytes provide 8 uint64 encoding (one more uint64: 7)
+ // should throw error
+ t.Run("static uint array decode", func(t *testing.T) {
+ inputBase := []byte{
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1,
+ 0, 0, 0, 0, 0, 0, 0, 2,
+ 0, 0, 0, 0, 0, 0, 0, 3,
+ 0, 0, 0, 0, 0, 0, 0, 4,
+ 0, 0, 0, 0, 0, 0, 0, 5,
+ 0, 0, 0, 0, 0, 0, 0, 6,
+ 0, 0, 0, 0, 0, 0, 0, 7,
+ }
+ uintTArray, err := TypeOf("uint64[7]")
+ require.NoError(t, err, "make uint64 static array type should not return error")
+ _, err = uintTArray.Decode(inputBase)
+ require.Error(t, err, "corrupted uint64 static array decode should return error")
+ })
+
+ // decoding test for *corrupted* dynamic bool array
+ // expected 0x0A (10) bool elements in encoding head
+ // encoded bytes provide only 8 bool elements
+ // should throw error
+ t.Run("corrupted dynamic bool array decode", func(t *testing.T) {
+ inputBase := []byte{
+ 0x00, 0x0A, 0b10101010,
+ }
+ dynamicT := makeDynamicArrayType(boolType)
+ _, err := dynamicT.Decode(inputBase)
+ require.Error(t, err, "decode corrupted dynamic array should return error")
+ })
+
+ // decoding test for *corrupted* dynamic bool array
+ // expected 0x07 (7) bool elements in encoding head
+ // encoded bytes provide 1 byte more (0b00000000)
+ // should throw error
+ t.Run("corrupted dynamic bool array decode", func(t *testing.T) {
+ inputBase := []byte{
+ 0x00, 0x07, 0b10101010, 0b00000000,
+ }
+ dynamicT := makeDynamicArrayType(boolType)
+ _, err := dynamicT.Decode(inputBase)
+ require.Error(t, err, "decode corrupted dynamic array should return error")
+ })
+
+ // decoding test for *corrupted* dynamic tuple value
+ // expected type: (string, bool, bool, bool, bool, string)
+ // expected value: ("ABC", T, F, T, F, "DEF")
+ /*
+ corrupted bytes:
+ 0x00, 0x04 (corrupted: first string start at 4th byte, should be 5th)
+ 0b10100000 (4 bool tuple element compacted together)
+ 0x00, 0x0A (second string start at 10th byte)
+ 0x00, 0x03 (first string byte length 3)
+ byte('A'), byte('B'), byte('C') (first string encoded bytes)
+ 0x00, 0x03 (second string byte length 3)
+ byte('D'), byte('E'), byte('F') (second string encoded bytes)
+ */
+ // decoding would read the first string's length bytes as {0x0A, 0x00} (i.e. 2560)
+ // this length exceeds the segment allocated to it: 0x0A, 0x00, 0x03, byte('A'), byte('B'), byte('C')
+ // should throw error
+ t.Run("corrupted dynamic tuple decoding", func(t *testing.T) {
+ inputEncode := []byte{
+ 0x00, 0x04, 0b10100000, 0x00, 0x0A,
+ 0x00, 0x03, byte('A'), byte('B'), byte('C'),
+ 0x00, 0x03, byte('D'), byte('E'), byte('F'),
+ }
+ tupleT, err := TypeOf("(string,bool,bool,bool,bool,string)")
+ require.NoError(t, err, "make tuple type failure")
+ _, err = tupleT.Decode(inputEncode)
+ require.Error(t, err, "corrupted decoding dynamic tuple should return error")
+ })
+
+ // decoding test for *corrupted* tuple with static bool arrays
+ // expected type: (bool[2], bool[2])
+ // expected value: ({T, T}, {T, T})
+ /*
+ corrupted bytes test case 0:
+ 0b11000000
+ 0b11000000
+ 0b00000000 <- corrupted byte, 1 byte more
+
+ corrupted bytes test case 1:
+ 0b11000000
+ <- corrupted byte, 1 byte missing
+ */
+ t.Run("corrupted static bool array tuple decoding", func(t *testing.T) {
+ expectedType, err := TypeOf("(bool[2],bool[2])")
+ require.NoError(t, err, "make tuple type failure")
+ encodedInput0 := []byte{
+ 0b11000000,
+ 0b11000000,
+ 0b00000000,
+ }
+ _, err = expectedType.Decode(encodedInput0)
+ require.Error(t, err, "decode corrupted tuple value should return error")
+
+ encodedInput1 := []byte{
+ 0b11000000,
+ }
+ _, err = expectedType.Decode(encodedInput1)
+ require.Error(t, err, "decode corrupted tuple value should return error")
+ })
+
+ // decoding test for *corrupted* tuple with static and dynamic bool array
+ // expected type: (bool[2], bool[])
+ // expected value: ({T, T}, {T, T})
+ /*
+ corrupted bytes:
+ 0b11000000 (first static bool array)
+ 0x03 <- corrupted, missing 0x00 byte (second dynamic bool array starts at 3rd byte)
+ 0x00, 0x02 (dynamic bool array length 2)
+ 0b11000000 (second static bool array)
+ */
+ t.Run("corrupted static/dynamic bool array tuple decoding", func(t *testing.T) {
+ encodedInput := []byte{
+ 0b11000000,
+ 0x03,
+ 0x00, 0x02, 0b11000000,
+ }
+ tupleT, err := TypeOf("(bool[2],bool[])")
+ require.NoError(t, err, "make tuple type failure")
+ _, err = tupleT.Decode(encodedInput)
+ require.Error(t, err, "decode corrupted tuple for static/dynamic bool array should return error")
+ })
+
+ // decoding test for *corrupted* tuple with dynamic bool array
+ // expected type: (bool[], bool[])
+ // expected value: ({}, {})
+ /*
+ corrupted bytes:
+ 0x00, 0x04 (first dynamic bool array starts at 4th byte)
+ 0x00, 0x07 <- corrupted, should be 0x06 (second dynamic bool array starts at 6th byte)
+ 0x00, 0x00 (first dynamic bool array length 0)
+ 0x00, 0x00 (second dynamic bool array length 0)
+
+ first dynamic array starts at 0x04, so its segment is 0x00, 0x00, 0x00 - one 0x00 byte too many
+ second dynamic array starts at 0x07 and has only a single 0x00 byte left
+ */
+ // should return error
+ t.Run("corrupted empty dynamic array tuple decoding", func(t *testing.T) {
+ encodedInput := []byte{
+ 0x00, 0x04, 0x00, 0x07,
+ 0x00, 0x00, 0x00, 0x00,
+ }
+ tupleT, err := TypeOf("(bool[],bool[])")
+ require.NoError(t, err, "make tuple type failure")
+ _, err = tupleT.Decode(encodedInput)
+ require.Error(t, err, "decode corrupted tuple for empty dynamic array should return error")
+ })
+
+ // decoding test for *corrupted* empty tuple
+ // expected value: ()
+ // corrupted input: 0xFF, should be empty byte
+ // should return error
+ t.Run("corrupted empty tuple decoding", func(t *testing.T) {
+ encodedInput := []byte{0xFF}
+ tupleT, err := TypeOf("()")
+ require.NoError(t, err, "make tuple type failure")
+ _, err = tupleT.Decode(encodedInput)
+ require.Error(t, err, "decode corrupted empty tuple should return error")
+ })
+}
+
+type testUnit struct {
+ serializedType string
+ value interface{}
+}
+
+func categorySelfRoundTripTest(t *testing.T, category []testUnit) {
+ for _, testObj := range category {
+ abiType, err := TypeOf(testObj.serializedType)
+ require.NoError(t, err, "failure to deserialize type")
+ encodedValue, err := abiType.Encode(testObj.value)
+ require.NoError(t, err, "failure to encode value")
+ actual, err := abiType.Decode(encodedValue)
+ require.NoError(t, err, "failure to decode value")
+ require.Equal(t, testObj.value, actual, "decoded value not equal to expected")
+ jsonEncodedValue, err := abiType.MarshalToJSON(testObj.value)
+ require.NoError(t, err, "failure to encode value to JSON type")
+ jsonActual, err := abiType.UnmarshalFromJSON(jsonEncodedValue)
+ require.NoError(t, err, "failure to decode JSON value back")
+ require.Equal(t, testObj.value, jsonActual, "decode JSON value not equal to expected")
+ }
+}
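+
+// Illustrative usage sketch with a single hand-built test unit:
+//
+//	categorySelfRoundTripTest(t, []testUnit{
+//		{serializedType: "bool[3]", value: []interface{}{true, false, true}},
+//	})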
+
+func addPrimitiveRandomValues(t *testing.T, pool *map[BaseType][]testUnit) {
+ (*pool)[Uint] = make([]testUnit, 200*64)
+ (*pool)[Ufixed] = make([]testUnit, 160*64)
+
+ uintIndex := 0
+ ufixedIndex := 0
+
+ for bitSize := 8; bitSize <= 512; bitSize += 8 {
+ max := new(big.Int).Lsh(big.NewInt(1), uint(bitSize))
+
+ uintT, err := makeUintType(bitSize)
+ require.NoError(t, err, "make uint type failure")
+ uintTstr := uintT.String()
+
+ for j := 0; j < 200; j++ {
+ randVal, err := rand.Int(rand.Reader, max)
+ require.NoError(t, err, "generate random uint, should be no error")
+
+ narrowest, err := castBigIntToNearestPrimitive(randVal, uint16(bitSize))
+ require.NoError(t, err, "cast random uint to nearest primitive failure")
+
+ (*pool)[Uint][uintIndex] = testUnit{serializedType: uintTstr, value: narrowest}
+ uintIndex++
+ }
+
+ for precision := 1; precision <= 160; precision++ {
+ randVal, err := rand.Int(rand.Reader, max)
+ require.NoError(t, err, "generate random ufixed, should be no error")
+
+ narrowest, err := castBigIntToNearestPrimitive(randVal, uint16(bitSize))
+ require.NoError(t, err, "cast random uint to nearest primitive failure")
+
+ ufixedT, err := makeUfixedType(bitSize, precision)
+ require.NoError(t, err, "make ufixed type failure")
+ ufixedTstr := ufixedT.String()
+ (*pool)[Ufixed][ufixedIndex] = testUnit{serializedType: ufixedTstr, value: narrowest}
+ ufixedIndex++
+ }
+ }
+ categorySelfRoundTripTest(t, (*pool)[Uint])
+ categorySelfRoundTripTest(t, (*pool)[Ufixed])
+
+ (*pool)[Byte] = make([]testUnit, 1<<8)
+ for i := 0; i < (1 << 8); i++ {
+ (*pool)[Byte][i] = testUnit{serializedType: byteType.String(), value: byte(i)}
+ }
+ categorySelfRoundTripTest(t, (*pool)[Byte])
+
+ (*pool)[Bool] = make([]testUnit, 2)
+ (*pool)[Bool][0] = testUnit{serializedType: boolType.String(), value: false}
+ (*pool)[Bool][1] = testUnit{serializedType: boolType.String(), value: true}
+ categorySelfRoundTripTest(t, (*pool)[Bool])
+
+ maxAddress := new(big.Int).Lsh(big.NewInt(1), 256)
+ (*pool)[Address] = make([]testUnit, 300)
+ for i := 0; i < 300; i++ {
+ randAddrVal, err := rand.Int(rand.Reader, maxAddress)
+ require.NoError(t, err, "generate random value for address, should be no error")
+ addrBytes := randAddrVal.Bytes()
+ remainBytes := make([]byte, 32-len(addrBytes))
+ addrBytes = append(remainBytes, addrBytes...)
+ (*pool)[Address][i] = testUnit{serializedType: addressType.String(), value: addrBytes}
+ }
+ categorySelfRoundTripTest(t, (*pool)[Address])
+
+ (*pool)[String] = make([]testUnit, 400)
+ stringIndex := 0
+ for length := 1; length <= 100; length++ {
+ for i := 0; i < 4; i++ {
+ (*pool)[String][stringIndex] = testUnit{
+ serializedType: stringType.String(),
+ value: gobberish.GenerateString(length),
+ }
+ stringIndex++
+ }
+ }
+ categorySelfRoundTripTest(t, (*pool)[String])
+}
+
+func takeSomeFromCategoryAndGenerateArray(
+ t *testing.T, abiT BaseType, startIndex int, takeNum uint16, pool *map[BaseType][]testUnit) {
+
+ tempArray := make([]interface{}, takeNum)
+ for i := 0; i < int(takeNum); i++ {
+ index := startIndex + i
+ if index >= len((*pool)[abiT]) {
+ index = startIndex
+ }
+ tempArray[i] = (*pool)[abiT][index].value
+ }
+ tempT, err := TypeOf((*pool)[abiT][startIndex].serializedType)
+ require.NoError(t, err, "type in test uint cannot be deserialized")
+ (*pool)[ArrayStatic] = append((*pool)[ArrayStatic], testUnit{
+ serializedType: makeStaticArrayType(tempT, takeNum).String(),
+ value: tempArray,
+ })
+ (*pool)[ArrayDynamic] = append((*pool)[ArrayDynamic], testUnit{
+ serializedType: makeDynamicArrayType(tempT).String(),
+ value: tempArray,
+ })
+}
+
+func addArrayRandomValues(t *testing.T, pool *map[BaseType][]testUnit) {
+ for intIndex := 0; intIndex < len((*pool)[Uint]); intIndex += 200 {
+ takeSomeFromCategoryAndGenerateArray(t, Uint, intIndex, 20, pool)
+ }
+ takeSomeFromCategoryAndGenerateArray(t, Byte, 0, 20, pool)
+ takeSomeFromCategoryAndGenerateArray(t, Address, 0, 20, pool)
+ takeSomeFromCategoryAndGenerateArray(t, String, 0, 20, pool)
+ takeSomeFromCategoryAndGenerateArray(t, Bool, 0, 20, pool)
+
+ categorySelfRoundTripTest(t, (*pool)[ArrayStatic])
+ categorySelfRoundTripTest(t, (*pool)[ArrayDynamic])
+}
+
+func addTupleRandomValues(t *testing.T, slotRange BaseType, pool *map[BaseType][]testUnit) {
+ for i := 0; i < 100; i++ {
+ tupleLenBig, err := rand.Int(rand.Reader, big.NewInt(20))
+ require.NoError(t, err, "generate random tuple length should not return error")
+ tupleLen := tupleLenBig.Int64() + 1
+ testUnits := make([]testUnit, tupleLen)
+ for index := 0; index < int(tupleLen); index++ {
+ tupleTypeIndexBig, err := rand.Int(rand.Reader, big.NewInt(int64(slotRange)+1))
+ require.NoError(t, err, "generate random tuple element type index should not return error")
+ tupleTypeIndex := BaseType(tupleTypeIndexBig.Int64())
+ tupleElemChoiceRange := len((*pool)[tupleTypeIndex])
+
+ tupleElemRangeIndexBig, err := rand.Int(rand.Reader, big.NewInt(int64(tupleElemChoiceRange)))
+ require.NoError(t, err, "generate random tuple element index in test pool should not return error")
+ tupleElemRangeIndex := tupleElemRangeIndexBig.Int64()
+ tupleElem := (*pool)[tupleTypeIndex][tupleElemRangeIndex]
+ testUnits[index] = tupleElem
+ }
+ elemValues := make([]interface{}, tupleLen)
+ elemTypes := make([]Type, tupleLen)
+ for index := 0; index < int(tupleLen); index++ {
+ elemValues[index] = testUnits[index].value
+ abiT, err := TypeOf(testUnits[index].serializedType)
+ require.NoError(t, err, "deserialize type failure for tuple elements")
+ elemTypes[index] = abiT
+ }
+ tupleT, err := MakeTupleType(elemTypes)
+ require.NoError(t, err, "make tuple type failure")
+ (*pool)[Tuple] = append((*pool)[Tuple], testUnit{
+ serializedType: tupleT.String(),
+ value: elemValues,
+ })
+ }
+}
+
+func TestRandomABIEncodeDecodeRoundTrip(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ testValuePool := make(map[BaseType][]testUnit)
+ addPrimitiveRandomValues(t, &testValuePool)
+ addArrayRandomValues(t, &testValuePool)
+ addTupleRandomValues(t, String, &testValuePool)
+ addTupleRandomValues(t, Tuple, &testValuePool)
+ categorySelfRoundTripTest(t, testValuePool[Tuple])
+}
diff --git a/data/abi/abi_json.go b/data/abi/abi_json.go
new file mode 100644
index 000000000..482419e6b
--- /dev/null
+++ b/data/abi/abi_json.go
@@ -0,0 +1,254 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package abi
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "math/big"
+
+ "github.com/algorand/go-algorand/data/basics"
+)
+
+func castBigIntToNearestPrimitive(num *big.Int, bitSize uint16) (interface{}, error) {
+ if num.BitLen() > int(bitSize) {
+ return nil, fmt.Errorf("cast big int to nearest primitive failure: %v >= 2^%d", num, bitSize)
+ } else if num.Sign() < 0 {
+ return nil, fmt.Errorf("cannot cast big int to near primitive: %v < 0", num)
+ }
+
+ switch bitSize / 8 {
+ case 1:
+ return uint8(num.Uint64()), nil
+ case 2:
+ return uint16(num.Uint64()), nil
+ case 3, 4:
+ return uint32(num.Uint64()), nil
+ case 5, 6, 7, 8:
+ return num.Uint64(), nil
+ default:
+ return num, nil
+ }
+}
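+
+// Illustrative sketch of the narrowing above:
+//
+//	v, _ := castBigIntToNearestPrimitive(big.NewInt(300), 24) // v is uint32(300)
+//	_, err := castBigIntToNearestPrimitive(big.NewInt(300), 8) // error: 300 needs more than 8 bits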
+
+// MarshalToJSON converts a Go value into its JSON representation, guided by the ABI type
+func (t Type) MarshalToJSON(value interface{}) ([]byte, error) {
+ switch t.abiTypeID {
+ case Uint:
+ bytesUint, err := encodeInt(value, t.bitSize)
+ if err != nil {
+ return nil, err
+ }
+ return new(big.Int).SetBytes(bytesUint).MarshalJSON()
+ case Ufixed:
+ denom := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(t.precision)), nil)
+ encodedUint, err := encodeInt(value, t.bitSize)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(new(big.Rat).SetFrac(new(big.Int).SetBytes(encodedUint), denom).FloatString(int(t.precision))), nil
+ case Bool:
+ boolValue, ok := value.(bool)
+ if !ok {
+ return nil, fmt.Errorf("cannot infer to bool for marshal to JSON")
+ }
+ return json.Marshal(boolValue)
+ case Byte:
+ byteValue, ok := value.(byte)
+ if !ok {
+ return nil, fmt.Errorf("cannot infer to byte for marshal to JSON")
+ }
+ return json.Marshal(byteValue)
+ case Address:
+ var addressInternal basics.Address
+ switch valueCasted := value.(type) {
+ case []byte:
+ copy(addressInternal[:], valueCasted[:])
+ return json.Marshal(addressInternal.String())
+ case [addressByteSize]byte:
+ addressInternal = valueCasted
+ return json.Marshal(addressInternal.String())
+ default:
+ return nil, fmt.Errorf("cannot infer to byte slice/array for marshal to JSON")
+ }
+ case ArrayStatic, ArrayDynamic:
+ values, err := inferToSlice(value)
+ if err != nil {
+ return nil, err
+ }
+ if t.abiTypeID == ArrayStatic && int(t.staticLength) != len(values) {
+ return nil, fmt.Errorf("length of slice %d != type specific length %d", len(values), t.staticLength)
+ }
+ if t.childTypes[0].abiTypeID == Byte {
+ byteArr := make([]byte, len(values))
+ for i := 0; i < len(values); i++ {
+ tempByte, ok := values[i].(byte)
+ if !ok {
+ return nil, fmt.Errorf("cannot infer byte element from slice")
+ }
+ byteArr[i] = tempByte
+ }
+ return json.Marshal(byteArr)
+ }
+ rawMsgSlice := make([]json.RawMessage, len(values))
+ for i := 0; i < len(values); i++ {
+ rawMsgSlice[i], err = t.childTypes[0].MarshalToJSON(values[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return json.Marshal(rawMsgSlice)
+ case String:
+ stringVal, ok := value.(string)
+ if !ok {
+ return nil, fmt.Errorf("cannot infer to string for marshal to JSON")
+ }
+ return json.Marshal(stringVal)
+ case Tuple:
+ values, err := inferToSlice(value)
+ if err != nil {
+ return nil, err
+ }
+ if len(values) != int(t.staticLength) {
+ return nil, fmt.Errorf("tuple element number != value slice length")
+ }
+ rawMsgSlice := make([]json.RawMessage, len(values))
+ for i := 0; i < len(values); i++ {
+ rawMsgSlice[i], err = t.childTypes[i].MarshalToJSON(values[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return json.Marshal(rawMsgSlice)
+ default:
+ return nil, fmt.Errorf("cannot infer ABI type for marshalling value to JSON")
+ }
+}
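+
+// Illustrative sketch (values are arbitrary):
+//
+//	t, _ := TypeOf("(uint64,bool[2])")
+//	js, _ := t.MarshalToJSON([]interface{}{uint64(7), []interface{}{true, false}})
+//	// js is []byte(`[7,[true,false]]`)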
+
+// UnmarshalFromJSON converts JSON-encoded bytes into a Go value, following the ABI type and encoding rules
+func (t Type) UnmarshalFromJSON(jsonEncoded []byte) (interface{}, error) {
+ switch t.abiTypeID {
+ case Uint:
+ num := new(big.Int)
+ if err := num.UnmarshalJSON(jsonEncoded); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to uint: %v", string(jsonEncoded), err)
+ }
+ return castBigIntToNearestPrimitive(num, t.bitSize)
+ case Ufixed:
+ floatTemp := new(big.Rat)
+ if err := floatTemp.UnmarshalText(jsonEncoded); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to ufixed: %v", string(jsonEncoded), err)
+ }
+ denom := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(t.precision)), nil)
+ denomRat := new(big.Rat).SetInt(denom)
+ numeratorRat := new(big.Rat).Mul(denomRat, floatTemp)
+ if !numeratorRat.IsInt() {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to ufixed: precision out of range", string(jsonEncoded))
+ }
+ return castBigIntToNearestPrimitive(numeratorRat.Num(), t.bitSize)
+ case Bool:
+ var elem bool
+ if err := json.Unmarshal(jsonEncoded, &elem); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to bool: %v", string(jsonEncoded), err)
+ }
+ return elem, nil
+ case Byte:
+ var elem byte
+ if err := json.Unmarshal(jsonEncoded, &elem); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded to byte: %v", err)
+ }
+ return elem, nil
+ case Address:
+ var addrStr string
+ if err := json.Unmarshal(jsonEncoded, &addrStr); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded to string: %v", err)
+ }
+ addr, err := basics.UnmarshalChecksumAddress(addrStr)
+ if err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to address: %v", string(jsonEncoded), err)
+ }
+ return addr[:], nil
+ case ArrayStatic, ArrayDynamic:
+ if t.childTypes[0].abiTypeID == Byte && bytes.HasPrefix(jsonEncoded, []byte{'"'}) {
+ var byteArr []byte
+ err := json.Unmarshal(jsonEncoded, &byteArr)
+ if err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to bytes: %v", string(jsonEncoded), err)
+ }
+ if t.abiTypeID == ArrayStatic && len(byteArr) != int(t.staticLength) {
+ return nil, fmt.Errorf("length of slice %d != type specific length %d", len(byteArr), t.staticLength)
+ }
+ outInterface := make([]interface{}, len(byteArr))
+ for i := 0; i < len(byteArr); i++ {
+ outInterface[i] = byteArr[i]
+ }
+ return outInterface, nil
+ }
+ var elems []json.RawMessage
+ if err := json.Unmarshal(jsonEncoded, &elems); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to array: %v", string(jsonEncoded), err)
+ }
+ if t.abiTypeID == ArrayStatic && len(elems) != int(t.staticLength) {
+ return nil, fmt.Errorf("JSON array element number != ABI array elem number")
+ }
+ values := make([]interface{}, len(elems))
+ for i := 0; i < len(elems); i++ {
+ tempValue, err := t.childTypes[0].UnmarshalFromJSON(elems[i])
+ if err != nil {
+ return nil, err
+ }
+ values[i] = tempValue
+ }
+ return values, nil
+ case String:
+ stringEncoded := string(jsonEncoded)
+ if bytes.HasPrefix(jsonEncoded, []byte{'"'}) {
+ var stringVar string
+ if err := json.Unmarshal(jsonEncoded, &stringVar); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to string: %v", stringEncoded, err)
+ }
+ return stringVar, nil
+ } else if bytes.HasPrefix(jsonEncoded, []byte{'['}) {
+ var elems []byte
+ if err := json.Unmarshal(jsonEncoded, &elems); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to string: %v", stringEncoded, err)
+ }
+ return string(elems), nil
+ } else {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to string", stringEncoded)
+ }
+ case Tuple:
+ var elems []json.RawMessage
+ if err := json.Unmarshal(jsonEncoded, &elems); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to array for tuple: %v", string(jsonEncoded), err)
+ }
+ if len(elems) != int(t.staticLength) {
+ return nil, fmt.Errorf("JSON array element number != ABI tuple elem number")
+ }
+ values := make([]interface{}, len(elems))
+ for i := 0; i < len(elems); i++ {
+ tempValue, err := t.childTypes[i].UnmarshalFromJSON(elems[i])
+ if err != nil {
+ return nil, err
+ }
+ values[i] = tempValue
+ }
+ return values, nil
+ default:
+ return nil, fmt.Errorf("cannot cast JSON encoded %s to ABI encoding stuff", string(jsonEncoded))
+ }
+}
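+
+// Illustrative sketch of the ufixed path above: the decimal string is scaled
+// by 10^precision into the integer numerator:
+//
+//	t, _ := TypeOf("ufixed64x3")
+//	v, _ := t.UnmarshalFromJSON([]byte("12.34"))
+//	// v is uint64(12340), since 12.34 * 10^3 == 12340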
diff --git a/data/abi/abi_json_test.go b/data/abi/abi_json_test.go
new file mode 100644
index 000000000..d65e3c10a
--- /dev/null
+++ b/data/abi/abi_json_test.go
@@ -0,0 +1,123 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package abi
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestJSONtoInterfaceValid(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var testCases = []struct {
+ input string
+ typeStr string
+ expected interface{}
+ }{
+ {
+ input: `[true, [0, 1, 2], 17]`,
+ typeStr: `(bool,byte[],uint64)`,
+ expected: []interface{}{
+ true,
+ []interface{}{byte(0), byte(1), byte(2)},
+ uint64(17),
+ },
+ },
+ {
+ input: `[true, "AAEC", 17]`,
+ typeStr: `(bool,byte[],uint64)`,
+ expected: []interface{}{
+ true,
+ []interface{}{byte(0), byte(1), byte(2)},
+ uint64(17),
+ },
+ },
+ {
+ input: `"AQEEBQEE"`,
+ typeStr: `byte[6]`,
+ expected: []interface{}{byte(1), byte(1), byte(4), byte(5), byte(1), byte(4)},
+ },
+ {
+ input: `[[0, [true, false], "utf-8"], [18446744073709551615, [false, true], "pistachio"]]`,
+ typeStr: `(uint64,bool[2],string)[]`,
+ expected: []interface{}{
+ []interface{}{uint64(0), []interface{}{true, false}, "utf-8"},
+ []interface{}{^uint64(0), []interface{}{false, true}, "pistachio"},
+ },
+ },
+ {
+ input: `[]`,
+ typeStr: `(uint64,bool[2],string)[]`,
+ expected: []interface{}{},
+ },
+ {
+ input: "[]",
+ typeStr: "()",
+ expected: []interface{}{},
+ },
+ {
+ input: "[65, 66, 67]",
+ typeStr: "string",
+ expected: "ABC",
+ },
+ {
+ input: "[]",
+ typeStr: "string",
+ expected: "",
+ },
+ {
+ input: "123.456",
+ typeStr: "ufixed64x3",
+ expected: uint64(123456),
+ },
+ {
+ input: `"optin"`,
+ typeStr: "string",
+ expected: "optin",
+ },
+ {
+ input: `"AAEC"`,
+ typeStr: "byte[3]",
+ expected: []interface{}{byte(0), byte(1), byte(2)},
+ },
+ {
+ input: `["uwu",["AAEC",12.34]]`,
+ typeStr: "(string,(byte[3],ufixed64x3))",
+ expected: []interface{}{"uwu", []interface{}{[]interface{}{byte(0), byte(1), byte(2)}, uint64(12340)}},
+ },
+ {
+ input: `[399,"should pass",[true,false,false,true]]`,
+ typeStr: "(uint64,string,bool[])",
+ expected: []interface{}{uint64(399), "should pass", []interface{}{true, false, false, true}},
+ },
+ }
+
+ for _, testCase := range testCases {
+ abiT, err := TypeOf(testCase.typeStr)
+ require.NoError(t, err, "fail to construct ABI type (%s): %v", testCase.typeStr, err)
+ res, err := abiT.UnmarshalFromJSON([]byte(testCase.input))
+ require.NoError(t, err, "fail to unmarshal JSON to interface: (%s): %v", testCase.input, err)
+ require.Equal(t, testCase.expected, res, "%v not matching with expected value %v", res, testCase.expected)
+ resEncoded, err := abiT.Encode(res)
+ require.NoError(t, err, "fail to encode %v to ABI bytes: %v", res, err)
+ resDecoded, err := abiT.Decode(resEncoded)
+ require.NoError(t, err, "fail to decode ABI bytes of %v: %v", res, err)
+ require.Equal(t, res, resDecoded, "ABI encode-decode round trip: %v not match with expected %v", resDecoded, res)
+ }
+}
diff --git a/data/abi/abi_type.go b/data/abi/abi_type.go
new file mode 100644
index 000000000..353517027
--- /dev/null
+++ b/data/abi/abi_type.go
@@ -0,0 +1,470 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package abi
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+/*
+ ABI-Types: uint<N>: An N-bit unsigned integer (8 <= N <= 512 and N % 8 = 0).
+ | byte (alias for uint8)
+ | ufixed <N> x <M> (8 <= N <= 512, N % 8 = 0, and 0 < M <= 160)
+ | bool
+ | address (alias for byte[32])
+ | <type> [<N>]
+ | <type> []
+ | string
+ | (T1, ..., Tn)
+*/
+
+// BaseType is an enumeration defined on uint32. A BaseType value indicates the type of an ABI value.
+type BaseType uint32
+
+const (
+ // Uint is the index (0) for `Uint` type in ABI encoding.
+ Uint BaseType = iota
+ // Byte is the index (1) for `Byte` type in ABI encoding.
+ Byte
+ // Ufixed is the index (2) for `UFixed` type in ABI encoding.
+ Ufixed
+ // Bool is the index (3) for `Bool` type in ABI encoding.
+ Bool
+ // ArrayStatic is the index (4) for static length array (<type>[length]) type in ABI encoding.
+ ArrayStatic
+ // Address is the index (5) for `Address` type in ABI encoding (a type alias of byte[32]).
+ Address
+ // ArrayDynamic is the index (6) for dynamic length array (<type>[]) type in ABI encoding.
+ ArrayDynamic
+ // String is the index (7) for `String` type in ABI encoding (a type alias of byte[]).
+ String
+ // Tuple is the index (8) for tuple `(<type 0>, ..., <type k>)` in ABI encoding.
+ Tuple
+)
+
+// Type is the struct that stores information about an ABI value's type.
+type Type struct {
+ abiTypeID BaseType
+ childTypes []Type
+
+ // only can be applied to `uint` bitSize <N> or `ufixed` bitSize <N>
+ bitSize uint16
+ // only can be applied to `ufixed` precision <M>
+ precision uint16
+
+ // length for static array / tuple
+ /*
+ per the ABI spec, len over a binary array returns the number of bytes
+ the field here is uint16, which only allows lengths in [0, 2^16 - 1]
+ so the representation of a static length is constrained to uint16
+ */
+ // NOTE may want to change back to uint32/uint64
+ staticLength uint16
+}
+
+// String serializes an ABI Type into its string representation in ABI encoding.
+func (t Type) String() string {
+ switch t.abiTypeID {
+ case Uint:
+ return fmt.Sprintf("uint%d", t.bitSize)
+ case Byte:
+ return "byte"
+ case Ufixed:
+ return fmt.Sprintf("ufixed%dx%d", t.bitSize, t.precision)
+ case Bool:
+ return "bool"
+ case ArrayStatic:
+ return fmt.Sprintf("%s[%d]", t.childTypes[0].String(), t.staticLength)
+ case Address:
+ return "address"
+ case ArrayDynamic:
+ return t.childTypes[0].String() + "[]"
+ case String:
+ return "string"
+ case Tuple:
+ typeStrings := make([]string, len(t.childTypes))
+ for i := 0; i < len(t.childTypes); i++ {
+ typeStrings[i] = t.childTypes[i].String()
+ }
+ return "(" + strings.Join(typeStrings, ",") + ")"
+ default:
+ panic("Type Serialization Error, fail to infer from abiTypeID (bruh you shouldn't be here)")
+ }
+}
+
+var staticArrayRegexp = regexp.MustCompile(`^([a-z\d\[\](),]+)\[([1-9][\d]*)]$`)
+var ufixedRegexp = regexp.MustCompile(`^ufixed([1-9][\d]*)x([1-9][\d]*)$`)
+
+// TypeOf parses an ABI type string.
+// For example: `TypeOf("(uint64,byte[])")`
+func TypeOf(str string) (Type, error) {
+ switch {
+ case strings.HasSuffix(str, "[]"):
+ arrayArgType, err := TypeOf(str[:len(str)-2])
+ if err != nil {
+ return Type{}, err
+ }
+ return makeDynamicArrayType(arrayArgType), nil
+ case strings.HasSuffix(str, "]"):
+ stringMatches := staticArrayRegexp.FindStringSubmatch(str)
+ // match the string itself, array element type, then array length
+ if len(stringMatches) != 3 {
+ return Type{}, fmt.Errorf("static array ill formated: %s", str)
+ }
+ // guaranteed that the length of array is existing
+ arrayLengthStr := stringMatches[2]
+ // allowing only decimal static array length, with limit size to 2^16 - 1
+ arrayLength, err := strconv.ParseUint(arrayLengthStr, 10, 16)
+ if err != nil {
+ return Type{}, err
+ }
+ // parse the array element type
+ arrayType, err := TypeOf(stringMatches[1])
+ if err != nil {
+ return Type{}, err
+ }
+ return makeStaticArrayType(arrayType, uint16(arrayLength)), nil
+ case strings.HasPrefix(str, "uint"):
+ typeSize, err := strconv.ParseUint(str[4:], 10, 16)
+ if err != nil {
+ return Type{}, fmt.Errorf("ill formed uint type: %s", str)
+ }
+ return makeUintType(int(typeSize))
+ case str == "byte":
+ return byteType, nil
+ case strings.HasPrefix(str, "ufixed"):
+ stringMatches := ufixedRegexp.FindStringSubmatch(str)
+ // match the string itself, then the type bitSize and the type precision
+ if len(stringMatches) != 3 {
+ return Type{}, fmt.Errorf("ill-formed ufixed type: %s", str)
+ }
+ // the two numeric capture groups are guaranteed to exist
+ ufixedSize, err := strconv.ParseUint(stringMatches[1], 10, 16)
+ if err != nil {
+ return Type{}, err
+ }
+ ufixedPrecision, err := strconv.ParseUint(stringMatches[2], 10, 16)
+ if err != nil {
+ return Type{}, err
+ }
+ return makeUfixedType(int(ufixedSize), int(ufixedPrecision))
+ case str == "bool":
+ return boolType, nil
+ case str == "address":
+ return addressType, nil
+ case str == "string":
+ return stringType, nil
+ case len(str) >= 2 && str[0] == '(' && str[len(str)-1] == ')':
+ tupleContent, err := parseTupleContent(str[1 : len(str)-1])
+ if err != nil {
+ return Type{}, err
+ }
+ tupleTypes := make([]Type, len(tupleContent))
+ for i := 0; i < len(tupleContent); i++ {
+ ti, err := TypeOf(tupleContent[i])
+ if err != nil {
+ return Type{}, err
+ }
+ tupleTypes[i] = ti
+ }
+ return MakeTupleType(tupleTypes)
+ default:
+ return Type{}, fmt.Errorf("cannot convert a string %s to an ABI type", str)
+ }
+}
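+
+// exampleTypeOfRoundTrip is a minimal illustrative sketch (a hypothetical
+// helper, not part of the ABI API): TypeOf parses a canonical type string,
+// and String re-serializes the parsed Type back to the same encoding.
+func exampleTypeOfRoundTrip() (string, error) {
+ t, err := TypeOf("(uint64,byte[],(bool,address))")
+ if err != nil {
+ return "", err
+ }
+ // returns "(uint64,byte[],(bool,address))"
+ return t.String(), nil
+}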
+
+// segment keeps track of the start and end of a segment in a string.
+type segment struct{ left, right int }
+
+// parseTupleContent splits an ABI-encoded tuple type string into sub-strings.
+// Each sub-string represents one element type of the tuple.
+// The argument str is the content between the parentheses of the tuple, i.e.
+// (...... str ......)
+// ^ ^
+func parseTupleContent(str string) ([]string, error) {
+ // if the tuple type content is empty (which is also allowed)
+ // just return the empty string list
+ if len(str) == 0 {
+ return []string{}, nil
+ }
+
+ // the following 2 checks ensure that the input string can be split on commas
+ // into the form: "...substr_0,...substr_1,...,...substr_k"
+
+ // str should not have a leading/trailing comma
+ if strings.HasSuffix(str, ",") || strings.HasPrefix(str, ",") {
+ return []string{}, fmt.Errorf("parsing error: tuple content should not start or end with a comma")
+ }
+
+ // str should not contain consecutive commas
+ if strings.Contains(str, ",,") {
+ return []string{}, fmt.Errorf("parsing error: tuple content should not contain consecutive commas")
+ }
+
+ var parenSegmentRecord = make([]segment, 0)
+ var stack []int
+
+ // find the outermost parentheses segments (not nested inside other parentheses)
+ // illustration: "*****,(*****),*****" => ["*****", "(*****)", "*****"]
+ // on a left paren (, push its index onto the stack
+ // on a right paren ), pop one index off the stack
+ // if the pop empties the stack, a complete outermost segment "(******)" has been found
+ for index, chr := range str {
+ if chr == '(' {
+ stack = append(stack, index)
+ } else if chr == ')' {
+ if len(stack) == 0 {
+ return []string{}, fmt.Errorf("unpaired parentheses: %s", str)
+ }
+ leftParenIndex := stack[len(stack)-1]
+ stack = stack[:len(stack)-1]
+ if len(stack) == 0 {
+ parenSegmentRecord = append(parenSegmentRecord, segment{
+ left: leftParenIndex,
+ right: index,
+ })
+ }
+ }
+ }
+ if len(stack) != 0 {
+ return []string{}, fmt.Errorf("unpaired parentheses: %s", str)
+ }
+
+ // remove the outermost parenthesized segments from the string
+ strCopied := str
+ for i := len(parenSegmentRecord) - 1; i >= 0; i-- {
+ parenSeg := parenSegmentRecord[i]
+ strCopied = strCopied[:parenSeg.left] + strCopied[parenSeg.right+1:]
+ }
+
+ // split the remaining string, now free of parenthesized segments, on commas
+ tupleStrSegs := strings.Split(strCopied, ",")
+
+ // empty strings mark the positions of the removed parenthesized segments
+ // put those segments back into the segment list
+ parenSegCount := 0
+ for index, segStr := range tupleStrSegs {
+ if segStr == "" {
+ parenSeg := parenSegmentRecord[parenSegCount]
+ tupleStrSegs[index] = str[parenSeg.left : parenSeg.right+1]
+ parenSegCount++
+ }
+ }
+
+ return tupleStrSegs, nil
+}
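+
+// exampleParseTupleContent is an illustrative sketch (a hypothetical helper):
+// for the tuple content "uint64,(byte,bool[3]),string", the outermost
+// parenthesized segment is removed first ("uint64,,string"), the remainder is
+// split on commas, and the empty placeholder is then replaced by the original
+// segment, yielding ["uint64", "(byte,bool[3])", "string"].
+func exampleParseTupleContent() ([]string, error) {
+ return parseTupleContent("uint64,(byte,bool[3]),string")
+}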
+
+// makeUintType makes `Uint` ABI type by taking a type bitSize argument.
+// The range of type bitSize is [8, 512] and type bitSize % 8 == 0.
+func makeUintType(typeSize int) (Type, error) {
+ if typeSize%8 != 0 || typeSize < 8 || typeSize > 512 {
+ return Type{}, fmt.Errorf("unsupported uint type bitSize: %d", typeSize)
+ }
+ return Type{
+ abiTypeID: Uint,
+ bitSize: uint16(typeSize),
+ }, nil
+}
+
+var (
+ // byteType is ABI type constant for byte
+ byteType = Type{abiTypeID: Byte}
+
+ // boolType is ABI type constant for bool
+ boolType = Type{abiTypeID: Bool}
+
+ // addressType is ABI type constant for address
+ addressType = Type{abiTypeID: Address}
+
+ // stringType is ABI type constant for string
+ stringType = Type{abiTypeID: String}
+)
+
+// makeUfixedType makes `UFixed` ABI type by taking type bitSize and type precision as arguments.
+// The range of type bitSize is [8, 512] and type bitSize % 8 == 0.
+// The range of type precision is [1, 160].
+func makeUfixedType(typeSize int, typePrecision int) (Type, error) {
+ if typeSize%8 != 0 || typeSize < 8 || typeSize > 512 {
+ return Type{}, fmt.Errorf("unsupported ufixed type bitSize: %d", typeSize)
+ }
+ if typePrecision > 160 || typePrecision < 1 {
+ return Type{}, fmt.Errorf("unsupported ufixed type precision: %d", typePrecision)
+ }
+ return Type{
+ abiTypeID: Ufixed,
+ bitSize: uint16(typeSize),
+ precision: uint16(typePrecision),
+ }, nil
+}
+
+// makeStaticArrayType makes static length array ABI type by taking
+// array element type and array length as arguments.
+func makeStaticArrayType(argumentType Type, arrayLength uint16) Type {
+ return Type{
+ abiTypeID: ArrayStatic,
+ childTypes: []Type{argumentType},
+ staticLength: arrayLength,
+ }
+}
+
+// makeDynamicArrayType makes dynamic length array by taking array element type as argument.
+func makeDynamicArrayType(argumentType Type) Type {
+ return Type{
+ abiTypeID: ArrayDynamic,
+ childTypes: []Type{argumentType},
+ }
+}
+
+// MakeTupleType makes tuple ABI type by taking an array of tuple element types as argument.
+func MakeTupleType(argumentTypes []Type) (Type, error) {
+ if len(argumentTypes) >= math.MaxUint16 {
+ return Type{}, fmt.Errorf("tuple type child type number larger than maximum uint16 error")
+ }
+ return Type{
+ abiTypeID: Tuple,
+ childTypes: argumentTypes,
+ staticLength: uint16(len(argumentTypes)),
+ }, nil
+}
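+
+// exampleMakeTupleType is a minimal sketch (a hypothetical helper): it builds
+// the type (uint64,bool[8]) programmatically rather than parsing its string form.
+func exampleMakeTupleType() (Type, error) {
+ u64, err := makeUintType(64)
+ if err != nil {
+ return Type{}, err
+ }
+ return MakeTupleType([]Type{u64, makeStaticArrayType(boolType, 8)})
+}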
+
+// Equal reports whether two types are equal: t == t0.
+func (t Type) Equal(t0 Type) bool {
+ if t.abiTypeID != t0.abiTypeID {
+ return false
+ }
+ if t.precision != t0.precision || t.bitSize != t0.bitSize {
+ return false
+ }
+ if t.staticLength != t0.staticLength {
+ return false
+ }
+ if len(t.childTypes) != len(t0.childTypes) {
+ return false
+ }
+ for i := 0; i < len(t.childTypes); i++ {
+ if !t.childTypes[i].Equal(t0.childTypes[i]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// IsDynamic method decides if an ABI type is dynamic or static.
+func (t Type) IsDynamic() bool {
+ switch t.abiTypeID {
+ case ArrayDynamic, String:
+ return true
+ default:
+ for _, childT := range t.childTypes {
+ if childT.IsDynamic() {
+ return true
+ }
+ }
+ return false
+ }
+}
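+
+// exampleIsDynamic is a minimal sketch (a hypothetical helper): a static
+// container is itself dynamic as soon as any nested child type is dynamic.
+func exampleIsDynamic() (bool, bool) {
+ staticArr := makeStaticArrayType(boolType, 10) // bool[10]: fully static
+ nestedDyn := makeStaticArrayType(stringType, 10) // string[10]: dynamic child
+ return staticArr.IsDynamic(), nestedDyn.IsDynamic() // false, true
+}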
+
+// findBoolLR assumes that the type at the given index in the type list is an ABI bool.
+// It returns the distance, in the direction of delta, from that index to the furthest
+// consecutive Bool type.
+func findBoolLR(typeList []Type, index int, delta int) int {
+ until := 0
+ for {
+ curr := index + delta*until
+ if typeList[curr].abiTypeID == Bool {
+ if curr != len(typeList)-1 && delta > 0 {
+ until++
+ } else if curr > 0 && delta < 0 {
+ until++
+ } else {
+ break
+ }
+ } else {
+ until--
+ break
+ }
+ }
+ return until
+}
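+
+// exampleFindBoolLR is an illustrative sketch (a hypothetical helper): for
+// (uint64,bool,bool,bool,byte) with index 1 pointing at the first bool,
+// searching right finds 2 more consecutive bools and searching left finds none.
+func exampleFindBoolLR() (right, left int) {
+ list := []Type{{abiTypeID: Uint, bitSize: 64}, boolType, boolType, boolType, byteType}
+ right = findBoolLR(list, 1, 1) // == 2
+ left = findBoolLR(list, 1, -1) // == 0
+ return
+}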
+
+const (
+ addressByteSize = 32
+ singleByteSize = 1
+ singleBoolSize = 1
+ lengthEncodeByteSize = 2
+)
+
+// ByteLen method calculates the byte length of a static ABI type.
+func (t Type) ByteLen() (int, error) {
+ switch t.abiTypeID {
+ case Address:
+ return addressByteSize, nil
+ case Byte:
+ return singleByteSize, nil
+ case Uint, Ufixed:
+ return int(t.bitSize / 8), nil
+ case Bool:
+ return singleBoolSize, nil
+ case ArrayStatic:
+ if t.childTypes[0].abiTypeID == Bool {
+ byteLen := int(t.staticLength+7) / 8
+ return byteLen, nil
+ }
+ elemByteLen, err := t.childTypes[0].ByteLen()
+ if err != nil {
+ return -1, err
+ }
+ return int(t.staticLength) * elemByteLen, nil
+ case Tuple:
+ size := 0
+ for i := 0; i < len(t.childTypes); i++ {
+ if t.childTypes[i].abiTypeID == Bool {
+ // count the consecutive bools after this one
+ after := findBoolLR(t.childTypes, i, 1)
+ // skip over the bools just counted
+ i += after
+ // total number of consecutive bools, packed 8 per byte
+ boolNum := after + 1
+ size += (boolNum + 7) / 8
+ } else {
+ childByteSize, err := t.childTypes[i].ByteLen()
+ if err != nil {
+ return -1, err
+ }
+ size += childByteSize
+ }
+ }
+ return size, nil
+ default:
+ return -1, fmt.Errorf("%s is a dynamic type", t.String())
+ }
+}
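+
+// exampleByteLen is a minimal sketch (a hypothetical helper): consecutive
+// bools in a tuple are packed 8 per byte, so (bool,bool,bool,uint16)
+// occupies 1 byte of packed bools plus 2 bytes for the uint16, i.e. 3 bytes.
+func exampleByteLen() (int, error) {
+ t, err := TypeOf("(bool,bool,bool,uint16)")
+ if err != nil {
+ return -1, err
+ }
+ return t.ByteLen() // == 3, nil
+}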
+
+// IsTransactionType checks if a type string represents a transaction type
+// argument, such as "txn", "pay", "keyreg", etc.
+func IsTransactionType(s string) bool {
+ switch s {
+ case "txn", "pay", "keyreg", "acfg", "axfer", "afrz", "appl":
+ return true
+ default:
+ return false
+ }
+}
diff --git a/data/abi/abi_type_test.go b/data/abi/abi_type_test.go
new file mode 100644
index 000000000..f96dfaf06
--- /dev/null
+++ b/data/abi/abi_type_test.go
@@ -0,0 +1,613 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package abi
+
+import (
+ "fmt"
+ "math/rand"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMakeTypeValid(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // uint
+ for i := 8; i <= 512; i += 8 {
+ uintType, err := makeUintType(i)
+ require.NoError(t, err, "make uint type in valid space should not return error")
+ expected := "uint" + strconv.Itoa(i)
+ actual := uintType.String()
+ require.Equal(t, expected, actual, "makeUintType: expected %s, actual %s", expected, actual)
+ }
+ // ufixed
+ for i := 8; i <= 512; i += 8 {
+ for j := 1; j <= 160; j++ {
+ ufixedType, err := makeUfixedType(i, j)
+ require.NoError(t, err, "make ufixed type in valid space should not return error")
+ expected := "ufixed" + strconv.Itoa(i) + "x" + strconv.Itoa(j)
+ actual := ufixedType.String()
+ require.Equal(t, expected, actual,
+ "TypeOf ufixed error: expected %s, actual %s", expected, actual)
+ }
+ }
+ // bool/strings/address/byte + dynamic/static array + tuple
+ var testcases = []struct {
+ input Type
+ testType string
+ expected string
+ }{
+ {input: boolType, testType: "bool", expected: "bool"},
+ {input: stringType, testType: "string", expected: "string"},
+ {input: addressType, testType: "address", expected: "address"},
+ {input: byteType, testType: "byte", expected: "byte"},
+ // dynamic array
+ {
+ input: makeDynamicArrayType(
+ Type{
+ abiTypeID: Uint,
+ bitSize: uint16(32),
+ },
+ ),
+ testType: "dynamic array",
+ expected: "uint32[]",
+ },
+ {
+ input: makeDynamicArrayType(
+ makeDynamicArrayType(
+ byteType,
+ ),
+ ),
+ testType: "dynamic array",
+ expected: "byte[][]",
+ },
+ {
+ input: makeStaticArrayType(
+ Type{
+ abiTypeID: Ufixed,
+ bitSize: uint16(128),
+ precision: uint16(10),
+ },
+ uint16(100),
+ ),
+ testType: "static array",
+ expected: "ufixed128x10[100]",
+ },
+ {
+ input: makeStaticArrayType(
+ makeStaticArrayType(
+ boolType,
+ uint16(128),
+ ),
+ uint16(256),
+ ),
+ testType: "static array",
+ expected: "bool[128][256]",
+ },
+ // tuple type
+ {
+ input: Type{
+ abiTypeID: Tuple,
+ childTypes: []Type{
+ {
+ abiTypeID: Uint,
+ bitSize: uint16(32),
+ },
+ {
+ abiTypeID: Tuple,
+ childTypes: []Type{
+ addressType,
+ byteType,
+ makeStaticArrayType(boolType, uint16(10)),
+ makeDynamicArrayType(
+ Type{
+ abiTypeID: Ufixed,
+ bitSize: uint16(256),
+ precision: uint16(10),
+ },
+ ),
+ },
+ staticLength: 4,
+ },
+ makeDynamicArrayType(byteType),
+ },
+ staticLength: 3,
+ },
+ testType: "tuple type",
+ expected: "(uint32,(address,byte,bool[10],ufixed256x10[]),byte[])",
+ },
+ }
+ for _, testcase := range testcases {
+ t.Run(fmt.Sprintf("MakeType test %s", testcase.testType), func(t *testing.T) {
+ actual := testcase.input.String()
+ require.Equal(t, testcase.expected, actual,
+ "MakeType: expected %s, actual %s", testcase.expected, actual)
+ })
+ }
+}
+
+func TestMakeTypeInvalid(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // uint
+ for i := 0; i <= 1000; i++ {
+ randInput := rand.Uint32() % (1 << 16)
+ for randInput%8 == 0 && randInput <= 512 && randInput >= 8 {
+ randInput = rand.Uint32() % (1 << 16)
+ }
+ // note: truncating uint32 to uint16 preserves the low bits, so whether a value is divisible by 8 is unchanged.
+ _, err := makeUintType(int(randInput))
+ require.Error(t, err, "makeUintType: should throw error on bitSize input %d", uint16(randInput))
+ }
+ // ufixed
+ for i := 0; i <= 10000; i++ {
+ randSize := rand.Uint64() % (1 << 16)
+ for randSize%8 == 0 && randSize <= 512 && randSize >= 8 {
+ randSize = rand.Uint64() % (1 << 16)
+ }
+ randPrecision := rand.Uint32()
+ for randPrecision >= 1 && randPrecision <= 160 {
+ randPrecision = rand.Uint32()
+ }
+ _, err := makeUfixedType(int(randSize), int(randPrecision))
+ require.Error(t, err, "makeUfixedType: should throw error on bitSize %d, precision %d", randSize, randPrecision)
+ }
+}
+
+func TestTypeFromStringValid(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // uint
+ for i := 8; i <= 512; i += 8 {
+ expected, err := makeUintType(i)
+ require.NoError(t, err, "make uint type in valid space should not return error")
+ actual, err := TypeOf(expected.String())
+ require.NoError(t, err, "TypeOf: uint parsing error: %s", expected.String())
+ require.Equal(t, expected, actual,
+ "TypeOf: expected %s, actual %s", expected.String(), actual.String())
+ }
+ // ufixed
+ for i := 8; i <= 512; i += 8 {
+ for j := 1; j <= 160; j++ {
+ expected, err := makeUfixedType(i, j)
+ require.NoError(t, err, "make ufixed type in valid space should not return error")
+ actual, err := TypeOf("ufixed" + strconv.Itoa(i) + "x" + strconv.Itoa(j))
+ require.NoError(t, err, "TypeOf ufixed parsing error: %s", expected.String())
+ require.Equal(t, expected, actual,
+ "TypeOf ufixed: expected %s, actual %s", expected.String(), actual.String())
+ }
+ }
+ var testcases = []struct {
+ input string
+ testType string
+ expected Type
+ }{
+ {input: boolType.String(), testType: "bool", expected: boolType},
+ {input: stringType.String(), testType: "string", expected: stringType},
+ {input: addressType.String(), testType: "address", expected: addressType},
+ {input: byteType.String(), testType: "byte", expected: byteType},
+ {
+ input: "uint256[]",
+ testType: "dynamic array",
+ expected: makeDynamicArrayType(Type{abiTypeID: Uint, bitSize: 256}),
+ },
+ {
+ input: "ufixed256x64[]",
+ testType: "dynamic array",
+ expected: makeDynamicArrayType(
+ Type{
+ abiTypeID: Ufixed,
+ bitSize: 256,
+ precision: 64,
+ },
+ ),
+ },
+ {
+ input: "byte[][][][]",
+ testType: "dynamic array",
+ expected: makeDynamicArrayType(
+ makeDynamicArrayType(
+ makeDynamicArrayType(
+ makeDynamicArrayType(
+ byteType,
+ ),
+ ),
+ ),
+ ),
+ },
+ // static array
+ {
+ input: "address[100]",
+ testType: "static array",
+ expected: makeStaticArrayType(
+ addressType,
+ uint16(100),
+ ),
+ },
+ {
+ input: "uint64[][200]",
+ testType: "static array",
+ expected: makeStaticArrayType(
+ makeDynamicArrayType(
+ Type{abiTypeID: Uint, bitSize: uint16(64)},
+ ),
+ uint16(200),
+ ),
+ },
+ // tuple type
+ {
+ input: "()",
+ testType: "tuple type",
+ expected: Type{
+ abiTypeID: Tuple,
+ childTypes: []Type{},
+ staticLength: 0,
+ },
+ },
+ {
+ input: "(uint32,(address,byte,bool[10],ufixed256x10[]),byte[])",
+ testType: "tuple type",
+ expected: Type{
+ abiTypeID: Tuple,
+ childTypes: []Type{
+ {
+ abiTypeID: Uint,
+ bitSize: uint16(32),
+ },
+ {
+ abiTypeID: Tuple,
+ childTypes: []Type{
+ addressType,
+ byteType,
+ makeStaticArrayType(boolType, uint16(10)),
+ makeDynamicArrayType(
+ Type{
+ abiTypeID: Ufixed,
+ bitSize: uint16(256),
+ precision: uint16(10),
+ },
+ ),
+ },
+ staticLength: 4,
+ },
+ makeDynamicArrayType(byteType),
+ },
+ staticLength: 3,
+ },
+ },
+ {
+ input: "(uint32,(address,byte,bool[10],(ufixed256x10[])))",
+ testType: "tuple type",
+ expected: Type{
+ abiTypeID: Tuple,
+ childTypes: []Type{
+ {
+ abiTypeID: Uint,
+ bitSize: uint16(32),
+ },
+ {
+ abiTypeID: Tuple,
+ childTypes: []Type{
+ addressType,
+ byteType,
+ makeStaticArrayType(boolType, uint16(10)),
+ {
+ abiTypeID: Tuple,
+ childTypes: []Type{
+ makeDynamicArrayType(
+ Type{
+ abiTypeID: Ufixed,
+ bitSize: uint16(256),
+ precision: uint16(10),
+ },
+ ),
+ },
+ staticLength: 1,
+ },
+ },
+ staticLength: 4,
+ },
+ },
+ staticLength: 2,
+ },
+ },
+ {
+ input: "((uint32),(address,(byte,bool[10],ufixed256x10[])))",
+ testType: "tuple type",
+ expected: Type{
+ abiTypeID: Tuple,
+ childTypes: []Type{
+ {
+ abiTypeID: Tuple,
+ childTypes: []Type{
+ {
+ abiTypeID: Uint,
+ bitSize: uint16(32),
+ },
+ },
+ staticLength: 1,
+ },
+ {
+ abiTypeID: Tuple,
+ childTypes: []Type{
+ addressType,
+ {
+ abiTypeID: Tuple,
+ childTypes: []Type{
+ byteType,
+ makeStaticArrayType(boolType, uint16(10)),
+ makeDynamicArrayType(
+ Type{
+ abiTypeID: Ufixed,
+ bitSize: uint16(256),
+ precision: uint16(10),
+ },
+ ),
+ },
+ staticLength: 3,
+ },
+ },
+ staticLength: 2,
+ },
+ },
+ staticLength: 2,
+ },
+ },
+ }
+ for _, testcase := range testcases {
+ t.Run(fmt.Sprintf("TypeOf test %s", testcase.testType), func(t *testing.T) {
+ actual, err := TypeOf(testcase.input)
+ require.NoError(t, err, "TypeOf %s parsing error", testcase.testType)
+ require.Equal(t, testcase.expected, actual, "TestFromString %s: expected %s, actual %s",
+ testcase.testType, testcase.expected.String(), actual.String())
+ })
+ }
+}
+
+func TestTypeFromStringInvalid(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ for i := 0; i <= 1000; i++ {
+ randSize := rand.Uint64()
+ for randSize%8 == 0 && randSize <= 512 && randSize >= 8 {
+ randSize = rand.Uint64()
+ }
+ errorInput := "uint" + strconv.FormatUint(randSize, 10)
+ _, err := TypeOf(errorInput)
+ require.Error(t, err, "makeUintType: should throw error on bitSize input %d", randSize)
+ }
+ for i := 0; i <= 10000; i++ {
+ randSize := rand.Uint64()
+ for randSize%8 == 0 && randSize <= 512 && randSize >= 8 {
+ randSize = rand.Uint64()
+ }
+ randPrecision := rand.Uint64()
+ for randPrecision >= 1 && randPrecision <= 160 {
+ randPrecision = rand.Uint64()
+ }
+ errorInput := "ufixed" + strconv.FormatUint(randSize, 10) + "x" + strconv.FormatUint(randPrecision, 10)
+ _, err := TypeOf(errorInput)
+ require.Error(t, err, "makeUintType: should throw error on bitSize input %d", randSize)
+ }
+ var testcases = []string{
+ // uint
+ "uint123x345",
+ "uint 128",
+ "uint8 ",
+ "uint!8",
+ "uint[32]",
+ "uint-893",
+ "uint#120\\",
+ // ufixed
+ "ufixed000000000016x0000010",
+ "ufixed123x345",
+ "ufixed 128 x 100",
+ "ufixed64x10 ",
+ "ufixed!8x2 ",
+ "ufixed[32]x16",
+ "ufixed-64x+100",
+ "ufixed16x+12",
+ // dynamic array
+ "uint256 []",
+ "byte[] ",
+ "[][][]",
+ "stuff[]",
+ // static array
+ "ufixed32x10[0]",
+ "byte[10 ]",
+ "uint64[0x21]",
+ // tuple
+ "(ufixed128x10))",
+ "(,uint128,byte[])",
+ "(address,ufixed64x5,)",
+ "(byte[16],somethingwrong)",
+ "( )",
+ "((uint32)",
+ "(byte,,byte)",
+ "((byte),,(byte))",
+ }
+ for _, testcase := range testcases {
+ t.Run(fmt.Sprintf("TypeOf dynamic array test %s", testcase), func(t *testing.T) {
+ _, err := TypeOf(testcase)
+ require.Error(t, err, "%s should throw error", testcase)
+ })
+ }
+}
+
+func generateTupleType(baseTypes []Type, tupleTypes []Type) Type {
+ if len(baseTypes) == 0 && len(tupleTypes) == 0 {
+ panic("should not pass all nil arrays into generateTupleType")
+ }
+ tupleLen := 0
+ for tupleLen == 0 {
+ tupleLen = rand.Intn(20)
+ }
+ resultTypes := make([]Type, tupleLen)
+ for i := 0; i < tupleLen; i++ {
+ baseOrTuple := rand.Intn(5)
+ if baseOrTuple == 1 && len(tupleTypes) > 0 {
+ resultTypes[i] = tupleTypes[rand.Intn(len(tupleTypes))]
+ } else {
+ resultTypes[i] = baseTypes[rand.Intn(len(baseTypes))]
+ }
+ }
+ return Type{abiTypeID: Tuple, childTypes: resultTypes, staticLength: uint16(tupleLen)}
+}
+
+func TestTypeMISC(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ rand.Seed(time.Now().Unix())
+
+ var testpool = []Type{
+ boolType,
+ addressType,
+ stringType,
+ byteType,
+ }
+ for i := 8; i <= 512; i += 8 {
+ uintT, err := makeUintType(i)
+ require.NoError(t, err, "make uint type error")
+ testpool = append(testpool, uintT)
+ }
+ for i := 8; i <= 512; i += 8 {
+ for j := 1; j <= 160; j++ {
+ ufixedT, err := makeUfixedType(i, j)
+ require.NoError(t, err, "make ufixed type error: bitSize %d, precision %d", i, j)
+ testpool = append(testpool, ufixedT)
+ }
+ }
+ for _, testcase := range testpool {
+ testpool = append(testpool, makeDynamicArrayType(testcase))
+ testpool = append(testpool, makeStaticArrayType(testcase, 10))
+ testpool = append(testpool, makeStaticArrayType(testcase, 20))
+ }
+
+ for _, testcase := range testpool {
+ require.True(t, testcase.Equal(testcase), "test type self equal error")
+ }
+ baseTestCount := 0
+ for baseTestCount < 1000 {
+ index0 := rand.Intn(len(testpool))
+ index1 := rand.Intn(len(testpool))
+ if index0 == index1 {
+ continue
+ }
+ require.False(t, testpool[index0].Equal(testpool[index1]),
+ "test type not equal error\n%s\n%s",
+ testpool[index0].String(), testpool[index1].String())
+ baseTestCount++
+ }
+
+ testpoolTuple := make([]Type, 0)
+ for i := 0; i < 100; i++ {
+ testpoolTuple = append(testpoolTuple, generateTupleType(testpool, testpoolTuple))
+ }
+ for _, testcaseTuple := range testpoolTuple {
+ require.True(t, testcaseTuple.Equal(testcaseTuple), "test type tuple equal error")
+ }
+
+ tupleTestCount := 0
+ for tupleTestCount < 100 {
+ index0 := rand.Intn(len(testpoolTuple))
+ index1 := rand.Intn(len(testpoolTuple))
+ if testpoolTuple[index0].String() == testpoolTuple[index1].String() {
+ continue
+ }
+ require.False(t, testpoolTuple[index0].Equal(testpoolTuple[index1]),
+ "test type tuple not equal error\n%s\n%s",
+ testpoolTuple[index0].String(), testpoolTuple[index1].String())
+ tupleTestCount++
+ }
+
+ testpool = append(testpool, testpoolTuple...)
+ isDynamicCount := 0
+ for isDynamicCount < 100 {
+ index := rand.Intn(len(testpool))
+ isDynamicArr := strings.Contains(testpool[index].String(), "[]")
+ isDynamicStr := strings.Contains(testpool[index].String(), "string")
+ require.Equal(t, isDynamicArr || isDynamicStr, testpool[index].IsDynamic(),
+ "test type isDynamic error\n%s", testpool[index].String())
+ isDynamicCount++
+ }
+
+ addressByteLen, err := addressType.ByteLen()
+ require.NoError(t, err, "address type bytelen should not return error")
+ require.Equal(t, 32, addressByteLen, "address type bytelen should be 32")
+ byteByteLen, err := byteType.ByteLen()
+ require.NoError(t, err, "byte type bytelen should not return error")
+ require.Equal(t, 1, byteByteLen, "byte type bytelen should be 1")
+ boolByteLen, err := boolType.ByteLen()
+ require.NoError(t, err, "bool type bytelen should be 1")
+ require.Equal(t, 1, boolByteLen, "bool type bytelen should be 1")
+
+ byteLenTestCount := 0
+ for byteLenTestCount < 100 {
+ index := rand.Intn(len(testpool))
+ testType := testpool[index]
+ byteLen, err := testType.ByteLen()
+ if testType.IsDynamic() {
+ require.Error(t, err, "byteLen test error on %s dynamic type, should have error",
+ testType.String())
+ } else {
+ require.NoError(t, err, "byteLen test error on %s dynamic type, should not have error")
+ if testType.abiTypeID == Tuple {
+ sizeSum := 0
+ for i := 0; i < len(testType.childTypes); i++ {
+ if testType.childTypes[i].abiTypeID == Bool {
+ // count consecutive bools before this one
+ before := findBoolLR(testType.childTypes, i, -1)
+ // count consecutive bools after this one
+ after := findBoolLR(testType.childTypes, i, 1)
+ // bools are packed 8 per byte, so a bool run always starts on a byte boundary
+ require.True(t, before%8 == 0, "expected tuple bool compact by 8")
+ if after > 7 {
+ after = 7
+ }
+ i += after
+ sizeSum++
+ } else {
+ childByteSize, err := testType.childTypes[i].ByteLen()
+ require.NoError(t, err, "byteLen not expected to fail on tuple child type")
+ sizeSum += childByteSize
+ }
+ }
+
+ require.Equal(t, sizeSum, byteLen,
+ "%s does not match calculated byte length %d", testType.String(), sizeSum)
+ } else if testType.abiTypeID == ArrayStatic {
+ if testType.childTypes[0].abiTypeID == Bool {
+ expected := testType.staticLength / 8
+ if testType.staticLength%8 != 0 {
+ expected++
+ }
+ actual, err := testType.ByteLen()
+ require.NoError(t, err, "%s should not return error on byteLen test")
+ require.Equal(t, int(expected), actual, "%s do not match calculated byte length %d",
+ testType.String(), expected)
+ } else {
+ childSize, err := testType.childTypes[0].ByteLen()
+ require.NoError(t, err, "%s should not return error on byteLen test", testType.childTypes[0].String())
+ expected := childSize * int(testType.staticLength)
+ require.Equal(t, expected, byteLen,
+ "%s does not match calculated byte length %d", testType.String(), expected)
+ }
+ }
+ }
+ byteLenTestCount++
+ }
+}
diff --git a/data/account/msgp_gen.go b/data/account/msgp_gen.go
new file mode 100644
index 000000000..8f6a96fd7
--- /dev/null
+++ b/data/account/msgp_gen.go
@@ -0,0 +1,238 @@
+package account
+
+// Code generated by github.com/algorand/msgp DO NOT EDIT.
+
+import (
+ "github.com/algorand/msgp/msgp"
+)
+
+// The following msgp objects are implemented in this file:
+// ParticipationKeyIdentity
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+
+// MarshalMsg implements msgp.Marshaler
+func (z *ParticipationKeyIdentity) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(6)
+ var zb0001Mask uint8 /* 7 bits */
+ if (*z).Parent.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if (*z).FirstValid.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ if (*z).KeyDilution == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x8
+ }
+ if (*z).LastValid.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x10
+ }
+ if (*z).VoteID.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x20
+ }
+ if (*z).VRFSK.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x40
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "addr"
+ o = append(o, 0xa4, 0x61, 0x64, 0x64, 0x72)
+ o = (*z).Parent.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "fv"
+ o = append(o, 0xa2, 0x66, 0x76)
+ o = (*z).FirstValid.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x8) == 0 { // if not empty
+ // string "kd"
+ o = append(o, 0xa2, 0x6b, 0x64)
+ o = msgp.AppendUint64(o, (*z).KeyDilution)
+ }
+ if (zb0001Mask & 0x10) == 0 { // if not empty
+ // string "lv"
+ o = append(o, 0xa2, 0x6c, 0x76)
+ o = (*z).LastValid.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x20) == 0 { // if not empty
+ // string "vote-id"
+ o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x2d, 0x69, 0x64)
+ o = (*z).VoteID.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x40) == 0 { // if not empty
+ // string "vrfsk"
+ o = append(o, 0xa5, 0x76, 0x72, 0x66, 0x73, 0x6b)
+ o = (*z).VRFSK.MarshalMsg(o)
+ }
+ }
+ return
+}
+
+func (_ *ParticipationKeyIdentity) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*ParticipationKeyIdentity)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *ParticipationKeyIdentity) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Parent.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Parent")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).VRFSK.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VRFSK")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).VoteID.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteID")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).FirstValid.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "FirstValid")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).LastValid.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "LastValid")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).KeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KeyDilution")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = ParticipationKeyIdentity{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "addr":
+ bts, err = (*z).Parent.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Parent")
+ return
+ }
+ case "vrfsk":
+ bts, err = (*z).VRFSK.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VRFSK")
+ return
+ }
+ case "vote-id":
+ bts, err = (*z).VoteID.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteID")
+ return
+ }
+ case "fv":
+ bts, err = (*z).FirstValid.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "FirstValid")
+ return
+ }
+ case "lv":
+ bts, err = (*z).LastValid.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LastValid")
+ return
+ }
+ case "kd":
+ (*z).KeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KeyDilution")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *ParticipationKeyIdentity) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*ParticipationKeyIdentity)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *ParticipationKeyIdentity) Msgsize() (s int) {
+ s = 1 + 5 + (*z).Parent.Msgsize() + 6 + (*z).VRFSK.Msgsize() + 8 + (*z).VoteID.Msgsize() + 3 + (*z).FirstValid.Msgsize() + 3 + (*z).LastValid.Msgsize() + 3 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *ParticipationKeyIdentity) MsgIsZero() bool {
+ return ((*z).Parent.MsgIsZero()) && ((*z).VRFSK.MsgIsZero()) && ((*z).VoteID.MsgIsZero()) && ((*z).FirstValid.MsgIsZero()) && ((*z).LastValid.MsgIsZero()) && ((*z).KeyDilution == 0)
+}
diff --git a/data/account/msgp_gen_test.go b/data/account/msgp_gen_test.go
new file mode 100644
index 000000000..a8927e790
--- /dev/null
+++ b/data/account/msgp_gen_test.go
@@ -0,0 +1,73 @@
+// +build !skip_msgp_testing
+
+package account
+
+// Code generated by github.com/algorand/msgp DO NOT EDIT.
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/msgp/msgp"
+)
+
+func TestMarshalUnmarshalParticipationKeyIdentity(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := ParticipationKeyIdentity{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingParticipationKeyIdentity(t *testing.T) {
+ protocol.RunEncodingTest(t, &ParticipationKeyIdentity{})
+}
+
+func BenchmarkMarshalMsgParticipationKeyIdentity(b *testing.B) {
+ v := ParticipationKeyIdentity{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgParticipationKeyIdentity(b *testing.B) {
+ v := ParticipationKeyIdentity{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalParticipationKeyIdentity(b *testing.B) {
+ v := ParticipationKeyIdentity{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/data/account/participation.go b/data/account/participation.go
index 269163c99..474f6ca5e 100644
--- a/data/account/participation.go
+++ b/data/account/participation.go
@@ -41,6 +41,7 @@ import (
// For correctness, all Roots should have no more than one Participation
// globally active at any time. If this condition is violated, the Root may
// equivocate. (Algorand tolerates a limited fraction of misbehaving accounts.)
+//msgp:ignore Participation
type Participation struct {
Parent basics.Address
@@ -56,9 +57,50 @@ type Participation struct {
KeyDilution uint64
}
+// ParticipationKeyIdentity is for msgpack encoding the participation data.
+type ParticipationKeyIdentity struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Parent basics.Address `codec:"addr"`
+ VRFSK crypto.VrfPrivkey `codec:"vrfsk"`
+ VoteID crypto.OneTimeSignatureVerifier `codec:"vote-id"`
+ FirstValid basics.Round `codec:"fv"`
+ LastValid basics.Round `codec:"lv"`
+ KeyDilution uint64 `codec:"kd"`
+}
+
+// ToBeHashed implements the Hashable interface.
+func (id *ParticipationKeyIdentity) ToBeHashed() (protocol.HashID, []byte) {
+ return protocol.ParticipationKeys, protocol.Encode(id)
+}
+
+// ID creates a ParticipationID hash from the identity file.
+func (id ParticipationKeyIdentity) ID() ParticipationID {
+ return ParticipationID(crypto.HashObj(&id))
+}
+
+// ID computes a ParticipationID.
+func (part Participation) ID() ParticipationID {
+ idData := ParticipationKeyIdentity{
+ Parent: part.Parent,
+ FirstValid: part.FirstValid,
+ LastValid: part.LastValid,
+ KeyDilution: part.KeyDilution,
+ }
+ if part.VRF != nil {
+ copy(idData.VRFSK[:], part.VRF.SK[:])
+ }
+ if part.Voting != nil {
+ copy(idData.VoteID[:], part.Voting.OneTimeSignatureVerifier[:])
+ }
+
+ return idData.ID()
+}
+
// PersistedParticipation encapsulates the static state of the participation
// for a single address at any given moment, while providing the ability
// to handle persistence and deletion of secrets.
+//msgp:ignore PersistedParticipation
type PersistedParticipation struct {
Participation
@@ -164,7 +206,7 @@ func (part PersistedParticipation) PersistNewParent() error {
// FillDBWithParticipationKeys initializes the passed database with participation keys
func FillDBWithParticipationKeys(store db.Accessor, address basics.Address, firstValid, lastValid basics.Round, keyDilution uint64) (part PersistedParticipation, err error) {
if lastValid < firstValid {
- err = fmt.Errorf("FillDBWithParticipationKeys: lastValid %d is after firstValid %d", lastValid, firstValid)
+ err = fmt.Errorf("FillDBWithParticipationKeys: firstValid %d is after lastValid %d", firstValid, lastValid)
return
}
diff --git a/data/account/participationRegistry.go b/data/account/participationRegistry.go
new file mode 100644
index 000000000..213e2be50
--- /dev/null
+++ b/data/account/participationRegistry.go
@@ -0,0 +1,953 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package account
+
+import (
+ "context"
+ "database/sql"
+ "encoding/base32"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/algorand/go-deadlock"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/db"
+)
+
+const defaultTimeout = 5 * time.Second
+
+// ParticipationID identifies a particular set of participation keys.
+//msgp:ignore ParticipationID
+type ParticipationID crypto.Digest
+
+// IsZero returns true if the ParticipationID is all zero bytes.
+func (pid ParticipationID) IsZero() bool {
+ return (crypto.Digest(pid)).IsZero()
+}
+
+// String prints a b32 version of this ID.
+func (pid ParticipationID) String() string {
+ return base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(pid[:])
+}
+
+// ParseParticipationID takes a string and returns a ParticipationID object
+func ParseParticipationID(str string) (d ParticipationID, err error) {
+ decoded, err := base32.StdEncoding.WithPadding(base32.NoPadding).DecodeString(str)
+ if err != nil {
+ return d, err
+ }
+ if len(decoded) != len(d) {
+ return d, fmt.Errorf(`attempted to decode a string which was not a participation id: "%s"`, str)
+ }
+ copy(d[:], decoded[:])
+ return d, err
+}
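+
+// exampleParticipationIDRoundTrip is a minimal sketch (a hypothetical helper):
+// a ParticipationID round-trips losslessly through its unpadded base32 string form.
+func exampleParticipationIDRoundTrip(pid ParticipationID) (ParticipationID, error) {
+ return ParseParticipationID(pid.String())
+}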
+
+// ParticipationRecord contains all metadata relating to a set of participation keys.
+type ParticipationRecord struct {
+ ParticipationID ParticipationID
+
+ Account basics.Address
+ FirstValid basics.Round
+ LastValid basics.Round
+ KeyDilution uint64
+
+ LastVote basics.Round
+ LastBlockProposal basics.Round
+ LastStateProof basics.Round
+ EffectiveFirst basics.Round
+ EffectiveLast basics.Round
+
+ VRF *crypto.VRFSecrets
+ Voting *crypto.OneTimeSignatureSecrets
+}
+
+var zeroParticipationRecord = ParticipationRecord{}
+
+// IsZero returns true if the object contains zero values.
+func (r ParticipationRecord) IsZero() bool {
+ return r == zeroParticipationRecord
+}
+
+// Duplicate creates a copy of the current object. This is required once secrets are stored.
+func (r ParticipationRecord) Duplicate() ParticipationRecord {
+ var vrf crypto.VRFSecrets
+ if r.VRF != nil {
+ copy(vrf.SK[:], r.VRF.SK[:])
+ copy(vrf.PK[:], r.VRF.PK[:])
+ }
+
+ var voting crypto.OneTimeSignatureSecrets
+ if r.Voting != nil {
+ voting = r.Voting.Snapshot()
+ }
+ return ParticipationRecord{
+ ParticipationID: r.ParticipationID,
+ Account: r.Account,
+ FirstValid: r.FirstValid,
+ LastValid: r.LastValid,
+ KeyDilution: r.KeyDilution,
+ LastVote: r.LastVote,
+ LastBlockProposal: r.LastBlockProposal,
+ LastStateProof: r.LastStateProof,
+ EffectiveFirst: r.EffectiveFirst,
+ EffectiveLast: r.EffectiveLast,
+ VRF: &vrf,
+ Voting: &voting,
+ }
+}
+
+// ParticipationAction is used when recording participation actions.
+//msgp:ignore ParticipationAction
+type ParticipationAction int
+
+// ParticipationAction types
+const (
+ Vote ParticipationAction = iota
+ BlockProposal
+ StateProof
+)
+
+// ErrParticipationIDNotFound is used when attempting to update a set of keys which do not exist.
+var ErrParticipationIDNotFound = errors.New("the participation ID was not found")
+
+// ErrInvalidRegisterRange is used when attempting to register a participation key on a round that is out of range.
+var ErrInvalidRegisterRange = errors.New("key would not be active within range")
+
+// ErrUnknownParticipationAction is used when record is given something other than the known actions.
+var ErrUnknownParticipationAction = errors.New("unknown participation action")
+
+// ErrAlreadyInserted is used when inserting a key which already exists in the registry.
+var ErrAlreadyInserted = errors.New("these participation keys are already inserted")
+
+// ErrActiveKeyNotFound is used when attempting to update an account with no active key
+var ErrActiveKeyNotFound = errors.New("no active participation key found for account")
+
+// ErrMultipleValidKeys is used when recording a result but multiple valid keys were found. This should not be possible.
+var ErrMultipleValidKeys = errors.New("multiple valid keys found while recording key usage")
+
+// ErrMultipleKeysForID is returned when multiple keys are found for the same participationID. This should never happen.
+var ErrMultipleKeysForID = errors.New("multiple valid keys found for the same participationID")
+
+// ErrNoKeyForID is returned when no valid key is found for a participationID. A key may be
+// deleted and used at the same time, so this error should be handled.
+var ErrNoKeyForID = errors.New("no valid key found for the participationID")
+
+// ParticipationRegistry contains all functions for interacting with the Participation Registry.
+type ParticipationRegistry interface {
+ // Insert adds a record to storage and computes the ParticipationID
+ Insert(record Participation) (ParticipationID, error)
+
+ // Delete removes a record from storage.
+ Delete(id ParticipationID) error
+
+ // DeleteExpired removes all records from storage which are expired on the given round.
+ DeleteExpired(round basics.Round) error
+
+ // Get a participation record.
+ Get(id ParticipationID) ParticipationRecord
+
+ // GetAll of the participation records.
+ GetAll() []ParticipationRecord
+
+ // Register updates the EffectiveFirst and EffectiveLast fields. If there are multiple records for the account
+ // then it is possible for multiple records to be updated.
+ Register(id ParticipationID, on basics.Round) error
+
+ // Record sets the Last* field for the active ParticipationID for the given account.
+ Record(account basics.Address, round basics.Round, participationType ParticipationAction) error
+
+ // Flush ensures that all changes have been written to the underlying data store.
+ Flush(timeout time.Duration) error
+
+ // Close any resources used to implement the interface.
+ Close()
+}
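+
+// exampleRegistryLifecycle is an illustrative sketch (a hypothetical helper)
+// of the intended flow against any ParticipationRegistry implementation:
+// insert the keys, register them as active on a round, record usage, and
+// flush pending writes to the store.
+func exampleRegistryLifecycle(registry ParticipationRegistry, part Participation, on basics.Round) error {
+ id, err := registry.Insert(part)
+ if err != nil {
+ return err
+ }
+ if err := registry.Register(id, on); err != nil {
+ return err
+ }
+ if err := registry.Record(part.Parent, on, Vote); err != nil {
+ return err
+ }
+ return registry.Flush(defaultTimeout)
+}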
+
+// MakeParticipationRegistry creates a db.Accessor backed ParticipationRegistry.
+func MakeParticipationRegistry(accessor db.Pair, log logging.Logger) (ParticipationRegistry, error) {
+ return makeParticipationRegistry(accessor, log)
+}
+
+// makeParticipationRegistry creates a db.Accessor backed ParticipationRegistry.
+func makeParticipationRegistry(accessor db.Pair, log logging.Logger) (*participationDB, error) {
+ if log == nil {
+ return nil, errors.New("invalid logger provided")
+ }
+
+ migrations := []db.Migration{
+ dbSchemaUpgrade0,
+ }
+
+ err := db.Initialize(accessor.Wdb, migrations)
+ if err != nil {
+ accessor.Close()
+ return nil, fmt.Errorf("unable to initialize participation registry database: %w", err)
+ }
+
+ registry := &participationDB{
+ log: log,
+ store: accessor,
+ writeQueue: make(chan partDBWriteRecord, 10),
+ writeQueueDone: make(chan struct{}),
+ flushTimeout: defaultTimeout,
+ }
+ go registry.writeThread()
+
+ err = registry.initializeCache()
+ if err != nil {
+ registry.Close()
+ return nil, fmt.Errorf("unable to initialize participation registry cache: %w", err)
+ }
+
+ return registry, nil
+}
+
+// Queries
+const (
+ createKeysets = `CREATE TABLE Keysets (
+ pk INTEGER PRIMARY KEY NOT NULL,
+
+ participationID BLOB NOT NULL,
+ account BLOB NOT NULL,
+
+ firstValidRound INTEGER NOT NULL,
+ lastValidRound INTEGER NOT NULL,
+ keyDilution INTEGER NOT NULL,
+
+ vrf BLOB, --* msgpack encoding of ParticipationAccount.vrf
+ stateProof BLOB --* msgpack encoding of ParticipationAccount.BlockProof
+ )`
+
+ createRolling = `CREATE TABLE Rolling (
+ pk INTEGER PRIMARY KEY NOT NULL,
+
+ lastVoteRound INTEGER,
+ lastBlockProposalRound INTEGER,
+ lastStateProofRound INTEGER,
+ effectiveFirstRound INTEGER,
+ effectiveLastRound INTEGER,
+
+ voting BLOB --* msgpack encoding of ParticipationAccount.voting
+ )`
+
+ createStateProof = `CREATE TABLE StateProofKeys (
+ pk INTEGER NOT NULL, --* join with keyset to find key for a particular participation id
+ round INTEGER NOT NULL, --* committed round for this key
+ key BLOB NOT NULL, --* msgpack encoding of ParticipationAccount.BlockProof.SignatureAlgorithm
+ PRIMARY KEY (pk, round)
+ )`
+ insertKeysetQuery = `INSERT INTO Keysets (participationID, account, firstValidRound, lastValidRound, keyDilution, vrf) VALUES (?, ?, ?, ?, ?, ?)`
+ insertRollingQuery = `INSERT INTO Rolling (pk, voting) VALUES (?, ?)`
+
+ // SELECT pk FROM Keysets WHERE participationID = ?
+ selectPK = `SELECT pk FROM Keysets WHERE participationID = ? LIMIT 1`
+ selectLastPK = `SELECT pk FROM Keysets ORDER BY pk DESC LIMIT 1`
+ selectRecords = `SELECT
+ k.participationID, k.account, k.firstValidRound,
+ k.lastValidRound, k.keyDilution, k.vrf,
+ r.lastVoteRound, r.lastBlockProposalRound, r.lastStateProofRound,
+ r.effectiveFirstRound, r.effectiveLastRound, r.voting
+ FROM Keysets k
+ INNER JOIN Rolling r
+ ON k.pk = r.pk`
+ deleteKeysets = `DELETE FROM Keysets WHERE pk=?`
+ deleteRolling = `DELETE FROM Rolling WHERE pk=?`
+ updateRollingFieldsSQL = `UPDATE Rolling
+ SET lastVoteRound=?,
+ lastBlockProposalRound=?,
+ lastStateProofRound=?,
+ effectiveFirstRound=?,
+ effectiveLastRound=?
+ WHERE pk IN (SELECT pk FROM Keysets WHERE participationID=?)`
+)
+
+// dbSchemaUpgrade0 initializes the tables.
+func dbSchemaUpgrade0(ctx context.Context, tx *sql.Tx, newDatabase bool) error {
+ // Keysets is for the immutable data.
+ _, err := tx.Exec(createKeysets)
+ if err != nil {
+ return err
+ }
+
+ // Rolling may change over time.
+ _, err = tx.Exec(createRolling)
+ if err != nil {
+ return err
+ }
+
+ // For performance reasons, state proofs are in a separate table.
+ _, err = tx.Exec(createStateProof)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// participationDB provides a concrete implementation of the ParticipationRegistry interface.
+type participationDB struct {
+ cache map[ParticipationID]ParticipationRecord
+
+ // dirty marked on Record(), cleared on Register(), Delete(), Flush()
+ dirty map[ParticipationID]struct{}
+
+ log logging.Logger
+ store db.Pair
+ mutex deadlock.RWMutex
+
+ writeQueue chan partDBWriteRecord
+ writeQueueDone chan struct{}
+
+ flushTimeout time.Duration
+}
+
+type updatingParticipationRecord struct {
+ ParticipationRecord
+
+ required bool
+}
+
+// partDBWriteRecord is an event object sent to the writeThread to facilitate async
+// database writes. Only one set of event fields should be set at a time.
+type partDBWriteRecord struct {
+ insertID ParticipationID
+ insert Participation
+
+ registerUpdated map[ParticipationID]updatingParticipationRecord
+
+ delete ParticipationID
+
+ flushResultChannel chan error
+}
+
+func (db *participationDB) initializeCache() error {
+ db.mutex.Lock()
+ defer db.mutex.Unlock()
+
+ records, err := db.getAllFromDB()
+ if err != nil {
+ return err
+ }
+
+ cache := make(map[ParticipationID]ParticipationRecord)
+ for _, record := range records {
+ // Check if it already exists
+ if _, ok := cache[record.ParticipationID]; ok {
+ return ErrMultipleKeysForID
+ }
+ cache[record.ParticipationID] = record
+ }
+
+ db.cache = cache
+ db.dirty = make(map[ParticipationID]struct{})
+ return nil
+}
+
+func (db *participationDB) writeThread() {
+ defer close(db.writeQueueDone)
+ var err error
+ var lastErr error
+ for {
+ var wr partDBWriteRecord
+ var chanOk bool
+
+ // blocking read until next activity or close
+ wr, chanOk = <-db.writeQueue
+ if !chanOk {
+ return // chan closed
+ }
+
+ if len(wr.registerUpdated) != 0 {
+ err = db.registerInner(wr.registerUpdated)
+ } else if !wr.insertID.IsZero() {
+ err = db.insertInner(wr.insert, wr.insertID)
+ } else if !wr.delete.IsZero() {
+ err = db.deleteInner(wr.delete)
+ } else if wr.flushResultChannel != nil {
+ err = db.flushInner()
+ }
+ if err != nil {
+ lastErr = err
+ }
+
+ if wr.flushResultChannel != nil {
+ wr.flushResultChannel <- lastErr
+ lastErr = nil
+ }
+ }
+}
+
+// verifyExecWithOneRowEffected checks for a successful Exec and also verifies exactly 1 row was affected
+func verifyExecWithOneRowEffected(err error, result sql.Result, operationName string) error {
+ if err != nil {
+ return fmt.Errorf("unable to execute %s: %w", operationName, err)
+ }
+ rows, err := result.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("unable to get %s rows affected: %w", operationName, err)
+ }
+ if rows != 1 {
+ return fmt.Errorf("unexpected number of %s rows affected, expected 1 found %d", operationName, rows)
+ }
+ return nil
+}
+
+func (db *participationDB) insertInner(record Participation, id ParticipationID) (err error) {
+ var rawVRF []byte
+ var rawVoting []byte
+
+ if record.VRF != nil {
+ rawVRF = protocol.Encode(record.VRF)
+ }
+ if record.Voting != nil {
+ voting := record.Voting.Snapshot()
+ rawVoting = protocol.Encode(&voting)
+ }
+
+ err = db.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ result, err := tx.Exec(
+ insertKeysetQuery,
+ id[:],
+ record.Parent[:],
+ record.FirstValid,
+ record.LastValid,
+ record.KeyDilution,
+ rawVRF)
+ if err := verifyExecWithOneRowEffected(err, result, "insert keyset"); err != nil {
+ return err
+ }
+ pk, err := result.LastInsertId()
+ if err != nil {
+ return fmt.Errorf("unable to get pk from keyset: %w", err)
+ }
+
+ // Create Rolling entry
+ result, err = tx.Exec(insertRollingQuery, pk, rawVoting)
+ if err := verifyExecWithOneRowEffected(err, result, "insert rolling"); err != nil {
+ return err
+ }
+
+ return nil
+ })
+ return err
+}
+
+func (db *participationDB) registerInner(updated map[ParticipationID]updatingParticipationRecord) error {
+ var cacheDeletes []ParticipationID
+ err := db.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ // Disable active key if there is one
+ for id, record := range updated {
+ err := updateRollingFields(ctx, tx, record.ParticipationRecord)
+ // Repair the case when no keys were updated
+ if err == ErrNoKeyForID {
+ db.log.Warn("participationDB unable to update key in cache. Removing from cache.")
+ cacheDeletes = append(cacheDeletes, id)
+ if !record.required {
+ err = nil
+ }
+ }
+ if err != nil {
+ return fmt.Errorf("unable to disable old key when registering %s: %w", id, err)
+ }
+ }
+ return nil
+ })
+
+ // Update cache
+ if err == nil && len(cacheDeletes) != 0 {
+ db.mutex.Lock()
+ defer db.mutex.Unlock()
+ for _, id := range cacheDeletes {
+ delete(db.cache, id)
+ delete(db.dirty, id)
+ }
+ }
+ return err
+}
+
+func (db *participationDB) deleteInner(id ParticipationID) error {
+ err := db.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ // Fetch primary key
+ var pk int
+ row := tx.QueryRow(selectPK, id[:])
+ err := row.Scan(&pk)
+ if err == sql.ErrNoRows {
+ // nothing to do.
+ return nil
+ }
+ if err != nil {
+ return fmt.Errorf("unable to scan pk: %w", err)
+ }
+
+ // Delete rows
+ result, err := tx.Exec(deleteKeysets, pk)
+ if err := verifyExecWithOneRowEffected(err, result, "delete keyset"); err != nil {
+ return err
+ }
+
+ result, err = tx.Exec(deleteRolling, pk)
+ if err := verifyExecWithOneRowEffected(err, result, "delete rolling"); err != nil {
+ return err
+ }
+
+ return nil
+ })
+ return err
+}
+
+func (db *participationDB) flushInner() error {
+ var dirty map[ParticipationID]struct{}
+ db.mutex.Lock()
+ if len(db.dirty) != 0 {
+ dirty = db.dirty
+ db.dirty = make(map[ParticipationID]struct{})
+ } else {
+ dirty = nil
+ }
+
+ var needsUpdate []ParticipationRecord
+ // Verify that the dirty flag has not desynchronized from the cache.
+ for id := range dirty {
+ if rec, ok := db.cache[id]; !ok {
+ db.log.Warnf("participationDB fixing dirty flag de-synchronization for %s", id)
+ delete(db.cache, id)
+ } else {
+ needsUpdate = append(needsUpdate, rec)
+ }
+ }
+ db.mutex.Unlock()
+
+ if dirty == nil {
+ return nil
+ }
+
+ err := db.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ var errorStr strings.Builder
+ for _, record := range needsUpdate {
+ err := updateRollingFields(ctx, tx, record)
+ // This should only be updating key usage so ignoring missing keys is not a problem.
+ if err != nil && err != ErrNoKeyForID {
+ if errorStr.Len() > 0 {
+ errorStr.WriteString(", ")
+ }
+ errorStr.WriteString(err.Error())
+ }
+ }
+ if errorStr.Len() > 0 {
+ return errors.New(errorStr.String())
+ }
+ return nil
+ })
+
+ if err != nil {
+ // put back what we didn't finish with
+ db.mutex.Lock()
+ for id, v := range dirty {
+ db.dirty[id] = v
+ }
+ db.mutex.Unlock()
+ }
+
+ return err
+}
+
+func (db *participationDB) Insert(record Participation) (id ParticipationID, err error) {
+ db.mutex.Lock()
+ defer db.mutex.Unlock()
+
+ id = record.ID()
+ if _, ok := db.cache[id]; ok {
+ return id, ErrAlreadyInserted
+ }
+
+ db.writeQueue <- partDBWriteRecord{
+ insertID: id,
+ insert: record,
+ }
+
+ // Make some copies.
+ var vrf *crypto.VRFSecrets
+ if record.VRF != nil {
+ vrf = new(crypto.VRFSecrets)
+ copy(vrf.SK[:], record.VRF.SK[:])
+ copy(vrf.PK[:], record.VRF.PK[:])
+ }
+
+ var voting *crypto.OneTimeSignatureSecrets
+ if record.Voting != nil {
+ voting = new(crypto.OneTimeSignatureSecrets)
+ *voting = record.Voting.Snapshot()
+ }
+
+ // update cache.
+ db.cache[id] = ParticipationRecord{
+ ParticipationID: id,
+ Account: record.Address(),
+ FirstValid: record.FirstValid,
+ LastValid: record.LastValid,
+ KeyDilution: record.KeyDilution,
+ LastVote: 0,
+ LastBlockProposal: 0,
+ LastStateProof: 0,
+ EffectiveFirst: 0,
+ EffectiveLast: 0,
+ Voting: voting,
+ VRF: vrf,
+ }
+
+ return
+}
+
+func (db *participationDB) Delete(id ParticipationID) error {
+ db.mutex.Lock()
+ defer db.mutex.Unlock()
+
+ // NoOp if key does not exist.
+ if _, ok := db.cache[id]; !ok {
+ return nil
+ }
+ delete(db.dirty, id)
+ delete(db.cache, id)
+ // do the db part async
+ db.writeQueue <- partDBWriteRecord{
+ delete: id,
+ }
+ return nil
+}
+
+func (db *participationDB) DeleteExpired(round basics.Round) error {
+ // This could be optimized to delete everything with one query.
+ for _, v := range db.GetAll() {
+ if v.LastValid < round {
+ err := db.Delete(v.ParticipationID)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// scanRecords is a helper to manage scanning participation records.
+func scanRecords(rows *sql.Rows) ([]ParticipationRecord, error) {
+ results := make([]ParticipationRecord, 0)
+ for rows.Next() {
+ var record ParticipationRecord
+ var rawParticipation []byte
+ var rawAccount []byte
+ var rawVRF []byte
+ var rawVoting []byte
+
+ var lastVote sql.NullInt64
+ var lastBlockProposal sql.NullInt64
+ var lastCompactCertificate sql.NullInt64
+ var effectiveFirst sql.NullInt64
+ var effectiveLast sql.NullInt64
+
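+		// The scan order below must match the column order of the
+		// selectRecords query.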
+ err := rows.Scan(
+ &rawParticipation,
+ &rawAccount,
+ &record.FirstValid,
+ &record.LastValid,
+ &record.KeyDilution,
+ &rawVRF,
+ &lastVote,
+ &lastBlockProposal,
+ &lastCompactCertificate,
+ &effectiveFirst,
+ &effectiveLast,
+ &rawVoting,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ copy(record.ParticipationID[:], rawParticipation)
+ copy(record.Account[:], rawAccount)
+
+ if len(rawVRF) > 0 {
+ record.VRF = &crypto.VRFSecrets{}
+ err = protocol.Decode(rawVRF, record.VRF)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode VRF: %w", err)
+ }
+ }
+
+ if len(rawVoting) > 0 {
+ record.Voting = &crypto.OneTimeSignatureSecrets{}
+ err = protocol.Decode(rawVoting, record.Voting)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode Voting: %w", err)
+ }
+ }
+
+ // Check optional values.
+ if lastVote.Valid {
+ record.LastVote = basics.Round(lastVote.Int64)
+ }
+
+ if lastBlockProposal.Valid {
+ record.LastBlockProposal = basics.Round(lastBlockProposal.Int64)
+ }
+
+ if lastCompactCertificate.Valid {
+ record.LastStateProof = basics.Round(lastCompactCertificate.Int64)
+ }
+
+ if effectiveFirst.Valid {
+ record.EffectiveFirst = basics.Round(effectiveFirst.Int64)
+ }
+
+ if effectiveLast.Valid {
+ record.EffectiveLast = basics.Round(effectiveLast.Int64)
+ }
+
+ results = append(results, record)
+ }
+
+	// Propagate any error encountered during row iteration.
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+	return results, nil
+}
+
+func (db *participationDB) getAllFromDB() (records []ParticipationRecord, err error) {
+ err = db.store.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ rows, err := tx.Query(selectRecords)
+ if err != nil {
+ return fmt.Errorf("unable to query records: %w", err)
+		}
+		defer rows.Close()
+
+ records, err = scanRecords(rows)
+ if err != nil {
+ records = nil
+ return fmt.Errorf("problem scanning records: %w", err)
+ }
+
+ return nil
+ })
+
+ return
+}
+
+func (db *participationDB) Get(id ParticipationID) ParticipationRecord {
+ db.mutex.RLock()
+ defer db.mutex.RUnlock()
+
+ record, ok := db.cache[id]
+ if !ok {
+ return ParticipationRecord{}
+ }
+ return record.Duplicate()
+}
+
+func (db *participationDB) GetAll() []ParticipationRecord {
+ db.mutex.RLock()
+ defer db.mutex.RUnlock()
+
+ results := make([]ParticipationRecord, 0, len(db.cache))
+ for _, record := range db.cache {
+ results = append(results, record.Duplicate())
+ }
+ return results
+}
+
+// updateRollingFields sets all of the rolling fields according to the record object.
+func updateRollingFields(ctx context.Context, tx *sql.Tx, record ParticipationRecord) error {
+ result, err := tx.ExecContext(ctx, updateRollingFieldsSQL,
+ record.LastVote,
+ record.LastBlockProposal,
+ record.LastStateProof,
+ record.EffectiveFirst,
+ record.EffectiveLast,
+ record.ParticipationID[:])
+ if err != nil {
+ return err
+ }
+
+ numRows, err := result.RowsAffected()
+ if err != nil {
+ return err
+ }
+
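+	// Exactly one row should be affected; anything else indicates an
+	// inconsistent database.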
+ if numRows > 1 {
+ return ErrMultipleKeysForID
+ }
+
+ if numRows < 1 {
+ return ErrNoKeyForID
+ }
+
+ return nil
+}
+
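+// recordActive returns true when the round falls within the record's effective
+// interval. An EffectiveLast of zero means the key was never registered.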
+func recordActive(record ParticipationRecord, on basics.Round) bool {
+ return record.EffectiveLast != 0 && record.EffectiveFirst <= on && on <= record.EffectiveLast
+}
+
+// PKI TODO: Register needs a bit more work to make sure EffectiveFirst and
+// EffectiveLast are set at the right time. Specifically, the node
+// doesn't call Register until the key becomes active and is about
+// to be used, so effective first/last is updated just-in-time. It
+// would be better to update them when the KeyRegistration occurs.
+func (db *participationDB) Register(id ParticipationID, on basics.Round) error {
+ // Lookup recordToRegister for first/last valid and account.
+ recordToRegister := db.Get(id)
+ if recordToRegister.IsZero() {
+ return ErrParticipationIDNotFound
+ }
+
+	// No-op if the record is already active.
+ if recordActive(recordToRegister, on) {
+ return nil
+ }
+
+	// Reject rounds outside the key's valid interval.
+ if on < recordToRegister.FirstValid || on > recordToRegister.LastValid {
+ return ErrInvalidRegisterRange
+ }
+
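+	// Collect any other keys for this account that are active at this round;
+	// they are deactivated below.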
+ var toUpdate []ParticipationRecord
+ db.mutex.Lock()
+ for _, record := range db.cache {
+ if record.Account == recordToRegister.Account && record.ParticipationID != id && recordActive(record, on) {
+ toUpdate = append(toUpdate, record)
+ }
+ }
+ db.mutex.Unlock()
+
+ updated := make(map[ParticipationID]updatingParticipationRecord)
+
+	// Disable any currently active keys for the account.
+ for _, record := range toUpdate {
+ record.EffectiveLast = on - 1
+ updated[record.ParticipationID] = updatingParticipationRecord{
+ record.Duplicate(),
+ false,
+ }
+ }
+ // Mark registered.
+ recordToRegister.EffectiveFirst = on
+ recordToRegister.EffectiveLast = recordToRegister.LastValid
+ updated[recordToRegister.ParticipationID] = updatingParticipationRecord{
+ recordToRegister,
+ true,
+ }
+
+ if len(updated) != 0 {
+ db.writeQueue <- partDBWriteRecord{
+ registerUpdated: updated,
+ }
+ db.mutex.Lock()
+ for id, record := range updated {
+ delete(db.dirty, id)
+ db.cache[id] = record.ParticipationRecord
+ }
+ db.mutex.Unlock()
+ }
+
+ db.log.Infof("Registered key (%s) for account (%s) first valid (%d) last valid (%d)\n",
+ id, recordToRegister.Account, recordToRegister.FirstValid, recordToRegister.LastValid)
+ return nil
+}
+
+func (db *participationDB) Record(account basics.Address, round basics.Round, participationAction ParticipationAction) error {
+ db.mutex.Lock()
+ defer db.mutex.Unlock()
+
+ matches := make([]ParticipationRecord, 0, 1)
+
+ // At most one id should be updated, exit with error if a second is found.
+ for _, record := range db.cache {
+ if record.Account == account && recordActive(record, round) {
+ if len(matches) != 0 {
+				// This probably means there is a bug in the participation registry's Register implementation.
+ return ErrMultipleValidKeys
+ }
+ matches = append(matches, record)
+ }
+ }
+
+ if len(matches) == 0 {
+ // This indicates the participation registry is not synchronized with agreement.
+ return ErrActiveKeyNotFound
+ }
+
+ record := matches[0]
+ // Good case, one key found.
+ switch participationAction {
+ case Vote:
+ record.LastVote = round
+ case BlockProposal:
+ record.LastBlockProposal = round
+ case StateProof:
+ record.LastStateProof = round
+ default:
+ return ErrUnknownParticipationAction
+ }
+
+ db.dirty[record.ParticipationID] = struct{}{}
+ db.cache[record.ParticipationID] = record
+ return nil
+}
+
+// Flush waits until all enqueued asynchronous IO has completed, including
+// writes enqueued by other threads, i.e. until the participation registry is
+// idle. It returns the latest error generated by the async IO, if any.
+func (db *participationDB) Flush(timeout time.Duration) error {
+ resultCh := make(chan error, 1)
+ timeoutCh := time.After(timeout)
+ writeRecord := partDBWriteRecord{
+ flushResultChannel: resultCh,
+ }
+
+ select {
+ case db.writeQueue <- writeRecord:
+ case <-timeoutCh:
+ return fmt.Errorf("timeout while requesting flush, check results manually")
+ }
+
+ select {
+ case err := <-resultCh:
+ return err
+ case <-timeoutCh:
+ return fmt.Errorf("timeout while flushing changes, check results manually")
+ }
+}
+
+// Close attempts to flush with db.flushTimeout, then waits for the write queue for another db.flushTimeout.
+func (db *participationDB) Close() {
+ if err := db.Flush(db.flushTimeout); err != nil {
+		db.log.Warnf("participationDB unhandled error during Close/Flush: %v", err)
+ }
+
+ db.store.Close()
+ close(db.writeQueue)
+
+ // Wait for write queue to close.
+ select {
+ case <-db.writeQueueDone:
+ return
+ case <-time.After(db.flushTimeout):
+ db.log.Warnf("Close(): timeout while waiting for WriteQueue to finish.")
+ }
+}
diff --git a/data/account/participationRegistry_test.go b/data/account/participationRegistry_test.go
new file mode 100644
index 000000000..d000f16cb
--- /dev/null
+++ b/data/account/participationRegistry_test.go
@@ -0,0 +1,769 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package account
+
+import (
+ "context"
+ "database/sql"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/db"
+)
+
+func getRegistry(t *testing.T) *participationDB {
+ rootDB, err := db.OpenPair(t.Name(), true)
+ require.NoError(t, err)
+
+ registry, err := makeParticipationRegistry(rootDB, logging.TestingLog(t))
+ require.NoError(t, err)
+ require.NotNil(t, registry)
+
+ return registry
+}
+
+func assertParticipation(t *testing.T, p Participation, pr ParticipationRecord) {
+ require.Equal(t, p.FirstValid, pr.FirstValid)
+ require.Equal(t, p.LastValid, pr.LastValid)
+ require.Equal(t, p.KeyDilution, pr.KeyDilution)
+ require.Equal(t, p.Parent, pr.Account)
+}
+
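+// makeTestParticipation creates a Participation whose parent address is
+// synthesized from addrID, so distinct IDs map to distinct accounts.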
+func makeTestParticipation(addrID int, first, last basics.Round, dilution uint64) Participation {
+ p := Participation{
+ FirstValid: first,
+ LastValid: last,
+ KeyDilution: dilution,
+ Voting: &crypto.OneTimeSignatureSecrets{},
+ VRF: &crypto.VRFSecrets{},
+ }
+ binary.LittleEndian.PutUint32(p.Parent[:], uint32(addrID))
+ return p
+}
+
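+// registryCloseTest closes the registry and asserts that Close returns within
+// the default timeout.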
+func registryCloseTest(t *testing.T, registry *participationDB) {
+ start := time.Now()
+ registry.Close()
+ duration := time.Since(start)
+ assert.Less(t, uint64(duration), uint64(defaultTimeout))
+}
+
+// Insert participation records and make sure they can be fetched.
+func TestParticipation_InsertGet(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ p := makeTestParticipation(1, 1, 2, 3)
+ p2 := makeTestParticipation(2, 4, 5, 6)
+
+ insertAndVerify := func(part Participation) {
+ id, err := registry.Insert(part)
+ a.NoError(err)
+ a.Equal(part.ID(), id)
+
+ record := registry.Get(part.ID())
+ a.False(record.IsZero())
+ assertParticipation(t, part, record)
+ }
+
+ // Verify inserting some records.
+ insertAndVerify(p)
+ insertAndVerify(p2)
+
+ // Data should be available immediately
+ results := registry.GetAll()
+ a.Len(results, 2)
+ for _, record := range results {
+ if record.Account == p.Parent {
+ assertParticipation(t, p, record)
+ } else if record.Account == p2.Parent {
+ assertParticipation(t, p2, record)
+ } else {
+ a.Fail("unexpected account")
+ }
+ }
+
+ // Check that Flush works, re-initialize cache and verify GetAll.
+ a.NoError(registry.Flush(defaultTimeout))
+ a.NoError(registry.initializeCache())
+ results = registry.GetAll()
+ a.Len(results, 2)
+ for _, record := range results {
+ if record.Account == p.Parent {
+ assertParticipation(t, p, record)
+ } else if record.Account == p2.Parent {
+ assertParticipation(t, p2, record)
+ } else {
+ a.Fail("unexpected account")
+ }
+ }
+}
+
+// Make sure a record can be deleted by id.
+func TestParticipation_Delete(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ p := makeTestParticipation(1, 1, 2, 3)
+ p2 := makeTestParticipation(2, 4, 5, 6)
+
+ id, err := registry.Insert(p)
+ a.NoError(err)
+ a.Equal(p.ID(), id)
+
+ id, err = registry.Insert(p2)
+ a.NoError(err)
+ a.Equal(p2.ID(), id)
+
+ err = registry.Delete(p.ID())
+ a.NoError(err)
+
+ results := registry.GetAll()
+ a.Len(results, 1)
+ assertParticipation(t, p2, results[0])
+
+ // Check that result was persisted.
+ a.NoError(registry.Flush(defaultTimeout))
+ a.NoError(registry.initializeCache())
+ results = registry.GetAll()
+ a.Len(results, 1)
+ assertParticipation(t, p2, results[0])
+}
+
+func TestParticipation_DeleteExpired(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ for i := 10; i < 20; i++ {
+ p := makeTestParticipation(i, 1, basics.Round(i), 1)
+ id, err := registry.Insert(p)
+ a.NoError(err)
+ a.Equal(p.ID(), id)
+ }
+
+ err := registry.DeleteExpired(15)
+ a.NoError(err)
+
+ a.Len(registry.GetAll(), 5, "The first 5 should be deleted.")
+
+ // Check persisting. Verify by re-initializing the cache.
+ a.NoError(registry.Flush(defaultTimeout))
+ a.NoError(registry.initializeCache())
+ a.Len(registry.GetAll(), 5, "The first 5 should be deleted.")
+}
+
+// Make sure the register function properly sets effective first/last for all affected records.
+func TestParticipation_Register(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ // Overlapping keys.
+ p := makeTestParticipation(1, 250000, 3000000, 1)
+ p2 := makeTestParticipation(1, 200000, 4000000, 2)
+
+ id, err := registry.Insert(p)
+ a.NoError(err)
+ a.Equal(p.ID(), id)
+
+ id, err = registry.Insert(p2)
+ a.NoError(err)
+ a.Equal(p2.ID(), id)
+
+ verifyEffectiveRound := func(id ParticipationID, first, last int) {
+ record := registry.Get(id)
+ a.False(record.IsZero())
+ require.Equal(t, first, int(record.EffectiveFirst))
+ require.Equal(t, last, int(record.EffectiveLast))
+ }
+
+ // Register the first key.
+ err = registry.Register(p.ID(), 500000)
+ a.NoError(err)
+ verifyEffectiveRound(p.ID(), 500000, int(p.LastValid))
+
+ // Register second key.
+ err = registry.Register(p2.ID(), 2500000)
+ a.NoError(err)
+ verifyEffectiveRound(p.ID(), 500000, 2499999)
+ verifyEffectiveRound(p2.ID(), 2500000, int(p2.LastValid))
+}
+
+// Test error when registering a non-existing participation ID.
+func TestParticipation_RegisterInvalidID(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ p := makeTestParticipation(0, 250000, 3000000, 1)
+
+ err := registry.Register(p.ID(), 10000000)
+ a.EqualError(err, ErrParticipationIDNotFound.Error())
+}
+
+// Test error attempting to register a key with an invalid range.
+func TestParticipation_RegisterInvalidRange(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ p := makeTestParticipation(0, 250000, 3000000, 1)
+
+ id, err := registry.Insert(p)
+ a.NoError(err)
+ a.Equal(p.ID(), id)
+
+ // Register the first key.
+ err = registry.Register(p.ID(), 1000000000)
+ a.EqualError(err, ErrInvalidRegisterRange.Error())
+}
+
+// Test the recording function.
+func TestParticipation_Record(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ // Setup p
+ p := makeTestParticipation(1, 0, 3000000, 1)
+ // Setup some other keys to make sure they are not updated.
+ p2 := makeTestParticipation(2, 0, 3000000, 1)
+ p3 := makeTestParticipation(3, 0, 3000000, 1)
+
+ // Install and register all of the keys
+ for _, part := range []Participation{p, p2, p3} {
+ id, err := registry.Insert(part)
+ a.NoError(err)
+ a.Equal(part.ID(), id)
+ err = registry.Register(part.ID(), 0)
+ a.NoError(err)
+ }
+
+ a.NotNil(registry.GetAll())
+
+ a.NoError(registry.Record(p.Parent, 1000, Vote))
+ a.NoError(registry.Record(p.Parent, 2000, BlockProposal))
+ a.NoError(registry.Record(p.Parent, 3000, StateProof))
+
+ // Verify that one and only one key was updated.
+ test := func(registry ParticipationRegistry) {
+ records := registry.GetAll()
+ a.Len(records, 3)
+ for _, record := range records {
+ if record.ParticipationID == p.ID() {
+ require.Equal(t, 1000, int(record.LastVote))
+ require.Equal(t, 2000, int(record.LastBlockProposal))
+ require.Equal(t, 3000, int(record.LastStateProof))
+ } else {
+ require.Equal(t, 0, int(record.LastVote))
+ require.Equal(t, 0, int(record.LastBlockProposal))
+ require.Equal(t, 0, int(record.LastStateProof))
+ }
+ }
+ }
+
+ test(registry)
+ a.NoError(registry.Flush(defaultTimeout))
+ a.Len(registry.dirty, 0)
+
+ // Re-initialize
+ a.NoError(registry.initializeCache())
+ test(registry)
+}
+
+// Test that attempting to record an invalid action generates an error.
+func TestParticipation_RecordInvalidActionAndOutOfRange(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ p := makeTestParticipation(1, 0, 3000000, 1)
+ id, err := registry.Insert(p)
+ a.NoError(err)
+ err = registry.Register(id, 0)
+ a.NoError(err)
+
+ err = registry.Record(p.Parent, 0, ParticipationAction(9000))
+ a.EqualError(err, ErrUnknownParticipationAction.Error())
+
+ err = registry.Record(p.Parent, 3000000, ParticipationAction(9000))
+ a.EqualError(err, ErrUnknownParticipationAction.Error())
+
+ err = registry.Record(p.Parent, 3000001, ParticipationAction(9000))
+ a.EqualError(err, ErrActiveKeyNotFound.Error())
+}
+
+func TestParticipation_RecordNoKey(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ err := registry.Record(basics.Address{}, 0, Vote)
+ a.EqualError(err, ErrActiveKeyNotFound.Error())
+}
+
+// Test that an error is generated if the record function updates multiple records.
+// This would only happen if the DB was in an inconsistent state.
+func TestParticipation_RecordMultipleUpdates(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ // We'll test that recording at this round fails because both keys are active
+ testRound := basics.Round(5000)
+
+ p := makeTestParticipation(1, 0, 3000000, 1)
+ p2 := makeTestParticipation(1, 1, 3000000, 1)
+
+ _, err := registry.Insert(p)
+ a.NoError(err)
+ _, err = registry.Insert(p2)
+ a.NoError(err)
+ err = registry.Register(p.ID(), p.FirstValid)
+ a.NoError(err)
+
+ // Force the DB to have 2 active keys for one account by tampering with the private cache variable
+ recordCopy := registry.cache[p2.ID()]
+ recordCopy.EffectiveFirst = p2.FirstValid
+ recordCopy.EffectiveLast = p2.LastValid
+ registry.cache[p2.ID()] = recordCopy
+ registry.dirty[p2.ID()] = struct{}{}
+ a.NoError(registry.Flush(defaultTimeout))
+ a.Len(registry.dirty, 0)
+ a.NoError(registry.initializeCache())
+
+ // Verify bad state - both records are valid until round 3 million
+ a.NotEqual(p.ID(), p2.ID())
+ recordTest := make([]ParticipationRecord, 0)
+
+ recordP := registry.Get(p.ID())
+ a.False(recordP.IsZero())
+ recordTest = append(recordTest, recordP)
+
+ recordP2 := registry.Get(p2.ID())
+ a.False(recordP2.IsZero())
+ recordTest = append(recordTest, recordP2)
+
+ // Make sure both accounts are active for the test round
+ for _, record := range recordTest {
+ a.True(recordActive(record, testRound), "both records should be active")
+ }
+
+ err = registry.Record(p.Parent, testRound, Vote)
+ a.EqualError(err, ErrMultipleValidKeys.Error())
+}
+
+func TestParticipation_MultipleInsertError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ p := makeTestParticipation(1, 1, 2, 3)
+
+ _, err := registry.Insert(p)
+ a.NoError(err)
+ _, err = registry.Insert(p)
+	a.EqualError(err, ErrAlreadyInserted.Error())
+}
+
+// This is a contrived test on every level. To work around errors we set up the
+// DB and cache in ways that are impossible with the public methods.
+//
+// Multiple records with the same ParticipationID are invalid and should be
+// detected as quickly as possible.
+func TestParticipation_RecordMultipleUpdates_DB(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+
+ p := makeTestParticipation(1, 1, 2000000, 3)
+ id := p.ID()
+
+ // Insert the same record twice
+ // Pretty much copied from the Insert function without error checking.
+ err := registry.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ for i := 0; i < 2; i++ {
+ record := p
+ _, err := tx.Exec(
+ insertKeysetQuery,
+ id[:],
+ record.Parent[:],
+ record.FirstValid,
+ record.LastValid,
+ record.KeyDilution,
+ nil)
+ if err != nil {
+ return fmt.Errorf("unable to insert keyset: %w", err)
+ }
+
+ // Fetch primary key
+ var pk int
+ row := tx.QueryRow(selectLastPK, id[:])
+ err = row.Scan(&pk)
+ if err != nil {
+ return fmt.Errorf("unable to scan pk: %w", err)
+ }
+
+ // Create Rolling entry
+ _, err = tx.Exec(`INSERT INTO Rolling (pk, effectiveFirstRound, effectiveLastRound) VALUES (?, ?, ?)`, pk, 1, 200000)
+ if err != nil {
+				return fmt.Errorf("unable to insert rolling entry: %w", err)
+ }
+
+ var num int
+ row = tx.QueryRow(`SELECT COUNT(*) FROM Keysets WHERE participationID=?`, id[:])
+ err = row.Scan(&num)
+ if err != nil {
+ return fmt.Errorf("unable to scan pk: %w", err)
+ }
+ }
+
+ return nil
+ })
+
+ a.NoError(err)
+
+ // Now that the DB has multiple records for one participation ID, check that all the methods notice.
+
+ // Initializing the cache
+ err = registry.initializeCache()
+ a.EqualError(err, ErrMultipleKeysForID.Error())
+
+ // Registering the ID - No error because it is already registered so we don't try to re-register.
+ registry.cache[id] = ParticipationRecord{
+ ParticipationID: id,
+ Account: p.Parent,
+ FirstValid: p.FirstValid,
+ LastValid: p.LastValid,
+ KeyDilution: p.KeyDilution,
+ EffectiveFirst: p.FirstValid,
+ EffectiveLast: p.LastValid,
+ }
+ err = registry.Register(id, 1)
+ a.NoError(err)
+
+ // Clear the first/last so that the no-op registration can't be detected
+ record := registry.cache[id]
+ record.EffectiveFirst = 0
+ record.EffectiveLast = 0
+ registry.cache[id] = record
+
+ err = registry.Register(id, 1)
+ a.NoError(err)
+ err = registry.Flush(defaultTimeout)
+ a.Error(err)
+ a.Contains(err.Error(), "unable to disable old key")
+ a.EqualError(errors.Unwrap(err), ErrMultipleKeysForID.Error())
+
+ // Flushing changes detects that multiple records are updated
+ registry.dirty[id] = struct{}{}
+ err = registry.Flush(defaultTimeout)
+ a.EqualError(err, ErrMultipleKeysForID.Error())
+ a.Len(registry.dirty, 1)
+
+ err = registry.Flush(defaultTimeout)
+ a.EqualError(err, ErrMultipleKeysForID.Error())
+
+ // Make sure the error message is logged when closing the registry.
+ var logOutput strings.Builder
+ registry.log.SetOutput(&logOutput)
+ registry.Close()
+ a.Contains(logOutput.String(), "participationDB unhandled error during Close/Flush")
+ a.Contains(logOutput.String(), ErrMultipleKeysForID.Error())
+}
+
+func TestParticipation_NoKeyToUpdate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := assert.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ registry.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ record := ParticipationRecord{
+ ParticipationID: ParticipationID{},
+ Account: basics.Address{},
+ FirstValid: 1,
+ LastValid: 2,
+ KeyDilution: 3,
+ EffectiveFirst: 4,
+ EffectiveLast: 5,
+ }
+ err := updateRollingFields(ctx, tx, record)
+ a.EqualError(err, ErrNoKeyForID.Error())
+ return nil
+ })
+}
+
+// TestParticipation_Blobs adds some secrets to the registry and makes sure the same ones are returned.
+func TestParticipation_Blobs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ access, err := db.MakeAccessor("writetest_root", false, true)
+ if err != nil {
+ panic(err)
+ }
+ root, err := GenerateRoot(access)
+ access.Close()
+ a.NoError(err)
+
+ access, err = db.MakeAccessor("writetest", false, true)
+ if err != nil {
+ panic(err)
+ }
+ part, err := FillDBWithParticipationKeys(access, root.Address(), 0, 101, config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution)
+ access.Close()
+ a.NoError(err)
+
+ check := func(id ParticipationID) {
+ record := registry.Get(id)
+ a.NotEqual(ParticipationRecord{}, record)
+ a.Equal(id, record.ParticipationID)
+ a.Equal(part.VRF, record.VRF)
+ a.Equal(part.Voting.Snapshot(), record.Voting.Snapshot())
+ }
+
+ id, err := registry.Insert(part.Participation)
+ a.NoError(err)
+ a.NoError(registry.Flush(defaultTimeout))
+ a.Equal(id, part.ID())
+ // check the initial caching
+ check(id)
+
+ // check the re-initialized object
+ a.NoError(registry.initializeCache())
+ check(id)
+}
+
+// TestParticipation_EmptyBlobs makes sure empty blobs are set to nil.
+func TestParticipation_EmptyBlobs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := assert.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ access, err := db.MakeAccessor("writetest_root", false, true)
+ if err != nil {
+ panic(err)
+ }
+ root, err := GenerateRoot(access)
+ access.Close()
+ a.NoError(err)
+
+ access, err = db.MakeAccessor("writetest", false, true)
+ if err != nil {
+ panic(err)
+ }
+ part, err := FillDBWithParticipationKeys(access, root.Address(), 0, 101, config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution)
+ access.Close()
+ a.NoError(err)
+ part.VRF = nil
+ part.Voting = nil
+
+ check := func(id ParticipationID) {
+ record := registry.Get(id)
+ a.NotEqual(ParticipationRecord{}, record)
+ a.Equal(id, record.ParticipationID)
+ a.True(record.VRF.MsgIsZero())
+ a.True(record.Voting.MsgIsZero())
+ }
+
+ id, err := registry.Insert(part.Participation)
+ a.NoError(err)
+ a.NoError(registry.Flush(defaultTimeout))
+ a.Equal(id, part.ID())
+ // check the initial caching
+ check(id)
+
+ // check the re-initialized object
+ a.NoError(registry.initializeCache())
+ check(id)
+}
+
+func TestRegisterUpdatedEvent(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := assert.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ p := makeTestParticipation(1, 1, 2, 3)
+ p2 := makeTestParticipation(2, 4, 5, 6)
+
+ id1, err := registry.Insert(p)
+ a.NoError(err)
+ a.Equal(p.ID(), id1)
+
+ id2, err := registry.Insert(p2)
+ a.NoError(err)
+ a.Equal(p2.ID(), id2)
+
+ record1 := registry.Get(id1)
+ a.False(record1.IsZero())
+ record2 := registry.Get(id2)
+ a.False(record2.IsZero())
+
+ // Delete the second one to make sure it can't be updated.
+ a.NoError(registry.Delete(id2))
+ a.NoError(registry.Flush(defaultTimeout))
+
+	// The update for id2 is marked optional (required=false), so its failure is ignored.
+ updates := make(map[ParticipationID]updatingParticipationRecord)
+ updates[id1] = updatingParticipationRecord{
+ ParticipationRecord: record1,
+ required: true,
+ }
+ updates[id2] = updatingParticipationRecord{
+ ParticipationRecord: record2,
+ required: false,
+ }
+
+ registry.writeQueue <- partDBWriteRecord{
+ registerUpdated: updates,
+ }
+
+ a.NoError(registry.Flush(defaultTimeout))
+
+ // This time, make it required and we should have an error
+ updates[id2] = updatingParticipationRecord{
+ ParticipationRecord: record2,
+ required: true,
+ }
+
+ registry.writeQueue <- partDBWriteRecord{
+ registerUpdated: updates,
+ }
+
+ err = registry.Flush(defaultTimeout)
+ a.Contains(err.Error(), "unable to disable old key when registering")
+ a.Contains(err.Error(), ErrNoKeyForID.Error())
+}
+
+// TestFlushDeadlock reproduces a deadlock that occurred when Flush was called
+// repeatedly, and verifies the fix.
+func TestFlushDeadlock(t *testing.T) {
+ var wg sync.WaitGroup
+
+ partitiontest.PartitionTest(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ spam := func() {
+ defer wg.Done()
+ timeout := time.After(time.Second)
+ for {
+ select {
+ case <-timeout:
+ return
+ default:
+ // If there is a deadlock, this timeout will trigger.
+ assert.NoError(t, registry.Flush(2*time.Second))
+ }
+ }
+ }
+
+ // Start spammers.
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go spam()
+ }
+
+ wg.Wait()
+}
+
+func benchmarkKeyRegistration(numKeys int, b *testing.B) {
+ // setup
+ rootDB, err := db.OpenPair(b.Name(), true)
+ if err != nil {
+		b.Fatal(err)
+ }
+ registry, err := makeParticipationRegistry(rootDB, logging.TestingLog(b))
+ if err != nil {
+		b.Fatal(err)
+ }
+
+	// Insert records so that the registration benchmarks below have keys to work with.
+ b.Run(fmt.Sprintf("KeyInsert_%d", numKeys), func(b *testing.B) {
+ for n := 0; n < b.N; n++ {
+ for key := 0; key < numKeys; key++ {
+ p := makeTestParticipation(key, basics.Round(0), basics.Round(1000000), 3)
+ registry.Insert(p)
+ }
+ }
+ })
+
+ // The first call to Register updates the DB.
+ b.Run(fmt.Sprintf("KeyRegistered_%d", numKeys), func(b *testing.B) {
+ for n := 0; n < b.N; n++ {
+ for key := 0; key < numKeys; key++ {
+ p := makeTestParticipation(key, basics.Round(0), basics.Round(1000000), 3)
+
+ // Unfortunately we need to repeatedly clear out the registration fields to ensure the
+ // db update runs each time this is called.
+ record := registry.cache[p.ID()]
+ record.EffectiveFirst = 0
+ record.EffectiveLast = 0
+ registry.cache[p.ID()] = record
+ registry.Register(p.ID(), 50)
+ }
+ }
+ })
+
+ // The keys should now be updated, so Register is a no-op.
+ b.Run(fmt.Sprintf("NoOp_%d", numKeys), func(b *testing.B) {
+ for n := 0; n < b.N; n++ {
+ for key := 0; key < numKeys; key++ {
+ p := makeTestParticipation(key, basics.Round(0), basics.Round(1000000), 3)
+ registry.Register(p.ID(), 50)
+ }
+ }
+ })
+}
+
+func BenchmarkKeyRegistration1(b *testing.B) { benchmarkKeyRegistration(1, b) }
+func BenchmarkKeyRegistration5(b *testing.B) { benchmarkKeyRegistration(5, b) }
+func BenchmarkKeyRegistration10(b *testing.B) { benchmarkKeyRegistration(10, b) }
+func BenchmarkKeyRegistration50(b *testing.B) { benchmarkKeyRegistration(50, b) }
diff --git a/data/accountManager.go b/data/accountManager.go
index 79a57287b..b615a211f 100644
--- a/data/accountManager.go
+++ b/data/accountManager.go
@@ -18,11 +18,11 @@ package data
import (
"fmt"
+ "time"
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
@@ -31,37 +31,27 @@ import (
"github.com/algorand/go-algorand/protocol"
)
-// A ParticipationKeyIdentity defines the parameters that makes a pariticpation key unique.
-type ParticipationKeyIdentity struct {
- basics.Address // the address this participation key is used to vote for.
-
- // FirstValid and LastValid are inclusive.
- FirstValid basics.Round
- LastValid basics.Round
-
- VoteID crypto.OneTimeSignatureVerifier
- SelectionID crypto.VrfPubkey
-}
-
// AccountManager loads and manages accounts for the node
type AccountManager struct {
mu deadlock.Mutex
- partKeys map[ParticipationKeyIdentity]account.PersistedParticipation
+ partKeys map[account.ParticipationKeyIdentity]account.PersistedParticipation
// Map to keep track of accounts for which we've sent
// AccountRegistered telemetry events
registeredAccounts map[string]bool
- log logging.Logger
+ registry account.ParticipationRegistry
+ log logging.Logger
}
// MakeAccountManager creates a new AccountManager with a custom logger
-func MakeAccountManager(log logging.Logger) *AccountManager {
+func MakeAccountManager(log logging.Logger, registry account.ParticipationRegistry) *AccountManager {
manager := &AccountManager{}
manager.log = log
- manager.partKeys = make(map[ParticipationKeyIdentity]account.PersistedParticipation)
+ manager.partKeys = make(map[account.ParticipationKeyIdentity]account.PersistedParticipation)
manager.registeredAccounts = make(map[string]bool)
+ manager.registry = registry
return manager
}
@@ -77,6 +67,31 @@ func (manager *AccountManager) Keys(rnd basics.Round) (out []account.Participati
}
}
return out
+
+ // PKI TODO: source keys from the registry.
+ // This kinda works, but voting keys are not updated.
+ /*
+ for _, record := range manager.registry.GetAll() {
+ part := account.Participation{
+ Parent: record.Account,
+ VRF: record.VRF,
+ Voting: record.Voting,
+ FirstValid: record.FirstValid,
+ LastValid: record.LastValid,
+ KeyDilution: record.KeyDilution,
+ }
+
+ if part.OverlapsInterval(rnd, rnd) {
+ out = append(out, part)
+
+ id := part.ID()
+ if !bytes.Equal(id[:], record.ParticipationID[:]) {
+ manager.log.Warnf("Participation IDs do not equal while fetching keys... %s != %s\n", id, record.ParticipationID)
+ }
+ }
+ }
+ return out
+ */
}
// HasLiveKeys returns true if we have any Participation
@@ -97,18 +112,28 @@ func (manager *AccountManager) HasLiveKeys(from, to basics.Round) bool {
// The return value indicates if the key has been added (true) or
// if this is a duplicate key (false).
func (manager *AccountManager) AddParticipation(participation account.PersistedParticipation) bool {
+ // Tell the ParticipationRegistry about the Participation. Duplicate entries
+ // are ignored.
+ pid, err := manager.registry.Insert(participation.Participation)
+ if err != nil && err != account.ErrAlreadyInserted {
+		manager.log.Warnf("Failed to insert participation key: %v", err)
+ }
+ manager.log.Infof("Inserted key (%s) for account (%s) first valid (%d) last valid (%d)\n",
+ pid, participation.Parent, participation.FirstValid, participation.LastValid)
+
manager.mu.Lock()
defer manager.mu.Unlock()
address := participation.Address()
first, last := participation.ValidInterval()
- partkeyID := ParticipationKeyIdentity{
- Address: address,
+ partkeyID := account.ParticipationKeyIdentity{
+ Parent: address,
FirstValid: first,
LastValid: last,
+ VRFSK: participation.VRF.SK,
VoteID: participation.Voting.OneTimeSignatureVerifier,
- SelectionID: participation.VRF.PK,
+ KeyDilution: participation.KeyDilution,
}
// Check if we already have participation keys for this address in this interval
@@ -177,11 +202,40 @@ func (manager *AccountManager) DeleteOldKeys(latestHdr bookkeeping.BlockHeader,
}
}()
- // wait all all disk flushes, and report errors as they appear.
+ // wait for all disk flushes, and report errors as they appear.
for errString, errCh := range pendingItems {
err := <-errCh
if err != nil {
logging.Base().Warnf("%s: %v", errString, err)
}
}
+
+	// PKI TODO: This needs to update the partkeys also; see the 'DeleteOldKeys' function above. It is
+	//           part of PersistedParticipation, but just calls 'part.Voting.DeleteBeforeFineGrained'.
+	// Delete expired records from the participation registry.
+ if err := manager.registry.DeleteExpired(latestHdr.Round); err != nil {
+		manager.log.Warnf("error while deleting expired records from participation registry: %v", err)
+ }
+}
+
+// Registry fetches the ParticipationRegistry.
+func (manager *AccountManager) Registry() account.ParticipationRegistry {
+ return manager.registry
+}
+
+// FlushRegistry tells the underlying participation registry to flush its change cache to the DB.
+func (manager *AccountManager) FlushRegistry(timeout time.Duration) {
+ err := manager.registry.Flush(timeout)
+ if err != nil {
+		manager.log.Warnf("error while flushing the registry: %v", err)
+ }
+}
+
+// Record asynchronously records a participation key usage event.
+func (manager *AccountManager) Record(account basics.Address, round basics.Round, participationType account.ParticipationAction) {
+ // This function updates a cache in the ParticipationRegistry, we must call Flush to persist the changes.
+ err := manager.registry.Record(account, round, participationType)
+ if err != nil {
+		manager.log.Warnf("node.Record: Account %v not able to record participation (%d) on round %d: %v", account, participationType, round, err)
+ }
}
diff --git a/data/basics/ccertpart.go b/data/basics/ccertpart.go
new file mode 100644
index 000000000..097cb6c27
--- /dev/null
+++ b/data/basics/ccertpart.go
@@ -0,0 +1,50 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package basics
+
+import (
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+// A Participant corresponds to an account whose AccountData.Status
+// is Online, and for which the expected sigRound satisfies
+// AccountData.VoteFirstValid <= sigRound <= AccountData.VoteLastValid.
+//
+// In the Algorand ledger, it is possible for multiple accounts to have
+// the same PK. Thus, the PK is not necessarily unique among Participants.
+// However, each account will produce a unique Participant struct, to avoid
+// potential DoS attacks where one account claims to have the same VoteID PK
+// as another account.
+type Participant struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ // PK is AccountData.VoteID.
+ PK crypto.OneTimeSignatureVerifier `codec:"p"`
+
+ // Weight is AccountData.MicroAlgos.
+ Weight uint64 `codec:"w"`
+
+ // KeyDilution is AccountData.KeyDilution() with the protocol for sigRound
+ // as expected by the Builder.
+ KeyDilution uint64 `codec:"d"`
+}
+
+// ToBeHashed implements the crypto.Hashable interface.
+func (p Participant) ToBeHashed() (protocol.HashID, []byte) {
+ return protocol.CompactCertPart, protocol.Encode(&p)
+}
diff --git a/data/basics/fields_test.go b/data/basics/fields_test.go
new file mode 100644
index 000000000..8027fa29a
--- /dev/null
+++ b/data/basics/fields_test.go
@@ -0,0 +1,201 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package basics_test
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
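+// typePath records a path through a type graph as a sequence of steps: a map
+// key, an element value, or a named struct field.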
+type typePath []string
+
+func (p typePath) addMapKey() typePath {
+ return append(p, "map_key")
+}
+
+func (p typePath) addValue() typePath {
+ return append(p, "value")
+}
+
+func (p typePath) addField(fieldName string) typePath {
+ return append(p, "field "+fieldName)
+}
+
+func (p typePath) validatePathFrom(t reflect.Type) error {
+ if len(p) == 0 {
+ // path is empty, so it's vacuously valid
+ return nil
+ }
+
+ value := p[0]
+ switch {
+ case value == "map_key":
+ return p[1:].validatePathFrom(t.Key())
+ case value == "value":
+ return p[1:].validatePathFrom(t.Elem())
+ case strings.HasPrefix(value, "field "):
+ fieldName := value[len("field "):]
+ fieldType, ok := t.FieldByName(fieldName)
+ if !ok {
+ return fmt.Errorf("Type '%s' does not have the field '%s'", t.Name(), fieldName)
+ }
+ return p[1:].validatePathFrom(fieldType.Type)
+ default:
+ return fmt.Errorf("Unexpected item in path: %s", value)
+ }
+}
+
+func (p typePath) Equals(other typePath) bool {
+ if len(p) != len(other) {
+ return false
+ }
+ for i := range p {
+ if p[i] != other[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (p typePath) String() string {
+ return strings.Join(p, "->")
+}
+
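+// checkReferencedTypes performs a depth-first walk over every type reachable
+// from the last element of typeStack, calling check on each one. The seen set
+// breaks cycles in self-referential types.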
+func checkReferencedTypes(seen map[reflect.Type]bool, path typePath, typeStack []reflect.Type, check func(path typePath, stack []reflect.Type) bool) {
+ currentType := typeStack[len(typeStack)-1]
+
+ if _, seenType := seen[currentType]; seenType {
+ return
+ }
+
+ if !check(path, typeStack) {
+ // if currentType is not ok, don't visit its children
+ return
+ }
+
+ // add currentType to seen set, to avoid infinite recursion if currentType references itself
+ seen[currentType] = true
+
+ // after currentType's children are visited, "forget" the type, so we can examine it again if needed
+ // if this didn't happen, only 1 error per invalid type would get reported
+ defer delete(seen, currentType)
+
+ switch currentType.Kind() {
+ case reflect.Map:
+ newPath := path.addMapKey()
+ newStack := append(typeStack, currentType.Key())
+ checkReferencedTypes(seen, newPath, newStack, check)
+ fallthrough
+ case reflect.Array, reflect.Slice, reflect.Ptr:
+ newPath := path.addValue()
+ newStack := append(typeStack, currentType.Elem())
+ checkReferencedTypes(seen, newPath, newStack, check)
+ case reflect.Struct:
+ for i := 0; i < currentType.NumField(); i++ {
+ field := currentType.Field(i)
+ newPath := path.addField(field.Name)
+ newStack := append(typeStack, field.Type)
+ checkReferencedTypes(seen, newPath, newStack, check)
+ }
+ }
+}
+
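+// makeTypeCheckFunction returns a check function which fails the test when a
+// string (or another disallowed kind) is reachable from startType, unless the
+// offending path is listed in exceptions.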
+func makeTypeCheckFunction(t *testing.T, exceptions []typePath, startType reflect.Type) func(path typePath, stack []reflect.Type) bool {
+ for _, exception := range exceptions {
+ err := exception.validatePathFrom(startType)
+ require.NoError(t, err)
+ }
+
+ return func(path typePath, stack []reflect.Type) bool {
+ currentType := stack[len(stack)-1]
+
+ for _, exception := range exceptions {
+ if path.Equals(exception) {
+ t.Logf("Skipping exception for path: %s", path.String())
+ return true
+ }
+ }
+
+ switch currentType.Kind() {
+ case reflect.String:
+ t.Errorf("Invalid string type referenced from %s. Use []byte instead. Full path: %s", startType.Name(), path.String())
+ return false
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.UnsafePointer:
+ // raise an error if one of these strange types is referenced too
+ t.Errorf("Invalid type %s referenced from %s. Full path: %s", currentType.Name(), startType.Name(), path.String())
+ return false
+ default:
+ return true
+ }
+ }
+}
+
+func TestBlockFields(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ typeToCheck := reflect.TypeOf(bookkeeping.Block{})
+
+ // These exceptions are for pre-existing usages of string. Only add to this list if you really need to use string.
+ exceptions := []typePath{
+ typePath{}.addField("BlockHeader").addField("GenesisID"),
+ typePath{}.addField("BlockHeader").addField("UpgradeState").addField("CurrentProtocol"),
+ typePath{}.addField("BlockHeader").addField("UpgradeState").addField("NextProtocol"),
+ typePath{}.addField("BlockHeader").addField("UpgradeVote").addField("UpgradePropose"),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("SignedTxn").addField("Txn").addField("Type"),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("SignedTxn").addField("Txn").addField("Header").addField("GenesisID"),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("SignedTxn").addField("Txn").addField("AssetConfigTxnFields").addField("AssetParams").addField("UnitName"),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("SignedTxn").addField("Txn").addField("AssetConfigTxnFields").addField("AssetParams").addField("AssetName"),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("SignedTxn").addField("Txn").addField("AssetConfigTxnFields").addField("AssetParams").addField("URL"),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("ApplyData").addField("EvalDelta").addField("GlobalDelta").addMapKey(),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("ApplyData").addField("EvalDelta").addField("GlobalDelta").addValue().addField("Bytes"),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("ApplyData").addField("EvalDelta").addField("LocalDeltas").addValue().addMapKey(),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("ApplyData").addField("EvalDelta").addField("LocalDeltas").addValue().addValue().addField("Bytes"),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("ApplyData").addField("EvalDelta").addField("Logs").addValue(),
+ }
+
+ seen := make(map[reflect.Type]bool)
+
+ checkReferencedTypes(seen, nil, []reflect.Type{typeToCheck}, makeTypeCheckFunction(t, exceptions, typeToCheck))
+}
+
+func TestAccountDataFields(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ typeToCheck := reflect.TypeOf(basics.AccountData{})
+
+ // These exceptions are for pre-existing usages of string. Only add to this list if you really need to use string.
+ exceptions := []typePath{
+ typePath{}.addField("AssetParams").addValue().addField("UnitName"),
+ typePath{}.addField("AssetParams").addValue().addField("AssetName"),
+ typePath{}.addField("AssetParams").addValue().addField("URL"),
+ typePath{}.addField("AppLocalStates").addValue().addField("KeyValue").addMapKey(),
+ typePath{}.addField("AppLocalStates").addValue().addField("KeyValue").addValue().addField("Bytes"),
+ typePath{}.addField("AppParams").addValue().addField("GlobalState").addMapKey(),
+ typePath{}.addField("AppParams").addValue().addField("GlobalState").addValue().addField("Bytes"),
+ }
+
+ seen := make(map[reflect.Type]bool)
+
+ checkReferencedTypes(seen, nil, []reflect.Type{typeToCheck}, makeTypeCheckFunction(t, exceptions, typeToCheck))
+}
diff --git a/data/basics/msgp_gen.go b/data/basics/msgp_gen.go
index 8e61362d6..f76a37db5 100644
--- a/data/basics/msgp_gen.go
+++ b/data/basics/msgp_gen.go
@@ -107,6 +107,14 @@ import (
// |-----> Msgsize
// |-----> MsgIsZero
//
+// Participant
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// Round
// |-----> MarshalMsg
// |-----> CanMarshalMsg
@@ -4118,6 +4126,158 @@ func (z DeltaAction) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *Participant) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(3)
+ var zb0001Mask uint8 /* 4 bits */
+ if (*z).KeyDilution == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if (*z).PK.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ if (*z).Weight == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x8
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "d"
+ o = append(o, 0xa1, 0x64)
+ o = msgp.AppendUint64(o, (*z).KeyDilution)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "p"
+ o = append(o, 0xa1, 0x70)
+ o = (*z).PK.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x8) == 0 { // if not empty
+ // string "w"
+ o = append(o, 0xa1, 0x77)
+ o = msgp.AppendUint64(o, (*z).Weight)
+ }
+ }
+ return
+}
+
+func (_ *Participant) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*Participant)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *Participant) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).PK.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "PK")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Weight, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Weight")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).KeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KeyDilution")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = Participant{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "p":
+ bts, err = (*z).PK.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "PK")
+ return
+ }
+ case "w":
+ (*z).Weight, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Weight")
+ return
+ }
+ case "d":
+ (*z).KeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KeyDilution")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *Participant) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*Participant)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *Participant) Msgsize() (s int) {
+ s = 1 + 2 + (*z).PK.Msgsize() + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *Participant) MsgIsZero() bool {
+ return ((*z).PK.MsgIsZero()) && ((*z).Weight == 0) && ((*z).KeyDilution == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z Round) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendUint64(o, uint64(z))
diff --git a/data/basics/msgp_gen_test.go b/data/basics/msgp_gen_test.go
index 5ac65240f..8756c3c4e 100644
--- a/data/basics/msgp_gen_test.go
+++ b/data/basics/msgp_gen_test.go
@@ -372,6 +372,66 @@ func BenchmarkUnmarshalBalanceRecord(b *testing.B) {
}
}
+func TestMarshalUnmarshalParticipant(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := Participant{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingParticipant(t *testing.T) {
+ protocol.RunEncodingTest(t, &Participant{})
+}
+
+func BenchmarkMarshalMsgParticipant(b *testing.B) {
+ v := Participant{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgParticipant(b *testing.B) {
+ v := Participant{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalParticipant(b *testing.B) {
+ v := Participant{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalStateDelta(t *testing.T) {
partitiontest.PartitionTest(t)
v := StateDelta{}
diff --git a/data/basics/userBalance.go b/data/basics/userBalance.go
index 4a5253e42..a619438d3 100644
--- a/data/basics/userBalance.go
+++ b/data/basics/userBalance.go
@@ -96,6 +96,19 @@ func UnmarshalStatus(value string) (s Status, err error) {
return
}
+// OnlineAccountData contains the voting information for a single account.
+//msgp:ignore OnlineAccountData
+type OnlineAccountData struct {
+ MicroAlgosWithRewards MicroAlgos
+
+ VoteID crypto.OneTimeSignatureVerifier
+ SelectionID crypto.VRFVerifier
+
+ VoteFirstValid Round
+ VoteLastValid Round
+ VoteKeyDilution uint64
+}
+
// AccountData contains the data associated with a given address.
//
// This includes the account balance, cryptographic public keys,
@@ -397,6 +410,16 @@ func MakeAccountData(status Status, algos MicroAlgos) AccountData {
return AccountData{Status: status, MicroAlgos: algos}
}
+// ClearOnlineState resets the account's fields to indicate that the account is an offline account
+func (u *AccountData) ClearOnlineState() {
+ u.Status = Offline
+ u.VoteFirstValid = Round(0)
+ u.VoteLastValid = Round(0)
+ u.VoteKeyDilution = 0
+ u.VoteID = crypto.OneTimeSignatureVerifier{}
+ u.SelectionID = crypto.VRFVerifier{}
+}
+
// Money returns the amount of MicroAlgos associated with the user's account
func (u AccountData) Money(proto config.ConsensusParams, rewardsLevel uint64) (money MicroAlgos, rewards MicroAlgos) {
e := u.WithUpdatedRewards(proto, rewardsLevel)
@@ -466,20 +489,35 @@ func (u AccountData) MinBalance(proto *config.ConsensusParams) (res MicroAlgos)
return res
}
-// VotingStake returns the amount of MicroAlgos associated with the user's account
-// for the purpose of participating in the Algorand protocol. It assumes the
-// caller has already updated rewards appropriately using WithUpdatedRewards().
-func (u AccountData) VotingStake() MicroAlgos {
+// OnlineAccountData returns the subset of AccountData needed by agreement, as
+// an OnlineAccountData structure. The account is expected to be Online;
+// otherwise a zero value is returned.
+func (u AccountData) OnlineAccountData() OnlineAccountData {
if u.Status != Online {
- return MicroAlgos{Raw: 0}
+ // if the account is not Online and agreement requests it for some reason, clear it out
+ return OnlineAccountData{}
+ }
+
+ return OnlineAccountData{
+ MicroAlgosWithRewards: u.MicroAlgos,
+
+ VoteID: u.VoteID,
+ SelectionID: u.SelectionID,
+ VoteFirstValid: u.VoteFirstValid,
+ VoteLastValid: u.VoteLastValid,
+ VoteKeyDilution: u.VoteKeyDilution,
}
+}
- return u.MicroAlgos
+// VotingStake returns the amount of MicroAlgos associated with the user's account
+// for the purpose of participating in the Algorand protocol. It assumes the
+// caller has already updated rewards appropriately using WithUpdatedRewards().
+func (u OnlineAccountData) VotingStake() MicroAlgos {
+ return u.MicroAlgosWithRewards
}
// KeyDilution returns the key dilution for this account,
// returning the default key dilution if not explicitly specified.
-func (u AccountData) KeyDilution(proto config.ConsensusParams) uint64 {
+func (u OnlineAccountData) KeyDilution(proto config.ConsensusParams) uint64 {
if u.VoteKeyDilution != 0 {
return u.VoteKeyDilution
}
diff --git a/data/basics/userBalance_test.go b/data/basics/userBalance_test.go
index 1670fe58d..04a770d92 100644
--- a/data/basics/userBalance_test.go
+++ b/data/basics/userBalance_test.go
@@ -105,16 +105,11 @@ func makeString(len int) string {
return s
}
-func TestEncodedAccountDataSize(t *testing.T) {
- partitiontest.PartitionTest(t)
-
+func getSampleAccountData() AccountData {
oneTimeSecrets := crypto.GenerateOneTimeSignatureSecrets(0, 1)
vrfSecrets := crypto.GenerateVRFSecrets()
- maxStateSchema := StateSchema{
- NumUint: 0x1234123412341234,
- NumByteSlice: 0x1234123412341234,
- }
- ad := AccountData{
+
+ return AccountData{
Status: NotParticipating,
MicroAlgos: MicroAlgos{},
RewardsBase: 0x1234123412341234,
@@ -128,9 +123,19 @@ func TestEncodedAccountDataSize(t *testing.T) {
Assets: make(map[AssetIndex]AssetHolding),
AppLocalStates: make(map[AppIndex]AppLocalState),
AppParams: make(map[AppIndex]AppParams),
- TotalAppSchema: maxStateSchema,
AuthAddr: Address(crypto.Hash([]byte{1, 2, 3, 4})),
}
+}
+
+func TestEncodedAccountDataSize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ maxStateSchema := StateSchema{
+ NumUint: 0x1234123412341234,
+ NumByteSlice: 0x1234123412341234,
+ }
+ ad := getSampleAccountData()
+ ad.TotalAppSchema = maxStateSchema
// TODO after applications enabled: change back to protocol.ConsensusCurrentVersion
currentConsensusParams := config.Consensus[protocol.ConsensusFuture]
@@ -253,3 +258,20 @@ func TestAppIndexHashing(t *testing.T) {
i = AppIndex(77)
require.Equal(t, "PCYUFPA2ZTOYWTP43MX2MOX2OWAIAXUDNC2WFCXAGMRUZ3DYD6BWFDL5YM", i.Address().String())
}
+
+func TestOnlineAccountData(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ad := getSampleAccountData()
+ ad.MicroAlgos.Raw = 1000000
+ ad.Status = Offline
+
+ oad := ad.OnlineAccountData()
+ require.Empty(t, oad)
+
+ ad.Status = Online
+ oad = ad.OnlineAccountData()
+ require.Equal(t, ad.MicroAlgos, oad.MicroAlgosWithRewards)
+ require.Equal(t, ad.VoteID, oad.VoteID)
+ require.Equal(t, ad.SelectionID, oad.SelectionID)
+}
diff --git a/data/bookkeeping/block.go b/data/bookkeeping/block.go
index a60a9362c..bb9ae321b 100644
--- a/data/bookkeeping/block.go
+++ b/data/bookkeeping/block.go
@@ -125,6 +125,21 @@ type (
// for multiple types of certs.
//msgp:sort protocol.CompactCertType protocol.SortCompactCertType
CompactCert map[protocol.CompactCertType]CompactCertState `codec:"cc,allocbound=protocol.NumCompactCertTypes"`
+
+ // ParticipationUpdates contains the information needed to mark
+ // certain accounts offline because their participation keys expired
+ ParticipationUpdates
+ }
+
+ // ParticipationUpdates represents participation account data that
+ // needs to be checked/acted on by the network
+ ParticipationUpdates struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ // ExpiredParticipationAccounts contains a list of online accounts
+ // that need to be converted to offline since their
+ // participation keys expired.
+ ExpiredParticipationAccounts []basics.Address `codec:"partupdrmv,allocbound=config.MaxProposedExpiredOnlineAccounts"`
}
// RewardsState represents the global parameters controlling the rate
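Because ParticipationUpdates is embedded in BlockHeader, the new field rides along in block encoding under the "partupdrmv" key, bounded by config.MaxProposedExpiredOnlineAccounts. A hedged sketch (the address below is fabricated for illustration, mirroring the hashing pattern used in the tests above):

    package main

    import (
        "fmt"

        "github.com/algorand/go-algorand/crypto"
        "github.com/algorand/go-algorand/data/basics"
        "github.com/algorand/go-algorand/data/bookkeeping"
    )

    func main() {
        var hdr bookkeeping.BlockHeader

        // A proposer lists accounts whose participation keys have expired;
        // the embedded field is promoted, so it is set directly on the header.
        expired := basics.Address(crypto.Hash([]byte("example-account")))
        hdr.ExpiredParticipationAccounts = append(hdr.ExpiredParticipationAccounts, expired)

        // The embedded struct is flattened into the header's msgpack map.
        enc := hdr.MarshalMsg(nil)
        fmt.Println(len(enc) > 0)
    }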
diff --git a/data/bookkeeping/msgp_gen.go b/data/bookkeeping/msgp_gen.go
index 8b88b8fbe..e7a810e3d 100644
--- a/data/bookkeeping/msgp_gen.go
+++ b/data/bookkeeping/msgp_gen.go
@@ -5,7 +5,9 @@ package bookkeeping
import (
"sort"
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/msgp/msgp"
)
@@ -59,6 +61,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// ParticipationUpdates
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// RewardsState
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -80,108 +90,112 @@ import (
func (z *Block) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0003Len := uint32(24)
- var zb0003Mask uint32 /* 27 bits */
+ zb0004Len := uint32(25)
+ var zb0004Mask uint32 /* 29 bits */
if len((*z).BlockHeader.CompactCert) == 0 {
- zb0003Len--
- zb0003Mask |= 0x8
+ zb0004Len--
+ zb0004Mask |= 0x10
}
if (*z).BlockHeader.RewardsState.RewardsLevel == 0 {
- zb0003Len--
- zb0003Mask |= 0x10
+ zb0004Len--
+ zb0004Mask |= 0x20
}
if (*z).BlockHeader.RewardsState.FeeSink.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20
+ zb0004Len--
+ zb0004Mask |= 0x40
}
if (*z).BlockHeader.RewardsState.RewardsResidue == 0 {
- zb0003Len--
- zb0003Mask |= 0x40
+ zb0004Len--
+ zb0004Mask |= 0x80
}
if (*z).BlockHeader.GenesisID == "" {
- zb0003Len--
- zb0003Mask |= 0x80
+ zb0004Len--
+ zb0004Mask |= 0x100
}
if (*z).BlockHeader.GenesisHash.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x100
+ zb0004Len--
+ zb0004Mask |= 0x200
}
if (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x200
+ zb0004Len--
+ zb0004Mask |= 0x400
}
if (*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400
+ zb0004Len--
+ zb0004Mask |= 0x800
}
if (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800
+ zb0004Len--
+ zb0004Mask |= 0x1000
}
if (*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0 {
- zb0003Len--
- zb0003Mask |= 0x1000
+ zb0004Len--
+ zb0004Mask |= 0x2000
+ }
+ if len((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x4000
}
if (*z).BlockHeader.Branch.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x2000
+ zb0004Len--
+ zb0004Mask |= 0x8000
}
if (*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x4000
+ zb0004Len--
+ zb0004Mask |= 0x10000
}
if (*z).BlockHeader.RewardsState.RewardsRate == 0 {
- zb0003Len--
- zb0003Mask |= 0x8000
+ zb0004Len--
+ zb0004Mask |= 0x20000
}
if (*z).BlockHeader.Round.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x10000
+ zb0004Len--
+ zb0004Mask |= 0x40000
}
if (*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20000
+ zb0004Len--
+ zb0004Mask |= 0x80000
}
if (*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x40000
+ zb0004Len--
+ zb0004Mask |= 0x100000
}
if (*z).BlockHeader.Seed.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x80000
+ zb0004Len--
+ zb0004Mask |= 0x200000
}
if (*z).BlockHeader.TxnCounter == 0 {
- zb0003Len--
- zb0003Mask |= 0x100000
+ zb0004Len--
+ zb0004Mask |= 0x400000
}
if (*z).BlockHeader.TimeStamp == 0 {
- zb0003Len--
- zb0003Mask |= 0x200000
+ zb0004Len--
+ zb0004Mask |= 0x800000
}
if (*z).BlockHeader.TxnRoot.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400000
+ zb0004Len--
+ zb0004Mask |= 0x1000000
}
if (*z).Payset.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800000
+ zb0004Len--
+ zb0004Mask |= 0x2000000
}
if (*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x1000000
+ zb0004Len--
+ zb0004Mask |= 0x4000000
}
if (*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x2000000
+ zb0004Len--
+ zb0004Mask |= 0x8000000
}
if (*z).BlockHeader.UpgradeVote.UpgradeApprove == false {
- zb0003Len--
- zb0003Mask |= 0x4000000
+ zb0004Len--
+ zb0004Mask |= 0x10000000
}
- // variable map header, size zb0003Len
- o = msgp.AppendMapHeader(o, zb0003Len)
- if zb0003Len != 0 {
- if (zb0003Mask & 0x8) == 0 { // if not empty
+ // variable map header, size zb0004Len
+ o = msgp.AppendMapHeader(o, zb0004Len)
+ if zb0004Len != 0 {
+ if (zb0004Mask & 0x10) == 0 { // if not empty
// string "cc"
o = append(o, 0xa2, 0x63, 0x63)
if (*z).BlockHeader.CompactCert == nil {
@@ -201,117 +215,129 @@ func (z *Block) MarshalMsg(b []byte) (o []byte) {
o = zb0002.MarshalMsg(o)
}
}
- if (zb0003Mask & 0x10) == 0 { // if not empty
+ if (zb0004Mask & 0x20) == 0 { // if not empty
// string "earn"
o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e)
o = msgp.AppendUint64(o, (*z).BlockHeader.RewardsState.RewardsLevel)
}
- if (zb0003Mask & 0x20) == 0 { // if not empty
+ if (zb0004Mask & 0x40) == 0 { // if not empty
// string "fees"
o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73)
o = (*z).BlockHeader.RewardsState.FeeSink.MarshalMsg(o)
}
- if (zb0003Mask & 0x40) == 0 { // if not empty
+ if (zb0004Mask & 0x80) == 0 { // if not empty
// string "frac"
o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63)
o = msgp.AppendUint64(o, (*z).BlockHeader.RewardsState.RewardsResidue)
}
- if (zb0003Mask & 0x80) == 0 { // if not empty
+ if (zb0004Mask & 0x100) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).BlockHeader.GenesisID)
}
- if (zb0003Mask & 0x100) == 0 { // if not empty
+ if (zb0004Mask & 0x200) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).BlockHeader.GenesisHash.MarshalMsg(o)
}
- if (zb0003Mask & 0x200) == 0 { // if not empty
+ if (zb0004Mask & 0x400) == 0 { // if not empty
// string "nextbefore"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
o = (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MarshalMsg(o)
}
- if (zb0003Mask & 0x400) == 0 { // if not empty
+ if (zb0004Mask & 0x800) == 0 { // if not empty
// string "nextproto"
o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).BlockHeader.UpgradeState.NextProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x800) == 0 { // if not empty
+ if (zb0004Mask & 0x1000) == 0 { // if not empty
// string "nextswitch"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68)
o = (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000) == 0 { // if not empty
// string "nextyes"
o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73)
o = msgp.AppendUint64(o, (*z).BlockHeader.UpgradeState.NextProtocolApprovals)
}
- if (zb0003Mask & 0x2000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000) == 0 { // if not empty
+ // string "partupdrmv"
+ o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
+ if (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)))
+ }
+ for zb0003 := range (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ o = (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].MarshalMsg(o)
+ }
+ }
+ if (zb0004Mask & 0x8000) == 0 { // if not empty
// string "prev"
o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76)
o = (*z).BlockHeader.Branch.MarshalMsg(o)
}
- if (zb0003Mask & 0x4000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000) == 0 { // if not empty
// string "proto"
o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x8000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000) == 0 { // if not empty
// string "rate"
o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65)
o = msgp.AppendUint64(o, (*z).BlockHeader.RewardsState.RewardsRate)
}
- if (zb0003Mask & 0x10000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000) == 0 { // if not empty
// string "rnd"
o = append(o, 0xa3, 0x72, 0x6e, 0x64)
o = (*z).BlockHeader.Round.MarshalMsg(o)
}
- if (zb0003Mask & 0x20000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000) == 0 { // if not empty
// string "rwcalr"
o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72)
o = (*z).BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o)
}
- if (zb0003Mask & 0x40000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000) == 0 { // if not empty
// string "rwd"
o = append(o, 0xa3, 0x72, 0x77, 0x64)
o = (*z).BlockHeader.RewardsState.RewardsPool.MarshalMsg(o)
}
- if (zb0003Mask & 0x80000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000) == 0 { // if not empty
// string "seed"
o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64)
o = (*z).BlockHeader.Seed.MarshalMsg(o)
}
- if (zb0003Mask & 0x100000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
o = msgp.AppendUint64(o, (*z).BlockHeader.TxnCounter)
}
- if (zb0003Mask & 0x200000) == 0 { // if not empty
+ if (zb0004Mask & 0x800000) == 0 { // if not empty
// string "ts"
o = append(o, 0xa2, 0x74, 0x73)
o = msgp.AppendInt64(o, (*z).BlockHeader.TimeStamp)
}
- if (zb0003Mask & 0x400000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000000) == 0 { // if not empty
// string "txn"
o = append(o, 0xa3, 0x74, 0x78, 0x6e)
o = (*z).BlockHeader.TxnRoot.MarshalMsg(o)
}
- if (zb0003Mask & 0x800000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000000) == 0 { // if not empty
// string "txns"
o = append(o, 0xa4, 0x74, 0x78, 0x6e, 0x73)
o = (*z).Payset.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000000) == 0 { // if not empty
// string "upgradedelay"
o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79)
o = (*z).BlockHeader.UpgradeVote.UpgradeDelay.MarshalMsg(o)
}
- if (zb0003Mask & 0x2000000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000000) == 0 { // if not empty
// string "upgradeprop"
o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70)
o = (*z).BlockHeader.UpgradeVote.UpgradePropose.MarshalMsg(o)
}
- if (zb0003Mask & 0x4000000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000000) == 0 { // if not empty
// string "upgradeyes"
o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73)
o = msgp.AppendBool(o, (*z).BlockHeader.UpgradeVote.UpgradeApprove)
@@ -329,214 +355,214 @@ func (_ *Block) CanMarshalMsg(z interface{}) bool {
func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.Round.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Round")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.Branch.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Branch")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.Seed.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Seed")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.TxnRoot.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnRoot")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).BlockHeader.TimeStamp, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TimeStamp")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.GenesisHash.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisHash")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.RewardsState.FeeSink.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "FeeSink")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.RewardsState.RewardsPool.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsPool")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).BlockHeader.RewardsState.RewardsLevel, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsLevel")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).BlockHeader.RewardsState.RewardsRate, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRate")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).BlockHeader.RewardsState.RewardsResidue, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsResidue")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.RewardsState.RewardsRecalculationRound.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRecalculationRound")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.UpgradeState.CurrentProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CurrentProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.UpgradeState.NextProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).BlockHeader.UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolApprovals")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolVoteBefore")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolSwitchOn")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.UpgradeVote.UpgradePropose.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradePropose")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.UpgradeVote.UpgradeDelay.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeDelay")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).BlockHeader.UpgradeVote.UpgradeApprove, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeApprove")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).BlockHeader.TxnCounter, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnCounter")
return
}
}
- if zb0003 > 0 {
- zb0003--
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if zb0004 > 0 {
+ zb0004--
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0005 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(protocol.NumCompactCertTypes))
+ if zb0006 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0006 {
+ if zb0007 {
(*z).BlockHeader.CompactCert = nil
} else if (*z).BlockHeader.CompactCert == nil {
- (*z).BlockHeader.CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0005)
+ (*z).BlockHeader.CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0006)
}
- for zb0005 > 0 {
+ for zb0006 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 CompactCertState
- zb0005--
+ zb0006--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
@@ -550,16 +576,45 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).BlockHeader.CompactCert[zb0001] = zb0002
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0008 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0009 {
+ (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0008 {
+ (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0008]
+ } else {
+ (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0008)
+ }
+ for zb0003 := range (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Payset.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Payset")
return
}
}
- if zb0003 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0003)
+ if zb0004 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0004)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -570,11 +625,11 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err)
return
}
- if zb0004 {
+ if zb0005 {
(*z) = Block{}
}
- for zb0003 > 0 {
- zb0003--
+ for zb0004 > 0 {
+ zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -714,27 +769,27 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "cc":
- var zb0007 int
- var zb0008 bool
- zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0010 int
+ var zb0011 bool
+ zb0010, zb0011, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0007 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0007), uint64(protocol.NumCompactCertTypes))
+ if zb0010 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0010), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0008 {
+ if zb0011 {
(*z).BlockHeader.CompactCert = nil
} else if (*z).BlockHeader.CompactCert == nil {
- (*z).BlockHeader.CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0007)
+ (*z).BlockHeader.CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0010)
}
- for zb0007 > 0 {
+ for zb0010 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 CompactCertState
- zb0007--
+ zb0010--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
@@ -747,6 +802,33 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
(*z).BlockHeader.CompactCert[zb0001] = zb0002
}
+ case "partupdrmv":
+ var zb0012 int
+ var zb0013 bool
+ zb0012, zb0013, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0012 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0013 {
+ (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0012 {
+ (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0012]
+ } else {
+ (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0012)
+ }
+ for zb0003 := range (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
case "txns":
bts, err = (*z).Payset.UnmarshalMsg(bts)
if err != nil {
@@ -781,13 +863,17 @@ func (z *Block) Msgsize() (s int) {
s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
}
}
+ s += 11 + msgp.ArrayHeaderSize
+ for zb0003 := range (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ s += (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].Msgsize()
+ }
s += 5 + (*z).Payset.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
func (z *Block) MsgIsZero() bool {
- return ((*z).BlockHeader.Round.MsgIsZero()) && ((*z).BlockHeader.Branch.MsgIsZero()) && ((*z).BlockHeader.Seed.MsgIsZero()) && ((*z).BlockHeader.TxnRoot.MsgIsZero()) && ((*z).BlockHeader.TimeStamp == 0) && ((*z).BlockHeader.GenesisID == "") && ((*z).BlockHeader.GenesisHash.MsgIsZero()) && ((*z).BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).BlockHeader.RewardsState.RewardsRate == 0) && ((*z).BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).BlockHeader.TxnCounter == 0) && (len((*z).BlockHeader.CompactCert) == 0) && ((*z).Payset.MsgIsZero())
+ return ((*z).BlockHeader.Round.MsgIsZero()) && ((*z).BlockHeader.Branch.MsgIsZero()) && ((*z).BlockHeader.Seed.MsgIsZero()) && ((*z).BlockHeader.TxnRoot.MsgIsZero()) && ((*z).BlockHeader.TimeStamp == 0) && ((*z).BlockHeader.GenesisID == "") && ((*z).BlockHeader.GenesisHash.MsgIsZero()) && ((*z).BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).BlockHeader.RewardsState.RewardsRate == 0) && ((*z).BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).BlockHeader.TxnCounter == 0) && (len((*z).BlockHeader.CompactCert) == 0) && (len((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).Payset.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
@@ -822,104 +908,108 @@ func (z *BlockHash) MsgIsZero() bool {
func (z *BlockHeader) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0003Len := uint32(23)
- var zb0003Mask uint32 /* 26 bits */
+ zb0004Len := uint32(24)
+ var zb0004Mask uint32 /* 28 bits */
if len((*z).CompactCert) == 0 {
- zb0003Len--
- zb0003Mask |= 0x8
+ zb0004Len--
+ zb0004Mask |= 0x10
}
if (*z).RewardsState.RewardsLevel == 0 {
- zb0003Len--
- zb0003Mask |= 0x10
+ zb0004Len--
+ zb0004Mask |= 0x20
}
if (*z).RewardsState.FeeSink.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20
+ zb0004Len--
+ zb0004Mask |= 0x40
}
if (*z).RewardsState.RewardsResidue == 0 {
- zb0003Len--
- zb0003Mask |= 0x40
+ zb0004Len--
+ zb0004Mask |= 0x80
}
if (*z).GenesisID == "" {
- zb0003Len--
- zb0003Mask |= 0x80
+ zb0004Len--
+ zb0004Mask |= 0x100
}
if (*z).GenesisHash.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x100
+ zb0004Len--
+ zb0004Mask |= 0x200
}
if (*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x200
+ zb0004Len--
+ zb0004Mask |= 0x400
}
if (*z).UpgradeState.NextProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400
+ zb0004Len--
+ zb0004Mask |= 0x800
}
if (*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800
+ zb0004Len--
+ zb0004Mask |= 0x1000
}
if (*z).UpgradeState.NextProtocolApprovals == 0 {
- zb0003Len--
- zb0003Mask |= 0x1000
+ zb0004Len--
+ zb0004Mask |= 0x2000
+ }
+ if len((*z).ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x4000
}
if (*z).Branch.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x2000
+ zb0004Len--
+ zb0004Mask |= 0x8000
}
if (*z).UpgradeState.CurrentProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x4000
+ zb0004Len--
+ zb0004Mask |= 0x10000
}
if (*z).RewardsState.RewardsRate == 0 {
- zb0003Len--
- zb0003Mask |= 0x8000
+ zb0004Len--
+ zb0004Mask |= 0x20000
}
if (*z).Round.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x10000
+ zb0004Len--
+ zb0004Mask |= 0x40000
}
if (*z).RewardsState.RewardsRecalculationRound.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20000
+ zb0004Len--
+ zb0004Mask |= 0x80000
}
if (*z).RewardsState.RewardsPool.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x40000
+ zb0004Len--
+ zb0004Mask |= 0x100000
}
if (*z).Seed.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x80000
+ zb0004Len--
+ zb0004Mask |= 0x200000
}
if (*z).TxnCounter == 0 {
- zb0003Len--
- zb0003Mask |= 0x100000
+ zb0004Len--
+ zb0004Mask |= 0x400000
}
if (*z).TimeStamp == 0 {
- zb0003Len--
- zb0003Mask |= 0x200000
+ zb0004Len--
+ zb0004Mask |= 0x800000
}
if (*z).TxnRoot.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400000
+ zb0004Len--
+ zb0004Mask |= 0x1000000
}
if (*z).UpgradeVote.UpgradeDelay.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800000
+ zb0004Len--
+ zb0004Mask |= 0x2000000
}
if (*z).UpgradeVote.UpgradePropose.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x1000000
+ zb0004Len--
+ zb0004Mask |= 0x4000000
}
if (*z).UpgradeVote.UpgradeApprove == false {
- zb0003Len--
- zb0003Mask |= 0x2000000
+ zb0004Len--
+ zb0004Mask |= 0x8000000
}
- // variable map header, size zb0003Len
- o = msgp.AppendMapHeader(o, zb0003Len)
- if zb0003Len != 0 {
- if (zb0003Mask & 0x8) == 0 { // if not empty
+ // variable map header, size zb0004Len
+ o = msgp.AppendMapHeader(o, zb0004Len)
+ if zb0004Len != 0 {
+ if (zb0004Mask & 0x10) == 0 { // if not empty
// string "cc"
o = append(o, 0xa2, 0x63, 0x63)
if (*z).CompactCert == nil {
@@ -939,112 +1029,124 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte) {
o = zb0002.MarshalMsg(o)
}
}
- if (zb0003Mask & 0x10) == 0 { // if not empty
+ if (zb0004Mask & 0x20) == 0 { // if not empty
// string "earn"
o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e)
o = msgp.AppendUint64(o, (*z).RewardsState.RewardsLevel)
}
- if (zb0003Mask & 0x20) == 0 { // if not empty
+ if (zb0004Mask & 0x40) == 0 { // if not empty
// string "fees"
o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73)
o = (*z).RewardsState.FeeSink.MarshalMsg(o)
}
- if (zb0003Mask & 0x40) == 0 { // if not empty
+ if (zb0004Mask & 0x80) == 0 { // if not empty
// string "frac"
o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63)
o = msgp.AppendUint64(o, (*z).RewardsState.RewardsResidue)
}
- if (zb0003Mask & 0x80) == 0 { // if not empty
+ if (zb0004Mask & 0x100) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).GenesisID)
}
- if (zb0003Mask & 0x100) == 0 { // if not empty
+ if (zb0004Mask & 0x200) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).GenesisHash.MarshalMsg(o)
}
- if (zb0003Mask & 0x200) == 0 { // if not empty
+ if (zb0004Mask & 0x400) == 0 { // if not empty
// string "nextbefore"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
o = (*z).UpgradeState.NextProtocolVoteBefore.MarshalMsg(o)
}
- if (zb0003Mask & 0x400) == 0 { // if not empty
+ if (zb0004Mask & 0x800) == 0 { // if not empty
// string "nextproto"
o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).UpgradeState.NextProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x800) == 0 { // if not empty
+ if (zb0004Mask & 0x1000) == 0 { // if not empty
// string "nextswitch"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68)
o = (*z).UpgradeState.NextProtocolSwitchOn.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000) == 0 { // if not empty
// string "nextyes"
o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73)
o = msgp.AppendUint64(o, (*z).UpgradeState.NextProtocolApprovals)
}
- if (zb0003Mask & 0x2000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000) == 0 { // if not empty
+ // string "partupdrmv"
+ o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
+ if (*z).ParticipationUpdates.ExpiredParticipationAccounts == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).ParticipationUpdates.ExpiredParticipationAccounts)))
+ }
+ for zb0003 := range (*z).ParticipationUpdates.ExpiredParticipationAccounts {
+ o = (*z).ParticipationUpdates.ExpiredParticipationAccounts[zb0003].MarshalMsg(o)
+ }
+ }
+ if (zb0004Mask & 0x8000) == 0 { // if not empty
// string "prev"
o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76)
o = (*z).Branch.MarshalMsg(o)
}
- if (zb0003Mask & 0x4000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000) == 0 { // if not empty
// string "proto"
o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).UpgradeState.CurrentProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x8000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000) == 0 { // if not empty
// string "rate"
o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65)
o = msgp.AppendUint64(o, (*z).RewardsState.RewardsRate)
}
- if (zb0003Mask & 0x10000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000) == 0 { // if not empty
// string "rnd"
o = append(o, 0xa3, 0x72, 0x6e, 0x64)
o = (*z).Round.MarshalMsg(o)
}
- if (zb0003Mask & 0x20000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000) == 0 { // if not empty
// string "rwcalr"
o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72)
o = (*z).RewardsState.RewardsRecalculationRound.MarshalMsg(o)
}
- if (zb0003Mask & 0x40000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000) == 0 { // if not empty
// string "rwd"
o = append(o, 0xa3, 0x72, 0x77, 0x64)
o = (*z).RewardsState.RewardsPool.MarshalMsg(o)
}
- if (zb0003Mask & 0x80000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000) == 0 { // if not empty
// string "seed"
o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64)
o = (*z).Seed.MarshalMsg(o)
}
- if (zb0003Mask & 0x100000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
o = msgp.AppendUint64(o, (*z).TxnCounter)
}
- if (zb0003Mask & 0x200000) == 0 { // if not empty
+ if (zb0004Mask & 0x800000) == 0 { // if not empty
// string "ts"
o = append(o, 0xa2, 0x74, 0x73)
o = msgp.AppendInt64(o, (*z).TimeStamp)
}
- if (zb0003Mask & 0x400000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000000) == 0 { // if not empty
// string "txn"
o = append(o, 0xa3, 0x74, 0x78, 0x6e)
o = (*z).TxnRoot.MarshalMsg(o)
}
- if (zb0003Mask & 0x800000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000000) == 0 { // if not empty
// string "upgradedelay"
o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79)
o = (*z).UpgradeVote.UpgradeDelay.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000000) == 0 { // if not empty
// string "upgradeprop"
o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70)
o = (*z).UpgradeVote.UpgradePropose.MarshalMsg(o)
}
- if (zb0003Mask & 0x2000000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000000) == 0 { // if not empty
// string "upgradeyes"
o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73)
o = msgp.AppendBool(o, (*z).UpgradeVote.UpgradeApprove)
@@ -1062,214 +1164,214 @@ func (_ *BlockHeader) CanMarshalMsg(z interface{}) bool {
func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Round.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Round")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Branch.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Branch")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Seed.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Seed")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).TxnRoot.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnRoot")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).TimeStamp, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TimeStamp")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).GenesisHash.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisHash")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).RewardsState.FeeSink.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "FeeSink")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).RewardsState.RewardsPool.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsPool")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).RewardsState.RewardsLevel, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsLevel")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).RewardsState.RewardsRate, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRate")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).RewardsState.RewardsResidue, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsResidue")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).RewardsState.RewardsRecalculationRound.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRecalculationRound")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).UpgradeState.CurrentProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CurrentProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).UpgradeState.NextProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolApprovals")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).UpgradeState.NextProtocolVoteBefore.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolVoteBefore")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).UpgradeState.NextProtocolSwitchOn.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolSwitchOn")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).UpgradeVote.UpgradePropose.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradePropose")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).UpgradeVote.UpgradeDelay.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeDelay")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).UpgradeVote.UpgradeApprove, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeApprove")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).TxnCounter, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnCounter")
return
}
}
- if zb0003 > 0 {
- zb0003--
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if zb0004 > 0 {
+ zb0004--
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0005 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(protocol.NumCompactCertTypes))
+ if zb0006 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0006 {
+ if zb0007 {
(*z).CompactCert = nil
} else if (*z).CompactCert == nil {
- (*z).CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0005)
+ (*z).CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0006)
}
- for zb0005 > 0 {
+ for zb0006 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 CompactCertState
- zb0005--
+ zb0006--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
@@ -1283,8 +1385,37 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).CompactCert[zb0001] = zb0002
}
}
- if zb0003 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0003)
+ if zb0004 > 0 {
+ zb0004--
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0008 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0009 {
+ (*z).ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).ParticipationUpdates.ExpiredParticipationAccounts) >= zb0008 {
+ (*z).ParticipationUpdates.ExpiredParticipationAccounts = ((*z).ParticipationUpdates.ExpiredParticipationAccounts)[:zb0008]
+ } else {
+ (*z).ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0008)
+ }
+ for zb0003 := range (*z).ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
+ }
+ if zb0004 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0004)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -1295,11 +1426,11 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err)
return
}
- if zb0004 {
+ if zb0005 {
(*z) = BlockHeader{}
}
- for zb0003 > 0 {
- zb0003--
+ for zb0004 > 0 {
+ zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -1439,27 +1570,27 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "cc":
- var zb0007 int
- var zb0008 bool
- zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0010 int
+ var zb0011 bool
+ zb0010, zb0011, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0007 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0007), uint64(protocol.NumCompactCertTypes))
+ if zb0010 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0010), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0008 {
+ if zb0011 {
(*z).CompactCert = nil
} else if (*z).CompactCert == nil {
- (*z).CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0007)
+ (*z).CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0010)
}
- for zb0007 > 0 {
+ for zb0010 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 CompactCertState
- zb0007--
+ zb0010--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
@@ -1472,6 +1603,33 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
(*z).CompactCert[zb0001] = zb0002
}
+ case "partupdrmv":
+ var zb0012 int
+ var zb0013 bool
+ zb0012, zb0013, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0012 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0013 {
+ (*z).ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).ParticipationUpdates.ExpiredParticipationAccounts) >= zb0012 {
+ (*z).ParticipationUpdates.ExpiredParticipationAccounts = ((*z).ParticipationUpdates.ExpiredParticipationAccounts)[:zb0012]
+ } else {
+ (*z).ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0012)
+ }
+ for zb0003 := range (*z).ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
default:
err = msgp.ErrNoField(string(field))
if err != nil {
@@ -1500,12 +1658,16 @@ func (z *BlockHeader) Msgsize() (s int) {
s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
}
}
+ s += 11 + msgp.ArrayHeaderSize
+ for zb0003 := range (*z).ParticipationUpdates.ExpiredParticipationAccounts {
+ s += (*z).ParticipationUpdates.ExpiredParticipationAccounts[zb0003].Msgsize()
+ }
return
}
// MsgIsZero returns whether this is a zero value
func (z *BlockHeader) MsgIsZero() bool {
- return ((*z).Round.MsgIsZero()) && ((*z).Branch.MsgIsZero()) && ((*z).Seed.MsgIsZero()) && ((*z).TxnRoot.MsgIsZero()) && ((*z).TimeStamp == 0) && ((*z).GenesisID == "") && ((*z).GenesisHash.MsgIsZero()) && ((*z).RewardsState.FeeSink.MsgIsZero()) && ((*z).RewardsState.RewardsPool.MsgIsZero()) && ((*z).RewardsState.RewardsLevel == 0) && ((*z).RewardsState.RewardsRate == 0) && ((*z).RewardsState.RewardsResidue == 0) && ((*z).RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocolApprovals == 0) && ((*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).UpgradeVote.UpgradeApprove == false) && ((*z).TxnCounter == 0) && (len((*z).CompactCert) == 0)
+ return ((*z).Round.MsgIsZero()) && ((*z).Branch.MsgIsZero()) && ((*z).Seed.MsgIsZero()) && ((*z).TxnRoot.MsgIsZero()) && ((*z).TimeStamp == 0) && ((*z).GenesisID == "") && ((*z).GenesisHash.MsgIsZero()) && ((*z).RewardsState.FeeSink.MsgIsZero()) && ((*z).RewardsState.RewardsPool.MsgIsZero()) && ((*z).RewardsState.RewardsLevel == 0) && ((*z).RewardsState.RewardsRate == 0) && ((*z).RewardsState.RewardsResidue == 0) && ((*z).RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocolApprovals == 0) && ((*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).UpgradeVote.UpgradeApprove == false) && ((*z).TxnCounter == 0) && (len((*z).CompactCert) == 0) && (len((*z).ParticipationUpdates.ExpiredParticipationAccounts) == 0)
}
// MarshalMsg implements msgp.Marshaler
@@ -2132,6 +2294,164 @@ func (z *GenesisAllocation) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *ParticipationUpdates) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0002Len := uint32(1)
+ var zb0002Mask uint8 /* 2 bits */
+ if len((*z).ExpiredParticipationAccounts) == 0 {
+ zb0002Len--
+ zb0002Mask |= 0x2
+ }
+ // variable map header, size zb0002Len
+ o = append(o, 0x80|uint8(zb0002Len))
+ if zb0002Len != 0 {
+ if (zb0002Mask & 0x2) == 0 { // if not empty
+ // string "partupdrmv"
+ o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
+ if (*z).ExpiredParticipationAccounts == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).ExpiredParticipationAccounts)))
+ }
+ for zb0001 := range (*z).ExpiredParticipationAccounts {
+ o = (*z).ExpiredParticipationAccounts[zb0001].MarshalMsg(o)
+ }
+ }
+ }
+ return
+}
+
+func (_ *ParticipationUpdates) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*ParticipationUpdates)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *ParticipationUpdates) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0002 int
+ var zb0003 bool
+ zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 > 0 {
+ zb0002--
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0004 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0004), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0005 {
+ (*z).ExpiredParticipationAccounts = nil
+ } else if (*z).ExpiredParticipationAccounts != nil && cap((*z).ExpiredParticipationAccounts) >= zb0004 {
+ (*z).ExpiredParticipationAccounts = ((*z).ExpiredParticipationAccounts)[:zb0004]
+ } else {
+ (*z).ExpiredParticipationAccounts = make([]basics.Address, zb0004)
+ }
+ for zb0001 := range (*z).ExpiredParticipationAccounts {
+ bts, err = (*z).ExpiredParticipationAccounts[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts", zb0001)
+ return
+ }
+ }
+ }
+ if zb0002 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0002)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 {
+ (*z) = ParticipationUpdates{}
+ }
+ for zb0002 > 0 {
+ zb0002--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "partupdrmv":
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0006 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0007 {
+ (*z).ExpiredParticipationAccounts = nil
+ } else if (*z).ExpiredParticipationAccounts != nil && cap((*z).ExpiredParticipationAccounts) >= zb0006 {
+ (*z).ExpiredParticipationAccounts = ((*z).ExpiredParticipationAccounts)[:zb0006]
+ } else {
+ (*z).ExpiredParticipationAccounts = make([]basics.Address, zb0006)
+ }
+ for zb0001 := range (*z).ExpiredParticipationAccounts {
+ bts, err = (*z).ExpiredParticipationAccounts[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts", zb0001)
+ return
+ }
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *ParticipationUpdates) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*ParticipationUpdates)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *ParticipationUpdates) Msgsize() (s int) {
+ s = 1 + 11 + msgp.ArrayHeaderSize
+ for zb0001 := range (*z).ExpiredParticipationAccounts {
+ s += (*z).ExpiredParticipationAccounts[zb0001].Msgsize()
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *ParticipationUpdates) MsgIsZero() bool {
+ return (len((*z).ExpiredParticipationAccounts) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *RewardsState) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
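A quick round-trip sketch of the generated codec for the new type, mirroring the tests added below; per the MarshalMsg body above, a zero value encodes to a single empty-map byte (0x80):

    package main

    import (
        "fmt"

        "github.com/algorand/go-algorand/data/bookkeeping"
    )

    func main() {
        v := bookkeeping.ParticipationUpdates{}
        bts := v.MarshalMsg(nil)

        var out bookkeeping.ParticipationUpdates
        if _, err := out.UnmarshalMsg(bts); err != nil {
            panic(err)
        }
        fmt.Println(len(bts), out.MsgIsZero()) // 1 true
    }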
diff --git a/data/bookkeeping/msgp_gen_test.go b/data/bookkeeping/msgp_gen_test.go
index 44ff62e2d..8bad96593 100644
--- a/data/bookkeeping/msgp_gen_test.go
+++ b/data/bookkeeping/msgp_gen_test.go
@@ -312,6 +312,66 @@ func BenchmarkUnmarshalGenesisAllocation(b *testing.B) {
}
}
+func TestMarshalUnmarshalParticipationUpdates(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := ParticipationUpdates{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingParticipationUpdates(t *testing.T) {
+ protocol.RunEncodingTest(t, &ParticipationUpdates{})
+}
+
+func BenchmarkMarshalMsgParticipationUpdates(b *testing.B) {
+ v := ParticipationUpdates{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgParticipationUpdates(b *testing.B) {
+ v := ParticipationUpdates{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalParticipationUpdates(b *testing.B) {
+ v := ParticipationUpdates{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalRewardsState(t *testing.T) {
partitiontest.PartitionTest(t)
v := RewardsState{}
diff --git a/data/committee/committee.go b/data/committee/committee.go
index 2bb5b0bcb..71409b56a 100644
--- a/data/committee/committee.go
+++ b/data/committee/committee.go
@@ -41,7 +41,7 @@ type Selector interface {
// This struct is used to decouple LedgerReader.AccountData from basics.BalanceRecord.
//msgp:ignore BalanceRecord
type BalanceRecord struct {
- basics.AccountData
+ basics.OnlineAccountData
Addr basics.Address
}
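BalanceRecord now carries the online projection rather than the full account, matching the common_test.go change below. A sketch of constructing one; makeRecord is a hypothetical helper, not part of this patch:

    package committee_sketch

    import (
        "github.com/algorand/go-algorand/data/basics"
        "github.com/algorand/go-algorand/data/committee"
    )

    // makeRecord builds a committee.BalanceRecord from an account's online
    // projection; OnlineAccountData() returns a zero value for non-Online accounts.
    func makeRecord(addr basics.Address, ad basics.AccountData) committee.BalanceRecord {
        return committee.BalanceRecord{
            Addr:              addr,
            OnlineAccountData: ad.OnlineAccountData(),
        }
    }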
diff --git a/data/committee/common_test.go b/data/committee/common_test.go
index 0f1ec6b72..05fba36bd 100644
--- a/data/committee/common_test.go
+++ b/data/committee/common_test.go
@@ -124,7 +124,7 @@ func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, keyBatchesForward
if !ok {
return false, BalanceRecord{}, Seed{}, basics.MicroAlgos{Raw: 0}
}
- return true, BalanceRecord{Addr: addr, AccountData: data}, seed, total
+ return true, BalanceRecord{Addr: addr, OnlineAccountData: data.OnlineAccountData()}, seed, total
}
selParamsList := func(addrs []basics.Address) (ok bool, records []BalanceRecord, seed Seed, total basics.MicroAlgos) {
diff --git a/data/committee/credential_test.go b/data/committee/credential_test.go
index 2132a8d1f..2160d72e5 100644
--- a/data/committee/credential_test.go
+++ b/data/committee/credential_test.go
@@ -100,7 +100,7 @@ func TestRichAccountSelected(t *testing.T) {
}
TotalMoney := basics.MicroAlgos{Raw: 1 << 50}
- record.MicroAlgos.Raw = TotalMoney.Raw / 2
+ record.MicroAlgosWithRewards.Raw = TotalMoney.Raw / 2
sel := AgreementSelector{
Seed: selectionSeed,
Round: round,
@@ -163,7 +163,7 @@ func TestPoorAccountSelectedLeaders(t *testing.T) {
Step: Propose,
}
- record.MicroAlgos.Raw = uint64(1000 / len(addresses))
+ record.MicroAlgosWithRewards.Raw = uint64(1000 / len(addresses))
m := Membership{
Record: record,
Selector: sel,
@@ -209,7 +209,7 @@ func TestPoorAccountSelectedCommittee(t *testing.T) {
Step: step,
}
- record.MicroAlgos.Raw = uint64(2000 / len(addresses))
+ record.MicroAlgosWithRewards.Raw = uint64(2000 / len(addresses))
m := Membership{
Record: record,
Selector: sel,
@@ -247,7 +247,7 @@ func TestNoMoneyAccountNotSelected(t *testing.T) {
Step: Propose,
}
- record.MicroAlgos.Raw = 0
+ record.MicroAlgosWithRewards.Raw = 0
m := Membership{
Record: record,
Selector: sel,
@@ -272,7 +272,7 @@ func TestLeadersSelected(t *testing.T) {
t.Errorf("can't read selection params")
}
- record.MicroAlgos.Raw = 50000
+ record.MicroAlgosWithRewards.Raw = 50000
totalMoney := basics.MicroAlgos{Raw: 100000}
sel := AgreementSelector{
@@ -304,7 +304,7 @@ func TestCommitteeSelected(t *testing.T) {
t.Errorf("can't read selection params")
}
- record.MicroAlgos.Raw = 50000
+ record.MicroAlgosWithRewards.Raw = 50000
totalMoney := basics.MicroAlgos{Raw: 100000}
sel := AgreementSelector{
@@ -341,7 +341,7 @@ func TestAccountNotSelected(t *testing.T) {
Period: period,
Step: Propose,
}
- record.MicroAlgos.Raw = 0
+ record.MicroAlgosWithRewards.Raw = 0
m := Membership{
Record: record,
Selector: sel,
@@ -384,7 +384,7 @@ func BenchmarkSortition(b *testing.B) {
Step: step,
}
- record.MicroAlgos.Raw = uint64(money[i])
+ record.MicroAlgosWithRewards.Raw = uint64(money[i])
m := Membership{
Record: record,
Selector: sel,
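The renamed field is a consequence of the same embedding change: voting stake is now read from `OnlineAccountData.MicroAlgosWithRewards`. A minimal sketch, assuming that field is a `basics.MicroAlgos` as elsewhere in the codebase:

```go
package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/data/committee"
)

func main() {
	var record committee.BalanceRecord
	// The stake used by sortition lives on the embedded OnlineAccountData.
	record.MicroAlgosWithRewards = basics.MicroAlgos{Raw: 50000}
	fmt.Println(record.MicroAlgosWithRewards.Raw)
}
```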
diff --git a/data/datatest/impls.go b/data/datatest/impls.go
index 10e47fed7..30fe35ba9 100644
--- a/data/datatest/impls.go
+++ b/data/datatest/impls.go
@@ -19,7 +19,6 @@ package datatest
import (
"context"
"fmt"
- "time"
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
@@ -54,7 +53,7 @@ type entryFactoryImpl struct {
}
// AssembleBlock implements Ledger.AssembleBlock.
-func (i entryFactoryImpl) AssembleBlock(round basics.Round, deadline time.Time) (agreement.ValidatedBlock, error) {
+func (i entryFactoryImpl) AssembleBlock(round basics.Round) (agreement.ValidatedBlock, error) {
prev, err := i.l.BlockHdr(round - 1)
if err != nil {
return nil, fmt.Errorf("could not make proposals: could not read block from ledger at round %v: %v", round, err)
@@ -101,9 +100,10 @@ func (i ledgerImpl) LookupDigest(r basics.Round) (crypto.Digest, error) {
return crypto.Digest(blockhdr.Hash()), nil
}
-// Lookup implements Ledger.Lookup.
-func (i ledgerImpl) Lookup(r basics.Round, addr basics.Address) (basics.AccountData, error) {
- return i.l.Lookup(r, addr)
+// Lookup implements Ledger.LookupAgreement.
+func (i ledgerImpl) LookupAgreement(r basics.Round, addr basics.Address) (basics.OnlineAccountData, error) {
+ a, err := i.l.LookupAgreement(r, addr)
+ return a, err
}
// Circulation implements Ledger.Circulation.
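The Lookup→LookupAgreement rename narrows what agreement-side code can see. A hypothetical slice of the interface implied by this adapter (not the full agreement ledger contract):

```go
package main

import "github.com/algorand/go-algorand/data/basics"

// agreementLedger is a hypothetical narrowing capturing the rename: agreement
// reads only the online portion of an account, plus circulation.
type agreementLedger interface {
	LookupAgreement(basics.Round, basics.Address) (basics.OnlineAccountData, error)
	Circulation(basics.Round) (basics.MicroAlgos, error)
}

func main() {}
```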
diff --git a/data/ledger.go b/data/ledger.go
index 3cc526889..0767d2948 100644
--- a/data/ledger.go
+++ b/data/ledger.go
@@ -101,7 +101,7 @@ func LoadLedger(
l := &Ledger{
log: log,
}
- genesisInitState := ledger.InitState{
+ genesisInitState := ledgercore.InitState{
Block: genBlock,
Accounts: genesisBal.Balances,
GenesisHash: genesisHash,
@@ -184,7 +184,7 @@ func (l *Ledger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
}
}
- totals, err := l.Totals(r)
+ totals, err := l.OnlineTotals(r) //nolint:typecheck
if err != nil {
return basics.MicroAlgos{}, err
}
@@ -196,12 +196,12 @@ func (l *Ledger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
circulation.elements[1],
{
round: r,
- onlineMoney: totals.Online.Money},
+ onlineMoney: totals},
},
})
}
- return totals.Online.Money, nil
+ return totals, nil
}
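The call-site effect, reduced to a stand-in: `OnlineTotals` returns the online MicroAlgos directly, where `Totals` previously returned a totals struct that had to be dereferenced as `totals.Online.Money`. The function below is a stand-in, not the ledger API:

```go
package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/basics"
)

// onlineTotals stands in for the ledger method to show the narrowed return.
func onlineTotals(r basics.Round) (basics.MicroAlgos, error) {
	return basics.MicroAlgos{Raw: 1_000_000}, nil
}

func main() {
	circ, err := onlineTotals(1) // previously: totals.Online.Money
	if err != nil {
		panic(err)
	}
	fmt.Println(circ.Raw)
}
```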
// Seed gives the VRF seed that was agreed on in a given round,
@@ -316,7 +316,7 @@ func (l *Ledger) ConsensusVersion(r basics.Round) (protocol.ConsensusVersion, er
// EnsureValidatedBlock ensures that the block, and associated certificate c, are
// written to the ledger, or that some other block for the same round is
// written to the ledger.
-func (l *Ledger) EnsureValidatedBlock(vb *ledger.ValidatedBlock, c agreement.Certificate) {
+func (l *Ledger) EnsureValidatedBlock(vb *ledgercore.ValidatedBlock, c agreement.Certificate) {
round := vb.Block().Round()
for l.LastRound() < round {
diff --git a/data/ledger_test.go b/data/ledger_test.go
index e2a231802..c49598d3c 100644
--- a/data/ledger_test.go
+++ b/data/ledger_test.go
@@ -37,7 +37,7 @@ import (
var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
-func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion) (genesisInitState ledger.InitState, initKeys map[basics.Address]*crypto.SignatureSecrets) {
+func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion) (genesisInitState ledgercore.InitState, initKeys map[basics.Address]*crypto.SignatureSecrets) {
var poolSecret, sinkSecret *crypto.SignatureSecrets
var seed crypto.Seed
@@ -157,7 +157,9 @@ func TestLedgerCirculation(t *testing.T) {
baseDestValue := data.MicroAlgos.Raw
blk := genesisInitState.Block
- totals, _ := realLedger.Totals(basics.Round(0))
+ totalsRound, totals, err := realLedger.LatestTotals()
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(0), totalsRound)
baseCirculation := totals.Online.Money.Raw
srcAccountKey := keys[sourceAccount]
@@ -192,15 +194,13 @@ func TestLedgerCirculation(t *testing.T) {
require.NoError(t, err)
require.Equal(t, baseDestValue+uint64(rnd), data.MicroAlgos.Raw)
- totals, err = realLedger.Totals(rnd)
+ roundCirculation, err := realLedger.OnlineTotals(rnd)
require.NoError(t, err)
- roundCirculation := totals.Online.Money.Raw
- require.Equal(t, baseCirculation-uint64(rnd)*(10001), roundCirculation)
+ require.Equal(t, baseCirculation-uint64(rnd)*(10001), roundCirculation.Raw)
- totals, err = l.Totals(rnd)
+ roundCirculation, err = l.OnlineTotals(rnd)
require.NoError(t, err)
- roundCirculation = totals.Online.Money.Raw
- require.Equal(t, baseCirculation-uint64(rnd)*(10001), roundCirculation)
+ require.Equal(t, baseCirculation-uint64(rnd)*(10001), roundCirculation.Raw)
} else if rnd < basics.Round(510) {
// test one round ago
data, err = realLedger.Lookup(rnd-1, destAccount)
@@ -210,15 +210,13 @@ func TestLedgerCirculation(t *testing.T) {
require.NoError(t, err)
require.Equal(t, baseDestValue+uint64(rnd)-1, data.MicroAlgos.Raw)
- totals, err = realLedger.Totals(rnd - 1)
+ roundCirculation, err := realLedger.OnlineTotals(rnd - 1)
require.NoError(t, err)
- roundCirculation := totals.Online.Money.Raw
- require.Equal(t, baseCirculation-uint64(rnd-1)*(10001), roundCirculation)
+ require.Equal(t, baseCirculation-uint64(rnd-1)*(10001), roundCirculation.Raw)
- totals, err = l.Totals(rnd - 1)
+ roundCirculation, err = l.OnlineTotals(rnd - 1)
require.NoError(t, err)
- roundCirculation = totals.Online.Money.Raw
- require.Equal(t, baseCirculation-uint64(rnd-1)*(10001), roundCirculation)
+ require.Equal(t, baseCirculation-uint64(rnd-1)*(10001), roundCirculation.Raw)
} else if rnd < basics.Round(520) {
// test one round in the future ( expected error )
data, err = realLedger.Lookup(rnd+1, destAccount)
@@ -228,17 +226,17 @@ func TestLedgerCirculation(t *testing.T) {
require.Error(t, err)
require.Equal(t, uint64(0), data.MicroAlgos.Raw)
- _, err = realLedger.Totals(rnd + 1)
+ _, err = realLedger.OnlineTotals(rnd + 1)
require.Error(t, err)
- _, err = l.Totals(rnd + 1)
+ _, err = l.OnlineTotals(rnd + 1)
require.Error(t, err)
} else if rnd < basics.Round(520) {
// test expired round ( expected error )
- _, err = realLedger.Totals(rnd - 500)
+ _, err = realLedger.OnlineTotals(rnd - 500)
require.Error(t, err)
- _, err = l.Totals(rnd - 500)
+ _, err = l.OnlineTotals(rnd - 500)
require.Error(t, err)
}
}
diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go
index 35871e91a..4295e82ff 100644
--- a/data/pools/transactionPool.go
+++ b/data/pools/transactionPool.go
@@ -17,6 +17,7 @@
package pools
import (
+ "errors"
"fmt"
"sync"
"sync/atomic"
@@ -62,7 +63,7 @@ type TransactionPool struct {
mu deadlock.Mutex
cond sync.Cond
expiredTxCount map[basics.Round]int
- pendingBlockEvaluator *ledger.BlockEvaluator
+ pendingBlockEvaluator BlockEvaluator
numPendingWholeBlocks basics.Round
feeThresholdMultiplier uint64
statusCache *statusCache
@@ -88,6 +89,20 @@ type TransactionPool struct {
rememberedTxids map[transactions.Txid]transactions.SignedTxn
log logging.Logger
+
+ // proposalAssemblyTime is the ProposalAssemblyTime configured for this node.
+ proposalAssemblyTime time.Duration
+}
+
+// BlockEvaluator defines the block evaluator interface exposed by the ledger package.
+type BlockEvaluator interface {
+ TestTransactionGroup(txgroup []transactions.SignedTxn) error
+ Round() basics.Round
+ PaySetSize() int
+ TransactionGroup(txads []transactions.SignedTxnWithAD) error
+ Transaction(txn transactions.SignedTxn, ad transactions.ApplyData) error
+ GenerateBlock() (*ledgercore.ValidatedBlock, error)
+ ResetTxnBytes()
}
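Turning `pendingBlockEvaluator` into an interface lets tests substitute a double for the ledger's concrete evaluator. A minimal sketch of such a stub (hypothetical, not part of this change):

```go
package pools

import (
	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/data/transactions"
	"github.com/algorand/go-algorand/ledger/ledgercore"
)

// stubEvaluator is a hypothetical test double satisfying BlockEvaluator.
type stubEvaluator struct{ round basics.Round }

var _ BlockEvaluator = stubEvaluator{}

func (s stubEvaluator) TestTransactionGroup([]transactions.SignedTxn) error              { return nil }
func (s stubEvaluator) Round() basics.Round                                              { return s.round }
func (s stubEvaluator) PaySetSize() int                                                  { return 0 }
func (s stubEvaluator) TransactionGroup([]transactions.SignedTxnWithAD) error            { return nil }
func (s stubEvaluator) Transaction(transactions.SignedTxn, transactions.ApplyData) error { return nil }
func (s stubEvaluator) GenerateBlock() (*ledgercore.ValidatedBlock, error)               { return nil, nil }
func (s stubEvaluator) ResetTxnBytes()                                                   {}
```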
// MakeTransactionPool makes a transaction pool.
@@ -105,6 +120,7 @@ func MakeTransactionPool(ledger *ledger.Ledger, cfg config.Local, log logging.Lo
logAssembleStats: cfg.EnableAssembleStats,
expFeeFactor: cfg.TxPoolExponentialIncreaseFactor,
txPoolMaxSize: cfg.TxPoolSize,
+ proposalAssemblyTime: cfg.ProposalAssemblyTime,
log: log,
}
pool.cond.L = &pool.mu
@@ -119,7 +135,7 @@ type poolAsmResults struct {
// the ok variable indicates whether the assembly for the block roundStartedEvaluating was complete (i.e. ok == true) or
// whether it's still in progress.
ok bool
- blk *ledger.ValidatedBlock
+ blk *ledgercore.ValidatedBlock
stats telemetryspec.AssembleBlockMetrics
err error
// roundStartedEvaluating is the round which we were attempted to evaluate last. It's a good measure for
@@ -154,6 +170,9 @@ var ErrStaleBlockAssemblyRequest = fmt.Errorf("AssembleBlock: requested block as
// Reset resets the content of the transaction pool
func (pool *TransactionPool) Reset() {
+ pool.mu.Lock()
+ defer pool.mu.Unlock()
+ defer pool.cond.Broadcast()
pool.pendingTxids = make(map[transactions.Txid]transactions.SignedTxn)
pool.pendingTxGroups = nil
pool.rememberedTxids = make(map[transactions.Txid]transactions.SignedTxn)
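One subtlety in the locking added to Reset(): deferred calls run last-in-first-out, so the Broadcast fires while the mutex is still held, which is the conventional ordering for sync.Cond. A self-contained illustration:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.Mutex
	cond := sync.NewCond(&mu)

	reset := func() {
		mu.Lock()
		defer mu.Unlock()
		defer cond.Broadcast() // LIFO: runs before Unlock, under the lock
		fmt.Println("state cleared")
	}
	reset()
}
```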
@@ -578,10 +597,10 @@ func (pool *TransactionPool) addToPendingBlockEvaluatorOnce(txgroup []transactio
stats.StopReason = telemetryspec.AssembleBlockAbandon
pool.assemblyResults.stats = *stats
pool.assemblyCond.Broadcast()
- } else if err == ledger.ErrNoSpace || pool.isAssemblyTimedOut() {
+ } else if err == ledgercore.ErrNoSpace || pool.isAssemblyTimedOut() {
pool.assemblyResults.ok = true
pool.assemblyResults.assemblyCompletedOrAbandoned = true
- if err == ledger.ErrNoSpace {
+ if err == ledgercore.ErrNoSpace {
stats.StopReason = telemetryspec.AssembleBlockFull
} else {
stats.StopReason = telemetryspec.AssembleBlockTimeout
@@ -610,7 +629,7 @@ func (pool *TransactionPool) addToPendingBlockEvaluatorOnce(txgroup []transactio
func (pool *TransactionPool) addToPendingBlockEvaluator(txgroup []transactions.SignedTxn, recomputing bool, stats *telemetryspec.AssembleBlockMetrics) error {
err := pool.addToPendingBlockEvaluatorOnce(txgroup, recomputing, stats)
- if err == ledger.ErrNoSpace {
+ if err == ledgercore.ErrNoSpace {
pool.numPendingWholeBlocks++
pool.pendingBlockEvaluator.ResetTxnBytes()
err = pool.addToPendingBlockEvaluatorOnce(txgroup, recomputing, stats)
@@ -665,8 +684,19 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact
if hint < 0 || int(knownCommitted) < 0 {
hint = 0
}
- pool.pendingBlockEvaluator, err = pool.ledger.StartEvaluator(next.BlockHeader, hint)
+ pool.pendingBlockEvaluator, err = pool.ledger.StartEvaluator(next.BlockHeader, hint, 0)
if err != nil {
+ // The pendingBlockEvaluator is an interface, and in case of an evaluator error
+ // we want to remove the interface itself rather than keeping an interface
+ // to a nil value.
+ pool.pendingBlockEvaluator = nil
+ var nonSeqBlockEval ledgercore.ErrNonSequentialBlockEval
+ if errors.As(err, &nonSeqBlockEval) {
+ if nonSeqBlockEval.EvaluatorRound <= nonSeqBlockEval.LatestRound {
+ pool.log.Infof("TransactionPool.recomputeBlockEvaluator: skipped creating block evaluator for round %d since ledger already caught up with that round", nonSeqBlockEval.EvaluatorRound)
+ return
+ }
+ }
pool.log.Warnf("TransactionPool.recomputeBlockEvaluator: cannot start evaluator: %v", err)
return
}
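The pattern above, reduced: `errors.As` unwraps to the typed ledger error so the pool can tell "ledger already past this round" apart from a real failure. The field names follow the calls above; the struct body below is a sketch of `ledgercore.ErrNonSequentialBlockEval`, not its actual definition:

```go
package main

import (
	"errors"
	"fmt"
)

// errNonSequential sketches the two fields the pool consults.
type errNonSequential struct{ EvaluatorRound, LatestRound uint64 }

func (e errNonSequential) Error() string { return "non-sequential block eval" }

func main() {
	err := fmt.Errorf("cannot start evaluator: %w", errNonSequential{5, 7})
	var nonSeq errNonSequential
	if errors.As(err, &nonSeq) && nonSeq.EvaluatorRound <= nonSeq.LatestRound {
		fmt.Println("benign: ledger caught up past round", nonSeq.EvaluatorRound)
	}
}
```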
@@ -718,7 +748,7 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact
// assembly. We want to figure out how long we have spent before trying to evaluate the first transaction.
// (Ideally it's near zero. The goal here is to see if we get to a near time-out situation before processing the
// first transaction group.)
- asmStats.TransactionsLoopStartTime = int64(firstTxnGrpTime.Sub(pool.assemblyDeadline.Add(-config.ProposalAssemblyTime)))
+ asmStats.TransactionsLoopStartTime = int64(firstTxnGrpTime.Sub(pool.assemblyDeadline.Add(-pool.proposalAssemblyTime)))
}
if !pool.assemblyResults.ok && pool.assemblyRound <= pool.pendingBlockEvaluator.Round() {
@@ -743,7 +773,7 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact
// AssembleBlock assembles a block for a given round, trying not to
// take longer than deadline to finish.
-func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Time) (assembled *ledger.ValidatedBlock, err error) {
+func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Time) (assembled *ledgercore.ValidatedBlock, err error) {
var stats telemetryspec.AssembleBlockMetrics
if pool.logAssembleStats {
@@ -853,7 +883,7 @@ func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Tim
pool.log.Warnf("AssembleBlock: ran out of time for round %d", round)
stats.StopReason = telemetryspec.AssembleBlockTimeout
if emptyBlockErr != nil {
- emptyBlockErr = fmt.Errorf("AssembleBlock: failed to construct empty block : %v", emptyBlockErr)
+ emptyBlockErr = fmt.Errorf("AssembleBlock: failed to construct empty block : %w", emptyBlockErr)
}
return emptyBlock, emptyBlockErr
}
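The `%v`→`%w` switch here matters for callers: wrapping with `%w` keeps the cause visible to `errors.Is`/`errors.As`. A minimal demonstration with a stand-in error:

```go
package main

import (
	"errors"
	"fmt"
)

var errNoSpace = errors.New("block does not have space") // stand-in for ledgercore.ErrNoSpace

func main() {
	wrapped := fmt.Errorf("AssembleBlock: failed to construct empty block : %w", errNoSpace)
	fmt.Println(errors.Is(wrapped, errNoSpace)) // true; with %v this would be false
}
```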
@@ -884,7 +914,7 @@ func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Tim
// assembleEmptyBlock constructs a new block for the given round. Internally it uses ledger database calls, so callers
// need to be aware that it might take a while to return.
-func (pool *TransactionPool) assembleEmptyBlock(round basics.Round) (assembled *ledger.ValidatedBlock, err error) {
+func (pool *TransactionPool) assembleEmptyBlock(round basics.Round) (assembled *ledgercore.ValidatedBlock, err error) {
prevRound := round - 1
prev, err := pool.ledger.BlockHdr(prevRound)
if err != nil {
@@ -892,16 +922,24 @@ func (pool *TransactionPool) assembleEmptyBlock(round basics.Round) (assembled *
return nil, err
}
next := bookkeeping.MakeBlock(prev)
- blockEval, err := pool.ledger.StartEvaluator(next.BlockHeader, 0)
+ blockEval, err := pool.ledger.StartEvaluator(next.BlockHeader, 0, 0)
if err != nil {
- err = fmt.Errorf("TransactionPool.assembleEmptyBlock: cannot start evaluator for %d: %v", round, err)
+ var nonSeqBlockEval ledgercore.ErrNonSequentialBlockEval
+ if errors.As(err, &nonSeqBlockEval) {
+ if nonSeqBlockEval.EvaluatorRound <= nonSeqBlockEval.LatestRound {
+ // in case the ledger has already moved beyond that round, just let agreement know that
+ // we didn't generate a block; that's perfectly fine.
+ return nil, ErrStaleBlockAssemblyRequest
+ }
+ }
+ err = fmt.Errorf("TransactionPool.assembleEmptyBlock: cannot start evaluator for %d: %w", round, err)
return nil, err
}
return blockEval.GenerateBlock()
}
// AssembleDevModeBlock assembles a new block from the existing transaction pool. The pending evaluator is being
-func (pool *TransactionPool) AssembleDevModeBlock() (assembled *ledger.ValidatedBlock, err error) {
+func (pool *TransactionPool) AssembleDevModeBlock() (assembled *ledgercore.ValidatedBlock, err error) {
pool.mu.Lock()
defer pool.mu.Unlock()
@@ -910,6 +948,6 @@ func (pool *TransactionPool) AssembleDevModeBlock() (assembled *ledger.Validated
// The above was already pregenerating the entire block,
// so there won't be any waiting on this call.
- assembled, err = pool.AssembleBlock(pool.pendingBlockEvaluator.Round(), time.Now().Add(config.ProposalAssemblyTime))
+ assembled, err = pool.AssembleBlock(pool.pendingBlockEvaluator.Round(), time.Now().Add(pool.proposalAssemblyTime))
return
}
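With the package-level `config.ProposalAssemblyTime` gone from these call sites, the deadline derives from per-node config captured at pool construction. A minimal sketch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/algorand/go-algorand/config"
)

func main() {
	cfg := config.GetDefaultLocal()
	// The pool captures cfg.ProposalAssemblyTime when it is made; dev-mode
	// assembly then derives its deadline from that value.
	deadline := time.Now().Add(cfg.ProposalAssemblyTime)
	fmt.Println(time.Until(deadline) > 0)
}
```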
diff --git a/data/pools/transactionPool_test.go b/data/pools/transactionPool_test.go
index 59d81e888..4e65333b6 100644
--- a/data/pools/transactionPool_test.go
+++ b/data/pools/transactionPool_test.go
@@ -85,7 +85,7 @@ func mockLedger(t TestingT, initAccounts map[basics.Address]basics.AccountData,
fn := fmt.Sprintf("/tmp/%s.%d.sqlite3", t.Name(), crypto.RandUint64())
const inMem = true
- genesisInitState := ledger.InitState{Block: initBlock, Accounts: initAccounts, GenesisHash: hash}
+ genesisInitState := ledgercore.InitState{Block: initBlock, Accounts: initAccounts, GenesisHash: hash}
cfg := config.GetDefaultLocal()
cfg.Archival = true
l, err := ledger.OpenLedger(logging.Base(), fn, true, genesisInitState, cfg)
@@ -101,13 +101,13 @@ func makeMockLedgerFuture(t TestingT, initAccounts map[basics.Address]basics.Acc
return mockLedger(t, initAccounts, protocol.ConsensusFuture)
}
-func newBlockEvaluator(t TestingT, l *ledger.Ledger) *ledger.BlockEvaluator {
+func newBlockEvaluator(t TestingT, l *ledger.Ledger) BlockEvaluator {
latest := l.Latest()
prev, err := l.BlockHdr(latest)
require.NoError(t, err)
next := bookkeeping.MakeBlock(prev)
- eval, err := l.StartEvaluator(next.BlockHeader, 0)
+ eval, err := l.StartEvaluator(next.BlockHeader, 0, 0)
require.NoError(t, err)
return eval
@@ -1167,7 +1167,7 @@ func BenchmarkTransactionPoolSteadyState(b *testing.B) {
for len(ledgerTxnQueue) > 0 {
stx := ledgerTxnQueue[0]
err := eval.Transaction(stx, transactions.ApplyData{})
- if err == ledger.ErrNoSpace {
+ if err == ledgercore.ErrNoSpace {
break
}
require.NoError(b, err)
diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md
index 530a01b13..6bdc19457 100644
--- a/data/transactions/logic/README.md
+++ b/data/transactions/logic/README.md
@@ -10,7 +10,8 @@ The stack starts empty and contains values of either uint64 or bytes
4096 bytes in length). Most operations act on the stack, popping
arguments from it and pushing results to it.
-The maximum stack depth is currently 1000.
+The maximum stack depth is currently 1000. If the stack depth is
+exceeded or if a `bytes` element exceeds 4096 bytes, the program fails.
## Scratch Space
@@ -36,11 +37,12 @@ TEAL LogicSigs run in Algorand nodes as part of testing a proposed transaction t
If an authorized program executes and finishes with a single non-zero uint64 value on the stack then that program has validated the transaction it is attached to.
-The TEAL program has access to data from the transaction it is attached to (`txn` op), any transactions in a transaction group it is part of (`gtxn` op), and a few global values like consensus parameters (`global` op). Some "Args" may be attached to a transaction being validated by a TEAL program. Args are an array of byte strings. A common pattern would be to have the key to unlock some contract as an Arg. Args are recorded on the blockchain and publicly visible when the transaction is submitted to the network. These LogicSig Args are _not_ signed.
+The TEAL program has access to data from the transaction it is attached to (`txn` op), any transactions in a transaction group it is part of (`gtxn` op), and a few global values like consensus parameters (`global` op). Some "Args" may be attached to a transaction being validated by a TEAL program. Args are an array of byte strings. A common pattern would be to have the key to unlock some contract as an Arg. Args are recorded on the blockchain and publicly visible when the transaction is submitted to the network. These LogicSig Args are _not_ part of the transaction ID or the TxGroup hash. They also cannot be read from other TEAL programs in the group of transactions.
A program can either authorize some delegated action on a normal private key signed or multisig account or be wholly in charge of a contract account.
-* If the account has signed the program (an ed25519 signature on "Program" concatenated with the program bytes) then if the program returns true the transaction is authorized as if the account had signed it. This allows an account to hand out a signed program so that other users can carry out delegated actions which are approved by the program.
+* If the account has signed the program (an ed25519 signature on "Program" concatenated with the program bytes) then if the program returns true the transaction is authorized as if the account had signed it. This allows an account to hand out a signed program so that other users can carry out delegated actions which are approved by the program. Note that LogicSig Args are _not_ signed.
+
* If the SHA512_256 hash of the program (prefixed by "Program") is equal to the transaction Sender address then this is a contract account wholly controlled by the program. No other signature is necessary or possible. The only way to execute a transaction against the contract account is for the program to approve it.
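A minimal sketch of the contract-account rule in the second bullet, using go-algorand's `crypto.Hash` (SHA512_256); the sample bytecode is arbitrary illustration bytes:

```go
package main

import (
	"fmt"

	"github.com/algorand/go-algorand/crypto"
	"github.com/algorand/go-algorand/data/basics"
)

// escrowAddress computes SHA512_256("Program" || bytecode); when this equals
// the transaction Sender, the account is wholly controlled by the program.
func escrowAddress(program []byte) basics.Address {
	return basics.Address(crypto.Hash(append([]byte("Program"), program...)))
}

func main() {
	fmt.Println(escrowAddress([]byte{0x05, 0x81, 0x01})) // arbitrary sample bytecode
}
```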
The TEAL bytecode plus the length of all Args must add up to no more than 1000 bytes (consensus parameter LogicSigMaxSize). Each TEAL op has an associated cost and the program cost must total no more than 20000 (consensus parameter LogicSigMaxCost). Most ops have a cost of 1, but a few slow crypto ops are much higher. Prior to v4, the program's cost was estimated as the static sum of all the opcode costs in the program (whether they were actually executed or not). Beginning with v4, the program's cost is tracked dynamically, while being evaluated. If the program exceeds its budget, it fails.
@@ -464,9 +466,10 @@ transaction types, are rejected by `itxn_submit`.
| Op | Description |
| --- | --- |
-| `itxn_begin` | Begin preparation of a new inner transaction |
-| `itxn_field f` | Set field F of the current inner transaction to X |
-| `itxn_submit` | Execute the current inner transaction. Fail if 16 inner transactions have already been executed, or if the transaction itself fails. |
+| `itxn_begin` | begin preparation of a new inner transaction in a new transaction group |
+| `itxn_next` | begin preparation of a new inner transaction in the same transaction group |
+| `itxn_field f` | set field F of the current inner transaction to X |
+| `itxn_submit` | execute the current inner transaction group. Fail if executing this group would exceed 16 total inner transactions, or if any transaction in the group fails. |
| `itxn f` | push field F of the last inner transaction to stack |
| `itxna f i` | push Ith value of the array field F of the last inner transaction to stack |
diff --git a/data/transactions/logic/README_in.md b/data/transactions/logic/README_in.md
index c673e4a0c..494dd5776 100644
--- a/data/transactions/logic/README_in.md
+++ b/data/transactions/logic/README_in.md
@@ -10,7 +10,8 @@ The stack starts empty and contains values of either uint64 or bytes
4096 bytes in length). Most operations act on the stack, popping
arguments from it and pushing results to it.
-The maximum stack depth is currently 1000.
+The maximum stack depth is currently 1000. If the stack depth is
+exceeded or if a `bytes` element exceeds 4096 bytes, the program fails.
## Scratch Space
@@ -36,11 +37,12 @@ TEAL LogicSigs run in Algorand nodes as part of testing a proposed transaction t
If an authorized program executes and finishes with a single non-zero uint64 value on the stack then that program has validated the transaction it is attached to.
-The TEAL program has access to data from the transaction it is attached to (`txn` op), any transactions in a transaction group it is part of (`gtxn` op), and a few global values like consensus parameters (`global` op). Some "Args" may be attached to a transaction being validated by a TEAL program. Args are an array of byte strings. A common pattern would be to have the key to unlock some contract as an Arg. Args are recorded on the blockchain and publicly visible when the transaction is submitted to the network. These LogicSig Args are _not_ signed.
+The TEAL program has access to data from the transaction it is attached to (`txn` op), any transactions in a transaction group it is part of (`gtxn` op), and a few global values like consensus parameters (`global` op). Some "Args" may be attached to a transaction being validated by a TEAL program. Args are an array of byte strings. A common pattern would be to have the key to unlock some contract as an Arg. Args are recorded on the blockchain and publicly visible when the transaction is submitted to the network. These LogicSig Args are _not_ part of the transaction ID or the TxGroup hash. They also cannot be read from other TEAL programs in the group of transactions.
A program can either authorize some delegated action on a normal private key signed or multisig account or be wholly in charge of a contract account.
-* If the account has signed the program (an ed25519 signature on "Program" concatenated with the program bytes) then if the program returns true the transaction is authorized as if the account had signed it. This allows an account to hand out a signed program so that other users can carry out delegated actions which are approved by the program.
+* If the account has signed the program (an ed25519 signature on "Program" concatenated with the program bytes) then if the program returns true the transaction is authorized as if the account had signed it. This allows an account to hand out a signed program so that other users can carry out delegated actions which are approved by the program. Note that LogicSig Args are _not_ signed.
+
* If the SHA512_256 hash of the program (prefixed by "Program") is equal to the transaction Sender address then this is a contract account wholly controlled by the program. No other signature is necessary or possible. The only way to execute a transaction against the contract account is for the program to approve it.
The TEAL bytecode plus the length of all Args must add up to no more than 1000 bytes (consensus parameter LogicSigMaxSize). Each TEAL op has an associated cost and the program cost must total no more than 20000 (consensus parameter LogicSigMaxCost). Most ops have a cost of 1, but a few slow crypto ops are much higher. Prior to v4, the program's cost was estimated as the static sum of all the opcode costs in the program (whether they were actually executed or not). Beginning with v4, the program's cost is tracked dynamically, while being evaluated. If the program exceeds its budget, it fails.
diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md
index 0d4d65876..99f020af2 100644
--- a/data/transactions/logic/TEAL_opcodes.md
+++ b/data/transactions/logic/TEAL_opcodes.md
@@ -896,7 +896,7 @@ params: Txn.Accounts offset (or, since v4, an account address that appears in Tx
- LogicSigVersion >= 2
- Mode: Application
-params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), application id (or, since v4, a Txn.ForeignApps offset), state key. Return: did_exist flag (top of the stack, 1 if the application existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.
+params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), application id (or, since v4, a Txn.ForeignApps offset), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.
## app_global_get
@@ -918,7 +918,7 @@ params: state key. Return: value. The value is zero (of type uint64) if the key
- LogicSigVersion >= 2
- Mode: Application
-params: Txn.ForeignApps offset (or, since v4, an application id that appears in Txn.ForeignApps or is the CurrentApplicationID), state key. Return: did_exist flag (top of the stack, 1 if the application existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.
+params: Txn.ForeignApps offset (or, since v4, an application id that appears in Txn.ForeignApps or is the CurrentApplicationID), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.
## app_local_put
@@ -1296,7 +1296,7 @@ bitlen interprets arrays as big-endian integers, unlike setbit/getbit
- Opcode: 0xb1
- Pops: _None_
- Pushes: _None_
-- Begin preparation of a new inner transaction
+- begin preparation of a new inner transaction in a new transaction group
- LogicSigVersion >= 5
- Mode: Application
@@ -1307,7 +1307,7 @@ bitlen interprets arrays as big-endian integers, unlike setbit/getbit
- Opcode: 0xb2 {uint8 transaction field index}
- Pops: *... stack*, any
- Pushes: _None_
-- Set field F of the current inner transaction to X
+- set field F of the current inner transaction to X
- LogicSigVersion >= 5
- Mode: Application
@@ -1318,10 +1318,12 @@ bitlen interprets arrays as big-endian integers, unlike setbit/getbit
- Opcode: 0xb3
- Pops: _None_
- Pushes: _None_
-- Execute the current inner transaction. Fail if 16 inner transactions have already been executed, or if the transaction itself fails.
+- execute the current inner transaction group. Fail if executing this group would exceed 16 total inner transactions, or if any transaction in the group fails.
- LogicSigVersion >= 5
- Mode: Application
+`itxn_submit` resets the current transaction so that it cannot be resubmitted. A new `itxn_begin` is required to prepare another inner transaction.
+
## itxn f
- Opcode: 0xb4 {uint8 transaction field index}
@@ -1340,6 +1342,15 @@ bitlen interprets arrays as big-endian integers, unlike setbit/getbit
- LogicSigVersion >= 5
- Mode: Application
+## itxn_next
+
+- Opcode: 0xb6
+- Pops: _None_
+- Pushes: _None_
+- begin preparation of a new inner transaction in the same transaction group
+- LogicSigVersion >= 6
+- Mode: Application
+
## txnas f
- Opcode: 0xc0 {uint8 transaction field index}
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index 58e597ecd..5b4d72b38 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -27,7 +27,6 @@ import (
"errors"
"fmt"
"io"
- "os"
"sort"
"strconv"
"strings"
@@ -472,13 +471,16 @@ func asmPushInt(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
func asmPushBytes(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s needs one argument", spec.Name)
+ if len(args) == 0 {
+ return ops.errorf("%s operation needs byte literal argument", spec.Name)
}
- val, _, err := parseBinaryArgs(args)
+ val, consumed, err := parseBinaryArgs(args)
if err != nil {
return ops.error(err)
}
+ if len(args) != consumed {
+ return ops.errorf("%s operation with extraneous argument", spec.Name)
+ }
ops.pending.WriteByte(spec.Opcode)
var scratch [binary.MaxVarintLen64]byte
vlen := binary.PutUvarint(scratch[:], uint64(len(val)))
@@ -636,12 +638,15 @@ func parseStringLiteral(input string) (result []byte, err error) {
// byte "this is a string\n"
func assembleByte(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 0 {
- return ops.error("byte operation needs byte literal argument")
+ return ops.errorf("%s operation needs byte literal argument", spec.Name)
}
- val, _, err := parseBinaryArgs(args)
+ val, consumed, err := parseBinaryArgs(args)
if err != nil {
return ops.error(err)
}
+ if len(args) != consumed {
+ return ops.errorf("%s operation with extraneous argument", spec.Name)
+ }
ops.ByteLiteral(val)
return nil
}
@@ -1290,8 +1295,8 @@ func typeDig(ops *OpStream, args []string) (StackTypes, StackTypes) {
idx := len(ops.typeStack) - depth
if idx >= 0 {
returns[len(returns)-1] = ops.typeStack[idx]
- for i := idx + 1; i < len(ops.typeStack); i++ {
- returns[i-idx-1] = ops.typeStack[i]
+ for i := idx; i < len(ops.typeStack); i++ {
+ returns[i-idx] = ops.typeStack[i]
}
}
return anys, returns
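The one-line index fix above, modeled in isolation: after `dig N`, the dug copy lands on top and the N elements above the source keep their positions, so the rewrite of the tracked types must start at `idx`, not `idx + 1`. A toy model of the resulting type stack:

```go
package main

import "fmt"

// digTypes models how `dig n` transforms the tracked type stack: it appends
// a copy of the element n-from-the-top, leaving everything else in place.
func digTypes(types []string, n int) []string {
	idx := len(types) - 1 - n
	return append(types, types[idx])
}

func main() {
	// Mirrors the regression test below: int 1; byte 0x1234; byte 0x1234; dig 2
	fmt.Println(digTypes([]string{"uint64", "bytes", "bytes"}, 2))
	// Output: [uint64 bytes bytes uint64]
}
```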
@@ -1588,6 +1593,7 @@ func (ops *OpStream) assemble(fin io.Reader) error {
for scanner.Scan() {
ops.sourceLine++
line := scanner.Text()
+ line = strings.TrimSpace(line)
if len(line) == 0 {
ops.trace("%d: 0 line\n", ops.sourceLine)
continue
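The TrimSpace means indentation no longer hides a directive; the TestPragmas addition below exercises exactly this. In isolation:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	line := strings.TrimSpace("   #pragma version 5 ")
	fmt.Println(strings.HasPrefix(line, "#pragma")) // true: the pragma is recognized
}
```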
@@ -2102,19 +2108,27 @@ func (ops *OpStream) warnf(format string, a ...interface{}) error {
return ops.warn(fmt.Errorf(format, a...))
}
-// ReportProblems issues accumulated warnings and errors to stderr.
-func (ops *OpStream) ReportProblems(fname string) {
+// ReportProblems issues accumulated warnings and outputs errors to an io.Writer.
+func (ops *OpStream) ReportProblems(fname string, writer io.Writer) {
for i, e := range ops.Errors {
if i > 9 {
break
}
- fmt.Fprintf(os.Stderr, "%s: %s\n", fname, e)
+ if fname == "" {
+ fmt.Fprintf(writer, "%s\n", e)
+ } else {
+ fmt.Fprintf(writer, "%s: %s\n", fname, e)
+ }
}
for i, w := range ops.Warnings {
if i > 9 {
break
}
- fmt.Fprintf(os.Stderr, "%s: %s\n", fname, w)
+ if fname == "" {
+ fmt.Fprintf(writer, "%s\n", w)
+ } else {
+ fmt.Fprintf(writer, "%s: %s\n", fname, w)
+ }
}
}
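Call sites now pick the destination, which makes the reporter testable. A reduced, stand-alone version of the new contract (an empty fname drops the prefix), with hypothetical names:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// report mirrors the new ReportProblems shape: output goes to any io.Writer,
// and an empty fname suppresses the "fname: " prefix.
func report(fname string, w io.Writer, problems []error) {
	for i, e := range problems {
		if i > 9 { // cap output at ten problems, as above
			break
		}
		if fname == "" {
			fmt.Fprintf(w, "%s\n", e)
		} else {
			fmt.Fprintf(w, "%s: %s\n", fname, e)
		}
	}
}

func main() {
	var buf bytes.Buffer
	report("", &buf, []error{fmt.Errorf("unknown opcode")})
	fmt.Print(buf.String()) // "unknown opcode"
}
```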
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index bc61b83b2..1c78f9d40 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -134,7 +134,7 @@ intc 1
mulw
`
-const v2Nonsense = `
+const v2Nonsense = v1Nonsense + `
dup2
pop
pop
@@ -219,7 +219,7 @@ txn FreezeAssetAccount
txn FreezeAssetFrozen
`
-const v3Nonsense = `
+const v3Nonsense = v2Nonsense + `
assert
min_balance
int 0x031337 // get bit 1, negate it, put it back
@@ -248,7 +248,7 @@ pushbytes "john"
// Keep in mind, only use existing int and byte constants, or else use
// push* instead. The idea is to not cause the *cblocks to change.
-const v4Nonsense = `
+const v4Nonsense = v3Nonsense + `
int 1
pushint 2000
int 0
@@ -294,7 +294,7 @@ gaids
int 100
`
-const v5Nonsense = `
+const v5Nonsense = v4Nonsense + `
app_params_get AppExtraProgramPages
cover 1
uncover 1
@@ -342,12 +342,17 @@ ecdsa_pk_recover Secp256k1
itxna Logs 3
`
+const v6Nonsense = v5Nonsense + `
+itxn_next
+`
+
var nonsense = map[uint64]string{
1: v1Nonsense,
- 2: v1Nonsense + v2Nonsense,
- 3: v1Nonsense + v2Nonsense + v3Nonsense,
- 4: v1Nonsense + v2Nonsense + v3Nonsense + v4Nonsense,
- 5: v1Nonsense + v2Nonsense + v3Nonsense + v4Nonsense + v5Nonsense,
+ 2: v2Nonsense,
+ 3: v3Nonsense,
+ 4: v4Nonsense,
+ 5: v5Nonsense,
+ 6: v6Nonsense,
}
var compiled = map[uint64]string{
@@ -356,6 +361,7 @@ var compiled = map[uint64]string{
3: "032008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f4478222105531421055427042106552105082106564c4d4b02210538212106391c0081e80780046a6f686e",
4: "042004010200b7a60c26040242420c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482a50512a632223524100034200004322602261222b634848222862482864286548482228236628226724286828692422700048482471004848361c0037001a0031183119311b311d311e311f312024221e312131223123312431253126312731283129312a312b312c312d312e312f44782522531422542b2355220823564c4d4b0222382123391c0081e80780046a6f686e2281d00f24231f880003420001892223902291922394239593a0a1a2a3a4a5a6a7a8a9aaabacadae23af3a00003b003c003d8164",
5: "052004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b53a03",
+ 6: "062004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b53a03b6",
}
func pseudoOp(opcode string) bool {
@@ -381,6 +387,7 @@ func TestAssemble(t *testing.T) {
// This doesn't have to be a sensible program to run, it just has to compile.
t.Parallel()
+ require.Equal(t, LogicVersion, len(nonsense))
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
for _, spec := range OpSpecs {
@@ -468,6 +475,9 @@ func testProg(t testing.TB, source string, ver uint64, expected ...expect) *OpSt
require.NoError(t, err)
require.Equal(t, ops.Program, ops2.Program)
} else {
+ if err == nil {
+ t.Log(program)
+ }
require.Error(t, err)
errors := ops.Errors
for _, exp := range expected {
@@ -500,6 +510,7 @@ func testProg(t testing.TB, source string, ver uint64, expected ...expect) *OpSt
}
func testLine(t *testing.T, line string, ver uint64, expected string) {
+ t.Helper()
// By embedding the source line between two other lines, the
// test for the correct line number in the error is more
// meaningful.
@@ -510,6 +521,7 @@ func testLine(t *testing.T, line string, ver uint64, expected string) {
}
testProg(t, source, ver, expect{2, expected})
}
+
func TestAssembleTxna(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -654,6 +666,7 @@ func TestAssembleBytes(t *testing.T) {
variations := []string{
"byte b32 MFRGGZDFMY",
"byte base32 MFRGGZDFMY",
+ "byte base32 MFRGGZDFMY",
"byte base32(MFRGGZDFMY)",
"byte b32(MFRGGZDFMY)",
"byte b32 MFRGGZDFMY======",
@@ -672,6 +685,11 @@ func TestAssembleBytes(t *testing.T) {
expectedDefaultConsts := "0126010661626364656628"
expectedOptimizedConsts := "018006616263646566"
+ bad := [][]string{
+ {"byte", "...operation needs byte literal argument"},
+ {`byte "john" "doe"`, "...operation with extraneous argument"},
+ }
+
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
expected := expectedDefaultConsts
@@ -683,8 +701,19 @@ func TestAssembleBytes(t *testing.T) {
ops := testProg(t, vi, v)
s := hex.EncodeToString(ops.Program)
require.Equal(t, mutateProgVersion(v, expected), s)
+ // pushbytes should take the same input
+ if v >= 3 {
+ testProg(t, strings.Replace(vi, "byte", "pushbytes", 1), v)
+ }
}
+ for _, b := range bad {
+ testProg(t, b[0], v, expect{1, b[1]})
+ // pushbytes should produce the same errors
+ if v >= 3 {
+ testProg(t, strings.Replace(b[0], "byte", "pushbytes", 1), v, expect{1, b[1]})
+ }
+ }
})
}
}
@@ -1374,12 +1403,6 @@ func TestAssembleDisassembleCycle(t *testing.T) {
// Disassembly won't necessarily perfectly recreate the source text, but assembling the result of Disassemble() should be the same program bytes.
t.Parallel()
- tests := map[uint64]string{
- 1: v1Nonsense,
- 2: v1Nonsense + v2Nonsense,
- 3: v1Nonsense + v2Nonsense + v3Nonsense,
- }
-
// This confirms that each program compiles to the same bytes
// (except the leading version indicator), when compiled under
// original version, unspecified version (so it should pick up
@@ -1388,7 +1411,8 @@ func TestAssembleDisassembleCycle(t *testing.T) {
// optimizations in later versions that change the bytecode
// emitted. But currently it is, so we test it for now to
// catch any surprises.
- for v, source := range tests {
+ require.Equal(t, LogicVersion, len(nonsense))
+ for v, source := range nonsense {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, source, v)
t2, err := Disassemble(ops.Program)
@@ -1446,7 +1470,7 @@ func TestConstantArgs(t *testing.T) {
}
for v := uint64(3); v <= AssemblerMaxVersion; v++ {
testProg(t, "pushint", v, expect{1, "pushint needs one argument"})
- testProg(t, "pushbytes", v, expect{1, "pushbytes needs one argument"})
+ testProg(t, "pushbytes", v, expect{1, "pushbytes operation needs byte literal argument"})
}
}
@@ -2058,6 +2082,9 @@ func TestPragmas(t *testing.T) {
testProg(t, "#pragma version", assemblerNoVersion,
expect{1, "no version value"})
+
+ ops = testProg(t, " #pragma version 5 ", assemblerNoVersion)
+ require.Equal(t, uint64(5), ops.Version)
}
func TestAssemblePragmaVersion(t *testing.T) {
@@ -2205,7 +2232,8 @@ func TestDigAsm(t *testing.T) {
// Confirm that digging something out does not ruin our knowledge about the types in the middle
testProg(t, "int 1; byte 0x1234; byte 0x1234; dig 2; dig 3; +; pop; +", AssemblerMaxVersion,
- expect{6, "+ arg 1..."})
+ expect{8, "+ arg 1..."})
+ testProg(t, "int 3; pushbytes \"123456\"; int 1; dig 2; substring3", AssemblerMaxVersion)
}
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index 0e4a64664..5468d808c 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -169,9 +169,10 @@ var opDocByName = map[string]string{
"b~": "X with all bits inverted",
"log": "write bytes to log state of the current application",
- "itxn_begin": "Begin preparation of a new inner transaction",
- "itxn_field": "Set field F of the current inner transaction to X",
- "itxn_submit": "Execute the current inner transaction. Fail if 16 inner transactions have already been executed, or if the transaction itself fails.",
+ "itxn_begin": "begin preparation of a new inner transaction in a new transaction group",
+ "itxn_next": "begin preparation of a new inner transaction in the same transaction group",
+ "itxn_field": "set field F of the current inner transaction to X",
+ "itxn_submit": "execute the current inner transaction group. Fail if executing this group would exceed 16 total inner transactions, or if any transaction in the group fails.",
}
// OpDoc returns a description of the op
@@ -269,8 +270,8 @@ var opDocExtras = map[string]string{
"min_balance": "params: Before v4, Txn.Accounts offset. Since v4, Txn.Accounts offset or an account address that appears in Txn.Accounts or is Txn.Sender). Return: value.",
"app_opted_in": "params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), application id (or, since v4, a Txn.ForeignApps offset). Return: 1 if opted in and 0 otherwise.",
"app_local_get": "params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), state key. Return: value. The value is zero (of type uint64) if the key does not exist.",
- "app_local_get_ex": "params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), application id (or, since v4, a Txn.ForeignApps offset), state key. Return: did_exist flag (top of the stack, 1 if the application existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
- "app_global_get_ex": "params: Txn.ForeignApps offset (or, since v4, an application id that appears in Txn.ForeignApps or is the CurrentApplicationID), state key. Return: did_exist flag (top of the stack, 1 if the application existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
+ "app_local_get_ex": "params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), application id (or, since v4, a Txn.ForeignApps offset), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
+ "app_global_get_ex": "params: Txn.ForeignApps offset (or, since v4, an application id that appears in Txn.ForeignApps or is the CurrentApplicationID), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
"app_global_get": "params: state key. Return: value. The value is zero (of type uint64) if the key does not exist.",
"app_local_put": "params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), state key, value.",
"app_local_del": "params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), state key.\n\nDeleting a key which is already absent has no effect on the application local state. (In particular, it does _not_ cause the program to fail.)",
@@ -281,6 +282,7 @@ var opDocExtras = map[string]string{
"log": "`log` fails if called more than MaxLogCalls times in a program, or if the sum of logged bytes exceeds 1024 bytes.",
"itxn_begin": "`itxn_begin` initializes Sender to the application address; Fee to the minimum allowable, taking into account MinTxnFee and credit from overpaying in earlier transactions; FirstValid/LastValid to the values in the top-level transaction, and all other fields to zero values.",
"itxn_field": "`itxn_field` fails if X is of the wrong type for F, including a byte array of the wrong size for use as an address when F is an address field. `itxn_field` also fails if X is an account or asset that does not appear in `txn.Accounts` or `txn.ForeignAssets` of the top-level transaction. (Setting addresses in asset creation are exempted from this requirement.)",
+ "itxn_submit": "`itxn_submit` resets the current transaction so that it can not be resubmitted. A new `itxn_begin` is required to prepare another inner transaction.",
}
// OpDocExtra returns extra documentation text about an op
@@ -299,7 +301,7 @@ var OpGroups = map[string][]string{
"Loading Values": {"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "bzero", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "args", "txn", "gtxn", "txna", "txnas", "gtxna", "gtxnas", "gtxns", "gtxnsa", "gtxnsas", "global", "load", "loads", "store", "stores", "gload", "gloads", "gaid", "gaids"},
"Flow Control": {"err", "bnz", "bz", "b", "return", "pop", "dup", "dup2", "dig", "cover", "uncover", "swap", "select", "assert", "callsub", "retsub"},
"State Access": {"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get", "app_params_get", "log"},
- "Inner Transactions": {"itxn_begin", "itxn_field", "itxn_submit", "itxn", "itxna"},
+ "Inner Transactions": {"itxn_begin", "itxn_next", "itxn_field", "itxn_submit", "itxn", "itxna"},
}
// OpCost indicates the cost of an operation over the range of
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index aa2744ddc..9a2530f36 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -349,8 +349,8 @@ type EvalContext struct {
version uint64
scratch scratchSpace
- subtxn *transactions.SignedTxn // place to build for itxn_submit
- // The transactions Performed() and their effects
+ subtxns []transactions.SignedTxn // place to build for itxn_submit
+ // Previous transactions Performed() and their effects
InnerTxns []transactions.SignedTxnWithAD
cost int // cost incurred so far
@@ -3676,33 +3676,68 @@ func authorizedSender(cx *EvalContext, addr basics.Address) bool {
return appAddr == authorizer
}
-func opTxBegin(cx *EvalContext) {
- if cx.subtxn != nil {
- cx.err = errors.New("itxn_begin without itxn_submit")
- return
- }
- // Start fresh
- cx.subtxn = &transactions.SignedTxn{}
- // Fill in defaults.
+// addInnerTxn appends a fresh SignedTxn to subtxns, populated with reasonable
+// defaults.
+func addInnerTxn(cx *EvalContext) error {
addr, err := cx.getApplicationAddress()
if err != nil {
- cx.err = err
- return
+ return err
}
- fee := cx.Proto.MinTxnFee
- if cx.FeeCredit != nil {
- // Use credit to shrink the fee, but don't change FeeCredit
- // here, because they might never itxn_submit, or they might
- // change the fee. Do it in itxn_submit.
- fee = basics.SubSaturate(fee, *cx.FeeCredit)
+ // For compatibility with v5, in which failures only occurred in the submit,
+ // we only fail here if we are OVER the MaxInnerTransactions limit. Thus
+ // this allows construction of one more Inner than is actually allowed, and
+ // will fail in submit. (But we do want the check here, so this can't become
+ // unbounded.) The MaxTxGroupSize check can be, and is, precise.
+ if len(cx.InnerTxns)+len(cx.subtxns) > cx.Proto.MaxInnerTransactions ||
+ len(cx.subtxns) >= cx.Proto.MaxTxGroupSize {
+ return errors.New("attempt to create too many inner transactions")
}
- cx.subtxn.Txn.Header = transactions.Header{
- Sender: addr, // Default, to simplify usage
+
+ stxn := transactions.SignedTxn{}
+
+ groupFee := basics.MulSaturate(cx.Proto.MinTxnFee, uint64(len(cx.subtxns)+1))
+ groupPaid := uint64(0)
+ for _, ptxn := range cx.subtxns {
+ groupPaid = basics.AddSaturate(groupPaid, ptxn.Txn.Fee.Raw)
+ }
+
+ fee := uint64(0)
+ if groupPaid < groupFee {
+ fee = groupFee - groupPaid
+
+ if cx.FeeCredit != nil {
+ // Use credit to shrink the default populated fee, but don't change
+ // FeeCredit here, because they might never itxn_submit, or they
+ // might change the fee. Do it in itxn_submit.
+ fee = basics.SubSaturate(fee, *cx.FeeCredit)
+ }
+ }
+
+ stxn.Txn.Header = transactions.Header{
+ Sender: addr,
Fee: basics.MicroAlgos{Raw: fee},
FirstValid: cx.Txn.Txn.FirstValid,
LastValid: cx.Txn.Txn.LastValid,
}
+ cx.subtxns = append(cx.subtxns, stxn)
+ return nil
+}
+
+func opTxBegin(cx *EvalContext) {
+ if len(cx.subtxns) > 0 {
+ cx.err = errors.New("itxn_begin without itxn_submit")
+ return
+ }
+ cx.err = addInnerTxn(cx)
+}
+
+func opTxNext(cx *EvalContext) {
+ if len(cx.subtxns) == 0 {
+ cx.err = errors.New("itxn_next without itxn_begin")
+ return
+ }
+ cx.err = addInnerTxn(cx)
}
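The limit logic shared by `itxn_begin` and `itxn_next`, reduced to a predicate (a model, not the repo's code): one extra inner transaction may be prepared for v5 compatibility, and `itxn_submit` is where the oversized group actually fails.

```go
package main

import "fmt"

// mayAddInner reduces the check in addInnerTxn above.
func mayAddInner(done, pending, maxInner, maxGroup int) bool {
	return done+pending <= maxInner && pending < maxGroup
}

func main() {
	// With 16 inners already executed (and MaxInnerTransactions == 16),
	// one more may still be begun...
	fmt.Println(mayAddInner(16, 0, 16, 16)) // true
	// ...but submit then sees 16+1 > 16 and rejects the group.
}
```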
// availableAccount is used instead of accountReference for more recent opcodes
@@ -3741,11 +3776,12 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs txnFieldSpec, txn *tr
err = fmt.Errorf("Type arg not a byte array")
return
}
- txType, ok := innerTxnTypes[string(sv.Bytes)]
- if ok {
- txn.Type = txType
+ txType := string(sv.Bytes)
+ ver, ok := innerTxnTypes[txType]
+ if ok && ver <= cx.version {
+ txn.Type = protocol.TxType(txType)
} else {
- err = fmt.Errorf("%s is not a valid Type for itxn_field", sv.Bytes)
+ err = fmt.Errorf("%s is not a valid Type for itxn_field", txType)
}
case TypeEnum:
var i uint64
@@ -3755,9 +3791,9 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs txnFieldSpec, txn *tr
}
// i != 0 is so that the error reports 0 instead of Unknown
if i != 0 && i < uint64(len(TxnTypeNames)) {
- txType, ok := innerTxnTypes[TxnTypeNames[i]]
- if ok {
- txn.Type = txType
+ ver, ok := innerTxnTypes[TxnTypeNames[i]]
+ if ok && ver <= cx.version {
+ txn.Type = protocol.TxType(TxnTypeNames[i])
} else {
err = fmt.Errorf("%s is not a valid Type for itxn_field", TxnTypeNames[i])
}
@@ -3768,14 +3804,48 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs txnFieldSpec, txn *tr
txn.Sender, err = cx.availableAccount(sv)
case Fee:
txn.Fee.Raw, err = sv.uint()
- // FirstValid, LastValid unsettable: no motivation
- // Note unsettable: would be strange, as this "Note" would not end up "chain-visible"
+ // FirstValid, LastValid unsettable: little motivation (maybe an app call
+ // wants to inspect?) If we set, make sure they are legal, both for current
+ // round, and separation by MaxLifetime (check lifetime in submit, not here)
+ case Note:
+ if len(sv.Bytes) > cx.Proto.MaxTxnNoteBytes {
+ err = fmt.Errorf("%s may not exceed %d bytes", fs.field, cx.Proto.MaxTxnNoteBytes)
+ } else {
+ txn.Note = make([]byte, len(sv.Bytes))
+ copy(txn.Note[:], sv.Bytes)
+ }
// GenesisID, GenesisHash unsettable: surely makes no sense
// Group unsettable: Can't make groups from AVM (yet?)
// Lease unsettable: This seems potentially useful.
- // RekeyTo unsettable: Feels dangerous for first release.
- // KeyReg not allowed yet, so no fields settable
+ case RekeyTo:
+ txn.RekeyTo, err = sv.address()
+
+ // KeyReg
+ case VotePK:
+ if len(sv.Bytes) != 32 {
+ err = fmt.Errorf("%s must be 32 bytes", fs.field)
+ } else {
+ copy(txn.VotePK[:], sv.Bytes)
+ }
+ case SelectionPK:
+ if len(sv.Bytes) != 32 {
+ err = fmt.Errorf("%s must be 32 bytes", fs.field)
+ } else {
+ copy(txn.SelectionPK[:], sv.Bytes)
+ }
+ case VoteFirst:
+ var round uint64
+ round, err = sv.uint()
+ txn.VoteFirst = basics.Round(round)
+ case VoteLast:
+ var round uint64
+ round, err = sv.uint()
+ txn.VoteLast = basics.Round(round)
+ case VoteKeyDilution:
+ txn.VoteKeyDilution, err = sv.uint()
+ case Nonparticipation:
+ txn.Nonparticipation, err = sv.bool()
// Payment
case Receiver:
@@ -3820,7 +3890,7 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs txnFieldSpec, txn *tr
txn.AssetParams.URL, err = sv.string(cx.Proto.MaxAssetURLBytes)
case ConfigAssetMetadataHash:
if len(sv.Bytes) != 32 {
- err = fmt.Errorf("ConfigAssetMetadataHash must be 32 bytes")
+ err = fmt.Errorf("%s must be 32 bytes", fs.field)
} else {
copy(txn.AssetParams.MetadataHash[:], sv.Bytes)
}
@@ -3849,7 +3919,8 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs txnFieldSpec, txn *tr
}
func opTxField(cx *EvalContext) {
- if cx.subtxn == nil {
+ itx := len(cx.subtxns) - 1
+ if itx < 0 {
cx.err = errors.New("itxn_field without itxn_begin")
return
}
@@ -3857,10 +3928,11 @@ func opTxField(cx *EvalContext) {
field := TxnField(cx.program[cx.pc+1])
fs, ok := txnFieldSpecByField[field]
if !ok || fs.itxVersion == 0 || fs.itxVersion > cx.version {
- cx.err = fmt.Errorf("invalid itxn_field field %d", field)
+ cx.err = fmt.Errorf("invalid itxn_field %s", field)
+ return
}
sv := cx.stack[last]
- cx.err = cx.stackIntoTxnField(sv, fs, &cx.subtxn.Txn)
+ cx.err = cx.stackIntoTxnField(sv, fs, &cx.subtxns[itx].Txn)
cx.stack = cx.stack[:last] // pop
}
@@ -3870,71 +3942,95 @@ func opTxSubmit(cx *EvalContext) {
return
}
- if cx.subtxn == nil {
- cx.err = errors.New("itxn_submit without itxn_begin")
+ // Should never trigger, since itxn_next checks these too.
+ if len(cx.InnerTxns)+len(cx.subtxns) > cx.Proto.MaxInnerTransactions ||
+ len(cx.subtxns) > cx.Proto.MaxTxGroupSize {
+ cx.err = errors.New("too many inner transactions")
return
}
- if len(cx.InnerTxns) >= cx.Proto.MaxInnerTransactions {
- cx.err = errors.New("itxn_submit with MaxInnerTransactions")
+ if len(cx.subtxns) == 0 {
+ cx.err = errors.New("itxn_submit without itxn_begin")
return
}
- // Error out on anything unusual. Allow pay, axfer.
- switch cx.subtxn.Txn.Type {
- case protocol.PaymentTx, protocol.AssetTransferTx, protocol.AssetConfigTx, protocol.AssetFreezeTx:
- // only pay, axfer, acfg, afrz for now
- default:
- cx.err = fmt.Errorf("Invalid inner transaction type %#v", cx.subtxn.Txn.Type)
- return
+ // Check fees across the group first. Allows fee pooling in inner groups.
+ groupFee := basics.MulSaturate(cx.Proto.MinTxnFee, uint64(len(cx.subtxns)))
+ groupPaid := uint64(0)
+ for _, ptxn := range cx.subtxns {
+ groupPaid = basics.AddSaturate(groupPaid, ptxn.Txn.Fee.Raw)
}
-
- // The goal is to follow the same invariants used by the
- // transaction pool. Namely that any transaction that makes it
- // to Perform (which is equivalent to eval.applyTransaction)
- // is authorized, and WellFormed.
- if !authorizedSender(cx, cx.subtxn.Txn.Sender) {
- cx.err = fmt.Errorf("unauthorized")
- return
+ if groupPaid < groupFee {
+ // See if the FeeCredit is enough to cover the shortfall
+ shortfall := groupFee - groupPaid
+ if cx.FeeCredit == nil || *cx.FeeCredit < shortfall {
+ cx.err = fmt.Errorf("fee too small %#v", cx.subtxns)
+ return
+ }
+ *cx.FeeCredit -= shortfall
+ } else {
+ overpay := groupPaid - groupFee
+ if cx.FeeCredit == nil {
+ cx.FeeCredit = new(uint64)
+ }
+ *cx.FeeCredit = basics.AddSaturate(*cx.FeeCredit, overpay)
}
- // Recall that WellFormed does not care about individual
- // transaction fees because of fee pooling. So we check below.
- cx.err = cx.subtxn.Txn.WellFormed(*cx.Specials, *cx.Proto)
- if cx.err != nil {
- return
- }
+ for itx := range cx.subtxns {
+ // The goal is to follow the same invariants used by the
+ // transaction pool. Namely that any transaction that makes it
+ // to Perform (which is equivalent to eval.applyTransaction)
+ // is authorized, and WellFormed.
+ if !authorizedSender(cx, cx.subtxns[itx].Txn.Sender) {
+ cx.err = fmt.Errorf("unauthorized")
+ return
+ }
- paid := cx.subtxn.Txn.Fee.Raw
- if paid >= cx.Proto.MinTxnFee {
- // Over paying - accumulate into FeeCredit
- overpaid := paid - cx.Proto.MinTxnFee
- if cx.FeeCredit == nil {
- cx.FeeCredit = new(uint64)
+ // Recall that WellFormed does not care about individual
+ // transaction fees because of fee pooling. So we check below.
+ cx.err = cx.subtxns[itx].Txn.WellFormed(*cx.Specials, *cx.Proto)
+ if cx.err != nil {
+ return
}
- *cx.FeeCredit = basics.AddSaturate(*cx.FeeCredit, overpaid)
- } else {
- underpaid := cx.Proto.MinTxnFee - paid
- // Try to pay with FeeCredit, else fail.
- if cx.FeeCredit != nil && *cx.FeeCredit >= underpaid {
- *cx.FeeCredit -= underpaid
- } else {
- // We allow changing the fee. One pattern might be for an
- // app to unilaterally set its Fee to 0. The idea would be
- // that other transactions were supposed to overpay.
- cx.err = fmt.Errorf("fee too small")
+
+ ad, err := cx.Ledger.Perform(&cx.subtxns[itx].Txn, *cx.Specials)
+ if err != nil {
+ cx.err = err
return
}
+
+ cx.InnerTxns = append(cx.InnerTxns, transactions.SignedTxnWithAD{
+ SignedTxn: cx.subtxns[itx],
+ ApplyData: ad,
+ })
}
+ cx.subtxns = nil
+}
- ad, err := cx.Ledger.Perform(&cx.subtxn.Txn, *cx.Specials)
+// PcDetails returns the PC and the disassembled instructions at the PC, going up to 2 opcodes back
+func (cx *EvalContext) PcDetails() (pc int, dis string) {
+ const maxNumAdditionalOpcodes = 2
+ text, ds, err := disassembleInstrumented(cx.program, nil)
if err != nil {
- cx.err = err
- return
+ return cx.pc, dis
+ }
+
+ for i := 0; i < len(ds.pcOffset); i++ {
+ if ds.pcOffset[i].PC == cx.pc {
+ start := 0
+ if i >= maxNumAdditionalOpcodes {
+ start = i - maxNumAdditionalOpcodes
+ }
+
+ startTextPos := ds.pcOffset[start].Offset
+ endTextPos := len(text)
+ if i+1 < len(ds.pcOffset) {
+ endTextPos = ds.pcOffset[i+1].Offset
+ }
+
+ dis = text[startTextPos:endTextPos]
+ break
+ }
}
- cx.InnerTxns = append(cx.InnerTxns, transactions.SignedTxnWithAD{
- SignedTxn: *cx.subtxn,
- ApplyData: ad,
- })
- cx.subtxn = nil
+ return cx.pc, dis
}
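The reworked opTxSubmit above checks fees across the whole pending group before evaluating any member, which is what enables fee pooling inside inner groups. A minimal standalone sketch of that rule (the helper names mulSat, addSat, and checkGroupFees are illustrative; they mirror basics.MulSaturate and basics.AddSaturate):

package main

import (
	"errors"
	"fmt"
	"math"
)

// mulSat and addSat mirror basics.MulSaturate / basics.AddSaturate.
func mulSat(a, b uint64) uint64 {
	if a == 0 || b == 0 {
		return 0
	}
	if a > math.MaxUint64/b {
		return math.MaxUint64
	}
	return a * b
}

func addSat(a, b uint64) uint64 {
	if a > math.MaxUint64-b {
		return math.MaxUint64
	}
	return a + b
}

// checkGroupFees applies the pooled rule from opTxSubmit: the group owes
// minFee per transaction; any shortfall is drawn from the credit, and any
// surplus is added to it. Returns the updated credit.
func checkGroupFees(fees []uint64, minFee, credit uint64) (uint64, error) {
	groupFee := mulSat(minFee, uint64(len(fees)))
	paid := uint64(0)
	for _, f := range fees {
		paid = addSat(paid, f)
	}
	if paid < groupFee {
		shortfall := groupFee - paid
		if credit < shortfall {
			return credit, errors.New("fee too small")
		}
		return credit - shortfall, nil
	}
	return addSat(credit, paid-groupFee), nil
}

func main() {
	// Two inner txns at min fee 1001: one pays 3, the other 1999 -> exactly 2002.
	credit, err := checkGroupFees([]uint64{3, 1999}, 1001, 0)
	fmt.Println(credit, err) // 0 <nil>
}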
diff --git a/data/transactions/logic/evalAppTxn_test.go b/data/transactions/logic/evalAppTxn_test.go
index 693e1e82e..b24e10fc2 100644
--- a/data/transactions/logic/evalAppTxn_test.go
+++ b/data/transactions/logic/evalAppTxn_test.go
@@ -25,11 +25,18 @@ import (
"github.com/stretchr/testify/require"
)
-func TestActionTypes(t *testing.T) {
+func TestInnerTypesV5(t *testing.T) {
+ v5, _ := makeSampleEnvWithVersion(5)
+ // not allowed in v5
+ testApp(t, "itxn_begin; byte \"keyreg\"; itxn_field Type; itxn_submit; int 1;", v5, "keyreg is not a valid Type for itxn_field")
+ testApp(t, "itxn_begin; int keyreg; itxn_field TypeEnum; itxn_submit; int 1;", v5, "keyreg is not a valid Type for itxn_field")
+}
+
+func TestCurrentInnerTypes(t *testing.T) {
ep, ledger := makeSampleEnv()
testApp(t, "itxn_submit; int 1;", ep, "itxn_submit without itxn_begin")
testApp(t, "int pay; itxn_field TypeEnum; itxn_submit; int 1;", ep, "itxn_field without itxn_begin")
- testApp(t, "itxn_begin; itxn_submit; int 1;", ep, "Invalid inner transaction type")
+ testApp(t, "itxn_begin; itxn_submit; int 1;", ep, "unknown tx type")
// bad type
testApp(t, "itxn_begin; byte \"pya\"; itxn_field Type; itxn_submit; int 1;", ep, "pya is not a valid Type")
// mixed up the int form for the byte form
@@ -37,11 +44,9 @@ func TestActionTypes(t *testing.T) {
// or vice versa
testApp(t, obfuscate("itxn_begin; byte \"pay\"; itxn_field TypeEnum; itxn_submit; int 1;"), ep, "not a uint64")
- // good types, not alllowed yet
- testApp(t, "itxn_begin; byte \"keyreg\"; itxn_field Type; itxn_submit; int 1;", ep, "keyreg is not a valid Type for itxn_field")
+ // good types, not allowed yet
testApp(t, "itxn_begin; byte \"appl\"; itxn_field Type; itxn_submit; int 1;", ep, "appl is not a valid Type for itxn_field")
// same, as enums
- testApp(t, "itxn_begin; int keyreg; itxn_field TypeEnum; itxn_submit; int 1;", ep, "keyreg is not a valid Type for itxn_field")
testApp(t, "itxn_begin; int appl; itxn_field TypeEnum; itxn_submit; int 1;", ep, "appl is not a valid Type for itxn_field")
testApp(t, "itxn_begin; int 42; itxn_field TypeEnum; itxn_submit; int 1;", ep, "42 is not a valid TypeEnum")
testApp(t, "itxn_begin; int 0; itxn_field TypeEnum; itxn_submit; int 1;", ep, "0 is not a valid TypeEnum")
@@ -58,6 +63,10 @@ func TestActionTypes(t *testing.T) {
testApp(t, "itxn_begin; int acfg; itxn_field TypeEnum; itxn_submit; int 1;", ep, "insufficient balance")
testApp(t, "itxn_begin; int afrz; itxn_field TypeEnum; itxn_submit; int 1;", ep, "insufficient balance")
+ // allowed since v6
+ testApp(t, "itxn_begin; byte \"keyreg\"; itxn_field Type; itxn_submit; int 1;", ep, "insufficient balance")
+ testApp(t, "itxn_begin; int keyreg; itxn_field TypeEnum; itxn_submit; int 1;", ep, "insufficient balance")
+
// Establish 888 as the app id, and fund it.
ledger.NewApp(ep.Txn.Txn.Receiver, 888, basics.AppParams{})
ledger.NewAccount(basics.AppIndex(888).Address(), 200000)
@@ -220,6 +229,30 @@ func TestRekeyPay(t *testing.T) {
// See explanation in logicLedger's Perform()
}
+func TestRekeyBack(t *testing.T) {
+ payAndUnkey := `
+ itxn_begin
+ itxn_field Amount
+ itxn_field Receiver
+ itxn_field Sender
+ int pay
+ itxn_field TypeEnum
+ txn Sender
+ itxn_field RekeyTo
+ itxn_submit
+`
+
+ ep, ledger := makeSampleEnv()
+ ledger.NewApp(ep.Txn.Txn.Receiver, 888, basics.AppParams{})
+ testApp(t, "txn Sender; balance; int 0; ==;", ep)
+ testApp(t, "txn Sender; txn Accounts 1; int 100"+payAndUnkey, ep, "unauthorized")
+ ledger.NewAccount(ep.Txn.Txn.Sender, 120+3*ep.Proto.MinTxnFee)
+ ledger.Rekey(ep.Txn.Txn.Sender, basics.AppIndex(888).Address())
+ testApp(t, "txn Sender; txn Accounts 1; int 100"+payAndUnkey+"; int 1", ep)
+ // now rekeyed back to original
+ testApp(t, "txn Sender; txn Accounts 1; int 100"+payAndUnkey, ep, "unauthorized")
+}
+
func TestDefaultSender(t *testing.T) {
pay := `
itxn_begin
@@ -320,7 +353,7 @@ func TestExtraFields(t *testing.T) {
"non-zero fields for type axfer")
}
-func TestBadField(t *testing.T) {
+func TestBadFieldV5(t *testing.T) {
pay := `
itxn_begin
int 7; itxn_field AssetAmount;
@@ -334,12 +367,34 @@ func TestBadField(t *testing.T) {
itxn_submit
`
- ep, ledger := makeSampleEnv()
+ ep, ledger := makeSampleEnvWithVersion(5)
ledger.NewApp(ep.Txn.Txn.Receiver, 888, basics.AppParams{})
testApp(t, "global CurrentApplicationAddress; txn Accounts 1; int 100"+pay, ep,
"invalid itxn_field RekeyTo")
}
+func TestBadField(t *testing.T) {
+ pay := `
+ itxn_begin
+ int 7; itxn_field AssetAmount;
+ itxn_field Amount
+ itxn_field Receiver
+ itxn_field Sender
+ int pay
+ itxn_field TypeEnum
+ txn Receiver
+ itxn_field RekeyTo // ALLOWED, since v6
+ int 10
+ itxn_field FirstValid // NOT ALLOWED
+ itxn_submit
+`
+
+ ep, ledger := makeSampleEnv()
+ ledger.NewApp(ep.Txn.Txn.Receiver, 888, basics.AppParams{})
+ testApp(t, "global CurrentApplicationAddress; txn Accounts 1; int 100"+pay, ep,
+ "invalid itxn_field FirstValid")
+}
+
func TestNumInner(t *testing.T) {
pay := `
itxn_begin
@@ -360,7 +415,7 @@ func TestNumInner(t *testing.T) {
testApp(t, pay+pay+pay+";int 1", ep)
testApp(t, pay+pay+pay+pay+";int 1", ep)
// In the sample proto, MaxInnerTransactions = 4
- testApp(t, pay+pay+pay+pay+pay+";int 1", ep, "itxn_submit with MaxInnerTransactions")
+ testApp(t, pay+pay+pay+pay+pay+";int 1", ep, "too many inner transactions")
}
func TestAssetCreate(t *testing.T) {
@@ -434,3 +489,94 @@ func TestAssetFreeze(t *testing.T) {
require.NoError(t, err)
require.Equal(t, false, holding.Frozen)
}
+
+func TestFieldSetting(t *testing.T) {
+ ep, ledger := makeSampleEnv()
+ ledger.NewApp(ep.Txn.Txn.Receiver, 888, basics.AppParams{})
+ ledger.NewAccount(ledger.ApplicationID().Address(), 10*defaultEvalProto().MinTxnFee)
+ testApp(t, "itxn_begin; int 500; bzero; itxn_field Note; int 1", ep)
+ testApp(t, "itxn_begin; int 501; bzero; itxn_field Note; int 1", ep,
+ "Note may not exceed")
+
+ testApp(t, "itxn_begin; int 32; bzero; itxn_field VotePK; int 1", ep)
+ testApp(t, "itxn_begin; int 31; bzero; itxn_field VotePK; int 1", ep,
+ "VotePK must be 32")
+
+ testApp(t, "itxn_begin; int 32; bzero; itxn_field SelectionPK; int 1", ep)
+ testApp(t, "itxn_begin; int 33; bzero; itxn_field SelectionPK; int 1", ep,
+ "SelectionPK must be 32")
+
+ testApp(t, "itxn_begin; int 32; bzero; itxn_field RekeyTo; int 1", ep)
+ testApp(t, "itxn_begin; int 31; bzero; itxn_field RekeyTo; int 1", ep,
+ "not an address")
+
+ testApp(t, "itxn_begin; int 6; bzero; itxn_field ConfigAssetUnitName; int 1", ep)
+ testApp(t, "itxn_begin; int 7; bzero; itxn_field ConfigAssetUnitName; int 1", ep,
+ "value is too long")
+
+ testApp(t, "itxn_begin; int 12; bzero; itxn_field ConfigAssetName; int 1", ep)
+ testApp(t, "itxn_begin; int 13; bzero; itxn_field ConfigAssetName; int 1", ep,
+ "value is too long")
+}
+
+func TestInnerGroup(t *testing.T) {
+ ep, ledger := makeSampleEnv()
+ ledger.NewApp(ep.Txn.Txn.Receiver, 888, basics.AppParams{})
+ // Need both fees and both payments
+ ledger.NewAccount(ledger.ApplicationID().Address(), 999+2*defaultEvalProto().MinTxnFee)
+ pay := `
+int pay; itxn_field TypeEnum;
+int 500; itxn_field Amount;
+txn Sender; itxn_field Receiver;
+`
+ testApp(t, "itxn_begin"+pay+"itxn_next"+pay+"itxn_submit; int 1", ep,
+ "insufficient balance")
+
+ // NewAccount overwrites the existing balance
+ ledger.NewAccount(ledger.ApplicationID().Address(), 1000+2*defaultEvalProto().MinTxnFee)
+ testApp(t, "itxn_begin"+pay+"itxn_next"+pay+"itxn_submit; int 1", ep)
+}
+
+func TestInnerFeePooling(t *testing.T) {
+ ep, ledger := makeSampleEnv()
+ ledger.NewApp(ep.Txn.Txn.Receiver, 888, basics.AppParams{})
+ ledger.NewAccount(ledger.ApplicationID().Address(), 50_000)
+ pay := `
+int pay; itxn_field TypeEnum;
+int 500; itxn_field Amount;
+txn Sender; itxn_field Receiver;
+`
+ // Force the first fee to 3; the second defaults to the group's remaining minimum: 2*minfee - 3 = 2002 - 3 = 1999
+ testApp(t, "itxn_begin"+
+ pay+
+ "int 3; itxn_field Fee;"+
+ "itxn_next"+
+ pay+
+ "itxn_submit; itxn Fee; int 1999; ==", ep)
+
+ // Same first, but force the second too low
+ testApp(t, "itxn_begin"+
+ pay+
+ "int 3; itxn_field Fee;"+
+ "itxn_next"+
+ pay+
+ "int 1998; itxn_field Fee;"+
+ "itxn_submit; int 1", ep, "fee too small")
+
+ // Overpay in first itxn, the second will default to less
+ testApp(t, "itxn_begin"+
+ pay+
+ "int 2000; itxn_field Fee;"+
+ "itxn_next"+
+ pay+
+ "itxn_submit; itxn Fee; int 2; ==", ep)
+
+ // Same first, but force the second too low
+ testApp(t, "itxn_begin"+
+ pay+
+ "int 2000; itxn_field Fee;"+
+ "itxn_next"+
+ pay+
+ "int 1; itxn_field Fee;"+
+ "itxn_submit; itxn Fee; int 1", ep, "fee too small")
+}
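For readers tracing TestInnerFeePooling: assuming the sample proto's MinTxnFee is 1001 (consistent with the 1999 and 2 expectations above), the expected fees fall straight out of the pooled rule:

package main

import "fmt"

func main() {
	const minFee = 1001 // assumed sample-proto MinTxnFee
	// First fee forced to 3; the second defaults to the group's remainder.
	fmt.Println(2*minFee - 3) // 1999, matching "itxn Fee; int 1999; =="
	// First overpays at 2000; the second defaults to the remainder.
	fmt.Println(2*minFee - 2000) // 2, matching "itxn Fee; int 2; =="
}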
diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go
index f270bd541..5a212629c 100644
--- a/data/transactions/logic/evalStateful_test.go
+++ b/data/transactions/logic/evalStateful_test.go
@@ -2493,7 +2493,7 @@ func TestPooledAppCallsVerifyOp(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- source := `#pragma version 5
+ source := `
global CurrentApplicationID
pop
byte 0x01
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index 3e1fe56af..74b9f931c 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -64,6 +64,7 @@ func defaultEvalProtoWithVersion(version uint64) config.ConsensusParams {
SchemaBytesMinBalance: 1005,
MaxInnerTransactions: 4,
+ MaxTxGroupSize: 8,
// With the addition of itxn_field, itxn_submit, which rely on
// machinery outside logic package for validity checking, we
@@ -73,6 +74,14 @@ func defaultEvalProtoWithVersion(version uint64) config.ConsensusParams {
MaxAssetUnitNameBytes: 6,
MaxAssetURLBytes: 32,
MaxAssetDecimals: 4,
+ SupportRekeying: true,
+ MaxTxnNoteBytes: 500,
+ EnableFeePooling: true,
+
+ // Chosen to be different from one another and from normal proto
+ MaxAppTxnAccounts: 3,
+ MaxAppTxnForeignApps: 5,
+ MaxAppTxnForeignAssets: 6,
}
}
@@ -999,6 +1008,10 @@ byte 0x0706000000000000000000000000000000000000000000000000000000000000
&&
`
+const globalV6TestProgram = globalV5TestProgram + `
+// No new globals in v6
+`
+
func TestGlobal(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -1028,6 +1041,10 @@ func TestGlobal(t *testing.T) {
GroupID, globalV5TestProgram,
EvalStateful, CheckStateful,
},
+ 6: {
+ GroupID, globalV6TestProgram,
+ EvalStateful, CheckStateful,
+ },
}
// test keys are versions, so they must be in the range 1..AssemblerMaxVersion plus the zero version
require.LessOrEqual(t, len(tests), AssemblerMaxVersion+1)
@@ -1439,6 +1456,12 @@ assert
int 1
`
+const testTxnProgramTextV6 = testTxnProgramTextV5 + `
+assert
+
+int 1
+`
+
func makeSampleTxn() transactions.SignedTxn {
var txn transactions.SignedTxn
copy(txn.Txn.Sender[:], []byte("aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"))
@@ -1526,7 +1549,7 @@ func TestTxn(t *testing.T) {
t.Parallel()
for i, txnField := range TxnFieldNames {
fs := txnFieldSpecByField[TxnField(i)]
- if !fs.effects && !strings.Contains(testTxnProgramTextV5, txnField) {
+ if !fs.effects && !strings.Contains(testTxnProgramTextV6, txnField) {
if txnField != FirstValidTime.String() {
t.Errorf("TestTxn missing field %v", txnField)
}
@@ -1539,6 +1562,7 @@ func TestTxn(t *testing.T) {
3: testTxnProgramTextV3,
4: testTxnProgramTextV4,
5: testTxnProgramTextV5,
+ 6: testTxnProgramTextV6,
}
clearOps := testProg(t, "int 1", 1)
@@ -1823,11 +1847,15 @@ gtxn 0 Sender
&&
`
+ gtxnTextV6 := gtxnTextV5 + `
+`
+
tests := map[uint64]string{
1: gtxnTextV1,
2: gtxnTextV2,
4: gtxnTextV4,
5: gtxnTextV5,
+ 6: gtxnTextV6,
}
for v, source := range tests {
@@ -4842,3 +4870,42 @@ func TestLog(t *testing.T) {
require.False(t, pass)
}
}
+
+func TestPcDetails(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ var tests = []struct {
+ source string
+ pc int
+ det string
+ }{
+ {"int 1; int 2; -", 5, "pushint 1\npushint 2\n-\n"},
+ {"int 1; err", 3, "pushint 1\nerr\n"},
+ {"int 1; dup; int 2; -; +", 6, "dup\npushint 2\n-\n"},
+ {"b end; end:", 4, ""},
+ }
+ for i, test := range tests {
+ t.Run(fmt.Sprintf("i=%d", i), func(t *testing.T) {
+ ops := testProg(t, test.source, LogicVersion)
+ txn := makeSampleTxn()
+ txgroup := makeSampleTxnGroup(txn)
+ txn.Lsig.Logic = ops.Program
+ sb := strings.Builder{}
+ ep := defaultEvalParams(&sb, &txn)
+ ep.TxnGroup = txgroup
+
+ var cx EvalContext
+ cx.EvalParams = ep
+ cx.runModeFlags = runModeSignature
+
+ pass, err := eval(ops.Program, &cx)
+ require.Error(t, err)
+ require.False(t, pass)
+
+ pc, det := cx.PcDetails()
+ require.Equal(t, test.pc, pc)
+ require.Equal(t, test.det, det)
+ })
+ }
+}
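TestPcDetails exercises the new PcDetails helper. A hedged usage sketch, assuming it sits in the logic package alongside EvalContext (logEvalFailure is illustrative, not part of this diff):

// logEvalFailure surfaces the failing program counter plus the preceding
// couple of disassembled opcodes when eval returns an error.
func logEvalFailure(cx *EvalContext, err error) error {
	pc, dis := cx.PcDetails()
	return fmt.Errorf("eval failed at pc=%d; recent ops:\n%s: %w", pc, dis, err)
}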
diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go
index 32d29fd85..68ae0f12f 100644
--- a/data/transactions/logic/fields.go
+++ b/data/transactions/logic/fields.go
@@ -197,16 +197,16 @@ var txnFieldSpecs = []txnFieldSpec{
{FirstValid, StackUint64, 0, 0, false},
{FirstValidTime, StackUint64, 0, 0, false},
{LastValid, StackUint64, 0, 0, false},
- {Note, StackBytes, 0, 0, false},
+ {Note, StackBytes, 0, 6, false},
{Lease, StackBytes, 0, 0, false},
{Receiver, StackBytes, 0, 5, false},
{Amount, StackUint64, 0, 5, false},
{CloseRemainderTo, StackBytes, 0, 5, false},
- {VotePK, StackBytes, 0, 0, false},
- {SelectionPK, StackBytes, 0, 0, false},
- {VoteFirst, StackUint64, 0, 0, false},
- {VoteLast, StackUint64, 0, 0, false},
- {VoteKeyDilution, StackUint64, 0, 0, false},
+ {VotePK, StackBytes, 0, 6, false},
+ {SelectionPK, StackBytes, 0, 6, false},
+ {VoteFirst, StackUint64, 0, 6, false},
+ {VoteLast, StackUint64, 0, 6, false},
+ {VoteKeyDilution, StackUint64, 0, 6, false},
{Type, StackBytes, 0, 5, false},
{TypeEnum, StackUint64, 0, 5, false},
{XferAsset, StackUint64, 0, 5, false},
@@ -224,7 +224,7 @@ var txnFieldSpecs = []txnFieldSpec{
{NumAccounts, StackUint64, 2, 0, false},
{ApprovalProgram, StackBytes, 2, 0, false},
{ClearStateProgram, StackBytes, 2, 0, false},
- {RekeyTo, StackBytes, 2, 0, false},
+ {RekeyTo, StackBytes, 2, 6, false},
{ConfigAsset, StackUint64, 2, 5, false},
{ConfigAssetTotal, StackUint64, 2, 5, false},
{ConfigAssetDecimals, StackUint64, 2, 5, false},
@@ -249,7 +249,7 @@ var txnFieldSpecs = []txnFieldSpec{
{LocalNumUint, StackUint64, 3, 0, false},
{LocalNumByteSlice, StackUint64, 3, 0, false},
{ExtraProgramPages, StackUint64, 4, 0, false},
- {Nonparticipation, StackUint64, 5, 0, false},
+ {Nonparticipation, StackUint64, 5, 6, false},
{Logs, StackBytes, 5, 5, true},
{NumLogs, StackUint64, 5, 5, true},
@@ -279,11 +279,12 @@ var txnaFieldSpecByField = map[TxnField]txnFieldSpec{
Logs: {Logs, StackBytes, 5, 5, true},
}
-var innerTxnTypes = map[string]protocol.TxType{
- string(protocol.PaymentTx): protocol.PaymentTx,
- string(protocol.AssetTransferTx): protocol.AssetTransferTx,
- string(protocol.AssetConfigTx): protocol.AssetConfigTx,
- string(protocol.AssetFreezeTx): protocol.AssetFreezeTx,
+var innerTxnTypes = map[string]uint64{
+ string(protocol.PaymentTx): 5,
+ string(protocol.KeyRegistrationTx): 6,
+ string(protocol.AssetTransferTx): 5,
+ string(protocol.AssetConfigTx): 5,
+ string(protocol.AssetFreezeTx): 5,
}
// TxnTypeNames is the values of Txn.Type in enum order
@@ -638,7 +639,7 @@ func init() {
txnFieldSpecByField = make(map[TxnField]txnFieldSpec, len(TxnFieldNames))
for i, s := range txnFieldSpecs {
if int(s.field) != i {
- panic("txnFieldTypePairs disjoint with TxnField enum")
+ panic("txnFieldSpecs disjoint with TxnField enum")
}
TxnFieldTypes[i] = s.ftype
txnFieldSpecByField[s.field] = s
@@ -654,8 +655,11 @@ func init() {
}
GlobalFieldTypes = make([]StackType, len(GlobalFieldNames))
globalFieldSpecByField = make(map[GlobalField]globalFieldSpec, len(GlobalFieldNames))
- for _, s := range globalFieldSpecs {
- GlobalFieldTypes[int(s.field)] = s.ftype
+ for i, s := range globalFieldSpecs {
+ if int(s.field) != i {
+ panic("globalFieldSpecs disjoint with GlobalField enum")
+ }
+ GlobalFieldTypes[i] = s.ftype
globalFieldSpecByField[s.field] = s
}
globalFieldSpecByName = make(gfNameSpecMap, len(GlobalFieldNames))
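The innerTxnTypes change above turns the map's value into the program version that first allows each inner type. A standalone sketch of the gate that stackIntoTxnField now applies (allowedInnerType is illustrative):

package main

import "fmt"

// Values are the first program version allowed to set each inner type.
var innerTxnTypes = map[string]uint64{
	"pay": 5, "axfer": 5, "acfg": 5, "afrz": 5, "keyreg": 6,
}

func allowedInnerType(txType string, progVersion uint64) error {
	ver, ok := innerTxnTypes[txType]
	if !ok || ver > progVersion {
		return fmt.Errorf("%s is not a valid Type for itxn_field", txType)
	}
	return nil
}

func main() {
	fmt.Println(allowedInnerType("keyreg", 5)) // rejected at v5
	fmt.Println(allowedInnerType("keyreg", 6)) // <nil>: allowed from v6
}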
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index d45dd25fb..604db789a 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -21,7 +21,7 @@ import (
)
// LogicVersion defines default assembler and max eval versions
-const LogicVersion = 5
+const LogicVersion = 6
// rekeyingEnabledVersion is the version of TEAL where RekeyTo functionality
// was enabled. This is important to remember so that old TEAL accounts cannot
@@ -317,6 +317,7 @@ var OpSpecs = []OpSpec{
{0xb3, "itxn_submit", opTxSubmit, asmDefault, disDefault, nil, nil, 5, runModeApplication, opDefault},
{0xb4, "itxn", opItxn, asmItxn, disTxn, nil, oneAny, 5, runModeApplication, immediates("f")},
{0xb5, "itxna", opItxna, asmItxna, disTxna, nil, oneAny, 5, runModeApplication, immediates("f", "i")},
+ {0xb6, "itxn_next", opTxNext, asmDefault, disDefault, nil, nil, 6, runModeApplication, opDefault},
// Dynamic indexing
{0xc0, "txnas", opTxnas, assembleTxnas, disTxn, oneInt, oneAny, 5, modeAny, immediates("f")},
diff --git a/data/transactions/logictest/ledger.go b/data/transactions/logictest/ledger.go
index 5d75b210f..51ed94124 100644
--- a/data/transactions/logictest/ledger.go
+++ b/data/transactions/logictest/ledger.go
@@ -549,6 +549,23 @@ func (l *Ledger) move(from basics.Address, to basics.Address, amount uint64) err
return nil
}
+func (l *Ledger) rekey(tx *transactions.Transaction) error {
+ // rekeying: update br.auth to tx.RekeyTo if provided
+ if (tx.RekeyTo != basics.Address{}) {
+ br, ok := l.balances[tx.Sender]
+ if !ok {
+ return fmt.Errorf("no account")
+ }
+ if tx.RekeyTo == tx.Sender {
+ br.auth = basics.Address{}
+ } else {
+ br.auth = tx.RekeyTo
+ }
+ l.balances[tx.Sender] = br
+ }
+ return nil
+}
+
func (l *Ledger) pay(from basics.Address, pay transactions.PaymentTxnFields) error {
err := l.move(from, pay.Receiver, pay.Amount.Raw)
if err != nil {
@@ -706,6 +723,12 @@ func (l *Ledger) Perform(txn *transactions.Transaction, spec transactions.Specia
if err != nil {
return ad, err
}
+
+ err = l.rekey(txn)
+ if err != nil {
+ return ad, err
+ }
+
switch txn.Type {
case protocol.PaymentTx:
err = l.pay(txn.Sender, txn.PaymentTxnFields)
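The test ledger's new rekey helper above encodes two rules: a non-zero RekeyTo becomes the sender's auth address, and rekeying back to the sender itself clears it. A self-contained sketch of just that rule (addr stands in for basics.Address):

package main

import "fmt"

type addr [4]byte // stand-in for basics.Address

// applyRekey returns the sender's new auth address after a transaction.
func applyRekey(auth, sender, rekeyTo addr) addr {
	var zero addr
	if rekeyTo == zero {
		return auth // no rekey requested
	}
	if rekeyTo == sender {
		return zero // rekeyed back to the original key
	}
	return rekeyTo
}

func main() {
	var a, b addr
	a[0], b[0] = 1, 2
	fmt.Println(applyRekey(addr{}, a, b)) // auth becomes b
	fmt.Println(applyRekey(b, a, a))      // cleared back to zero
}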
diff --git a/data/transactions/teal.go b/data/transactions/teal.go
index e2f6718b7..0826290b9 100644
--- a/data/transactions/teal.go
+++ b/data/transactions/teal.go
@@ -37,9 +37,6 @@ type EvalDelta struct {
Logs []string `codec:"lg,allocbound=config.MaxLogCalls"`
- // Intentionally, temporarily wrong - need to decide how to
- // allocbound properly when structure is recursive. Even a bound
- // of 2 would allow arbitrarily large object if deep.
InnerTxns []SignedTxnWithAD `codec:"itx,allocbound=config.MaxInnerTransactions"`
}
diff --git a/data/txntest/txn.go b/data/txntest/txn.go
index 5d3a6d77f..063f085bf 100644
--- a/data/txntest/txn.go
+++ b/data/txntest/txn.go
@@ -51,7 +51,7 @@ type Txn struct {
Type protocol.TxType
Sender basics.Address
- Fee uint64
+ Fee interface{} // basics.MicroAlgos, uint64, int, or nil
FirstValid basics.Round
LastValid basics.Round
Note []byte
@@ -113,7 +113,7 @@ func (tx *Txn) Noted(note string) *Txn {
// FillDefaults populates some obvious defaults from config params,
// unless they have already been set.
func (tx *Txn) FillDefaults(params config.ConsensusParams) {
- if tx.Fee == 0 {
+ if tx.Fee == nil {
tx.Fee = params.MinTxnFee
}
if tx.LastValid == 0 {
@@ -146,11 +146,23 @@ func assemble(source string) []byte {
// Txn produces a transactions.Transaction from the fields in this Txn
func (tx Txn) Txn() transactions.Transaction {
+ switch fee := tx.Fee.(type) {
+ case basics.MicroAlgos:
+ // nothing, already have MicroAlgos
+ case uint64:
+ tx.Fee = basics.MicroAlgos{Raw: fee}
+ case int:
+ if fee >= 0 {
+ tx.Fee = basics.MicroAlgos{Raw: uint64(fee)}
+ }
+ case nil:
+ tx.Fee = basics.MicroAlgos{}
+ }
return transactions.Transaction{
Type: tx.Type,
Header: transactions.Header{
Sender: tx.Sender,
- Fee: basics.MicroAlgos{Raw: tx.Fee},
+ Fee: tx.Fee.(basics.MicroAlgos),
FirstValid: tx.FirstValid,
LastValid: tx.LastValid,
Note: tx.Note,
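The switch to Fee interface{} lets tests distinguish "unset" from "explicitly zero": FillDefaults only fills when Fee is nil, and Txn() normalizes whatever was supplied. A hedged usage sketch (imports as in the txntest package's callers):

var t1 = txntest.Txn{Type: protocol.PaymentTx}         // Fee nil: FillDefaults supplies MinTxnFee
var t2 = txntest.Txn{Type: protocol.PaymentTx, Fee: 0} // explicit zero now survives FillDefaults
var t3 = txntest.Txn{Type: protocol.PaymentTx, Fee: basics.MicroAlgos{Raw: 2000}}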
diff --git a/go.mod b/go.mod
index e78060cd2..00a9719d6 100644
--- a/go.mod
+++ b/go.mod
@@ -8,8 +8,10 @@ require (
github.com/algorand/graphtrace v0.0.0-20201117160756-e524ed1a6f64
github.com/algorand/msgp v1.1.48
github.com/algorand/oapi-codegen v1.3.5-algorand5
- github.com/algorand/websocket v1.4.2
+ github.com/algorand/websocket v1.4.4
+ github.com/algorand/xorfilter v0.2.0
github.com/aws/aws-sdk-go v1.16.5
+ github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e
github.com/cpuguy83/go-md2man v1.0.8 // indirect
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018
github.com/dchest/siphash v1.2.1
@@ -24,7 +26,6 @@ require (
github.com/gopherjs/gopherwasm v1.0.1 // indirect
github.com/gorilla/context v1.1.1 // indirect
github.com/gorilla/mux v1.6.2
- github.com/gorilla/schema v1.0.2
github.com/gorilla/websocket v1.4.2 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmoiron/sqlx v1.2.0
@@ -45,7 +46,7 @@ require (
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a
golang.org/x/net v0.0.0-20200904194848-62affa334b73
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 // indirect
- golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f
+ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009
diff --git a/go.sum b/go.sum
index c4fe5d039..0ac28a945 100644
--- a/go.sum
+++ b/go.sum
@@ -10,10 +10,14 @@ github.com/algorand/msgp v1.1.48 h1:5P+gVmTnk0m37r+rA3ZsFZW219ZqmCLulW5f8Z+3nx8=
github.com/algorand/msgp v1.1.48/go.mod h1:LtOntbYiCHj/Sl/Sqxtf8CZOrDt2a8Dv3tLaS6mcnUE=
github.com/algorand/oapi-codegen v1.3.5-algorand5 h1:y576Ca2/guQddQrQA7dtL5KcOx5xQgPeIupiuFMGyCI=
github.com/algorand/oapi-codegen v1.3.5-algorand5/go.mod h1:/k0Ywn0lnt92uBMyE+yiRf/Wo3/chxHHsAfenD09EbY=
-github.com/algorand/websocket v1.4.2 h1:zMB7ukz+c7tcef8rVqmKQTv6KQtxXtCFuiAqKaE7n9I=
-github.com/algorand/websocket v1.4.2/go.mod h1:0nFSn+xppw/GZS9hgWPS3b8/4FcA3Pj7XQxm+wqHGx8=
+github.com/algorand/websocket v1.4.4 h1:BL9atWs/7tkV73NCwiLZ5YqDENMBsSxozc5gDtPdsQ4=
+github.com/algorand/websocket v1.4.4/go.mod h1:0nFSn+xppw/GZS9hgWPS3b8/4FcA3Pj7XQxm+wqHGx8=
+github.com/algorand/xorfilter v0.2.0 h1:YC31ANxdZ2jmtbwqv1+USskVSqjkeiRZcQGc6//ro9Q=
+github.com/algorand/xorfilter v0.2.0/go.mod h1:f5cJsYrFbJhXkbjnV4odJB44np05/PvwvdBnABnQoUs=
github.com/aws/aws-sdk-go v1.16.5 h1:NVxzZXIuwX828VcJrpNxxWjur1tlOBISdMdDdHIKHcc=
github.com/aws/aws-sdk-go v1.16.5/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e h1:CHPYEbz71w8DqJ7DRIq+MXyCQsdibK08vdcQTY4ufas=
+github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e/go.mod h1:6Xhs0ZlsRjXLIiSMLKafbZxML/j30pg9Z1priLuha5s=
github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M=
github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
github.com/cyberdelia/templates v0.0.0-20191230040416-20a325f050d4 h1:Fphwr1XDjkTR/KFbrrkLfY6D2CEOlHqFGomQQrxcHFs=
@@ -184,6 +188,8 @@ golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f h1:Fqb3ao1hUmOR3GkUOg/Y+BadLwykBIzs5q8Ez2SbHyc=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
diff --git a/installer/config.json.example b/installer/config.json.example
index 42b6361bd..fac112201 100644
--- a/installer/config.json.example
+++ b/installer/config.json.example
@@ -1,5 +1,5 @@
{
- "Version": 16,
+ "Version": 19,
"AccountUpdatesStatsInterval": 5000000000,
"AccountsRebuildSynchronousMode": 1,
"AnnounceParticipationKey": true,
@@ -44,11 +44,13 @@
"EnableProfiler": false,
"EnableRequestLogger": false,
"EnableTopAccountsReporting": false,
+ "EnableVerbosedTransactionSyncLogging": false,
"EndpointAddress": "127.0.0.1:0",
"FallbackDNSResolverAddress": "",
+ "ForceFetchTransactions": false,
"ForceRelayMessages": false,
"GossipFanout": 4,
- "IncomingConnectionsLimit": 10000,
+ "IncomingConnectionsLimit": 800,
"IncomingMessageFilterBucketCount": 5,
"IncomingMessageFilterBucketSize": 512,
"IsIndexerActive": false,
@@ -71,6 +73,7 @@
"PeerConnectionsUpdateInterval": 3600,
"PeerPingPeriodSeconds": 0,
"PriorityPeers": {},
+ "ProposalAssemblyTime": 250000000,
"PublicAddress": "",
"ReconnectTime": 60000000000,
"ReservedFDs": 256,
@@ -82,6 +85,8 @@
"TLSCertFile": "",
"TLSKeyFile": "",
"TelemetryToLog": true,
+ "TransactionSyncDataExchangeRate": 0,
+ "TransactionSyncSignificantMessageThreshold": 0,
"TxPoolExponentialIncreaseFactor": 2,
"TxPoolSize": 15000,
"TxSyncIntervalSeconds": 60,
diff --git a/ledger/README.md b/ledger/README.md
index 999950ee5..37d6baed7 100644
--- a/ledger/README.md
+++ b/ledger/README.md
@@ -97,11 +97,6 @@ locking.
- `Totals(round)` returns the totals of accounts, using the account
tracker.
-### Time tracker
-
-- `Timestamp(round)` uses the time tracker to return the time as
- of `round`.
-
### Recent transactions tracker
- `Committed(txnid)` returns whether `txid` has been recently committed,
diff --git a/ledger/accountdb.go b/ledger/accountdb.go
index 1a8b6f973..2b12b42c2 100644
--- a/ledger/accountdb.go
+++ b/ledger/accountdb.go
@@ -682,8 +682,8 @@ func removeEmptyAccountData(tx *sql.Tx, queryAddresses bool) (num int64, address
// the full AccountData because we need to store a large number of these
// in memory (say, 1M), and storing that many AccountData could easily
// cause us to run out of memory.
-func accountDataToOnline(address basics.Address, ad *basics.AccountData, proto config.ConsensusParams) *onlineAccount {
- return &onlineAccount{
+func accountDataToOnline(address basics.Address, ad *basics.AccountData, proto config.ConsensusParams) *ledgercore.OnlineAccount {
+ return &ledgercore.OnlineAccount{
Address: address,
MicroAlgos: ad.MicroAlgos,
RewardsBase: ad.RewardsBase,
@@ -711,14 +711,18 @@ func accountsReset(tx *sql.Tx) error {
return err
}
-// accountsRound returns the tracker balances round number, and the round of the hash tree
-// if the hash of the tree doesn't exists, it returns zero.
-func accountsRound(tx *sql.Tx) (rnd basics.Round, hashrnd basics.Round, err error) {
+// accountsRound returns the tracker balances round number
+func accountsRound(tx *sql.Tx) (rnd basics.Round, err error) {
err = tx.QueryRow("SELECT rnd FROM acctrounds WHERE id='acctbase'").Scan(&rnd)
if err != nil {
return
}
+ return
+}
+// accountsHashRound returns the round of the hash tree
+// if the hash of the tree doesn't exist, it returns zero.
+func accountsHashRound(tx *sql.Tx) (hashrnd basics.Round, err error) {
err = tx.QueryRow("SELECT rnd FROM acctrounds WHERE id='hashbase'").Scan(&hashrnd)
if err == sql.ErrNoRows {
hashrnd = basics.Round(0)
@@ -727,7 +731,7 @@ func accountsRound(tx *sql.Tx) (rnd basics.Round, hashrnd basics.Round, err erro
return
}
-func accountsDbInit(r db.Queryable, w db.Queryable) (*accountsDbQueries, error) {
+func accountsInitDbQueries(r db.Queryable, w db.Queryable) (*accountsDbQueries, error) {
var err error
qs := &accountsDbQueries{}
@@ -1009,14 +1013,14 @@ func (qs *accountsDbQueries) close() {
//
// Note that this does not check if the accounts have a vote key valid for any
// particular round (past, present, or future).
-func accountsOnlineTop(tx *sql.Tx, offset, n uint64, proto config.ConsensusParams) (map[basics.Address]*onlineAccount, error) {
+func accountsOnlineTop(tx *sql.Tx, offset, n uint64, proto config.ConsensusParams) (map[basics.Address]*ledgercore.OnlineAccount, error) {
rows, err := tx.Query("SELECT address, data FROM accountbase WHERE normalizedonlinebalance>0 ORDER BY normalizedonlinebalance DESC, address DESC LIMIT ? OFFSET ?", n, offset)
if err != nil {
return nil, err
}
defer rows.Close()
- res := make(map[basics.Address]*onlineAccount, n)
+ res := make(map[basics.Address]*ledgercore.OnlineAccount, n)
for rows.Next() {
var addrbuf []byte
var buf []byte
@@ -1181,54 +1185,8 @@ func accountsNewRound(tx *sql.Tx, updates compactAccountDeltas, creatables map[b
return
}
-// totalsNewRounds updates the accountsTotals by applying series of round changes
-func totalsNewRounds(tx *sql.Tx, updates []ledgercore.AccountDeltas, compactUpdates compactAccountDeltas, accountTotals []ledgercore.AccountTotals, proto config.ConsensusParams) (err error) {
- var ot basics.OverflowTracker
- totals, err := accountsTotals(tx, false)
- if err != nil {
- return
- }
-
- // copy the updates base account map, since we don't want to modify the input map.
- accounts := make(map[basics.Address]basics.AccountData, compactUpdates.len())
- for i := 0; i < compactUpdates.len(); i++ {
- addr, acctData := compactUpdates.getByIdx(i)
- accounts[addr] = acctData.old.accountData
- }
-
- for i := 0; i < len(updates); i++ {
- totals.ApplyRewards(accountTotals[i].RewardsLevel, &ot)
-
- for j := 0; j < updates[i].Len(); j++ {
- addr, data := updates[i].GetByIdx(j)
-
- if oldAccountData, has := accounts[addr]; has {
- totals.DelAccount(proto, oldAccountData, &ot)
- } else {
- err = fmt.Errorf("missing old account data")
- return
- }
-
- totals.AddAccount(proto, data, &ot)
- accounts[addr] = data
- }
- }
-
- if ot.Overflowed {
- err = fmt.Errorf("overflow computing totals")
- return
- }
-
- err = accountsPutTotals(tx, totals, false)
- if err != nil {
- return
- }
-
- return
-}
-
// updates the round number associated with the current account data.
-func updateAccountsRound(tx *sql.Tx, rnd basics.Round, hashRound basics.Round) (err error) {
+func updateAccountsRound(tx *sql.Tx, rnd basics.Round) (err error) {
res, err := tx.Exec("UPDATE acctrounds SET rnd=? WHERE id='acctbase' AND rnd<?", rnd, rnd)
if err != nil {
return
@@ -1254,13 +1212,17 @@ func updateAccountsRound(tx *sql.Tx, rnd basics.Round, hashRound basics.Round) (
return
}
}
+ return
+}
- res, err = tx.Exec("INSERT OR REPLACE INTO acctrounds(id,rnd) VALUES('hashbase',?)", hashRound)
+// updates the round number associated with the hash of current account data.
+func updateAccountsHashRound(tx *sql.Tx, hashRound basics.Round) (err error) {
+ res, err := tx.Exec("INSERT OR REPLACE INTO acctrounds(id,rnd) VALUES('hashbase',?)", hashRound)
if err != nil {
return
}
- aff, err = res.RowsAffected()
+ aff, err := res.RowsAffected()
if err != nil {
return
}
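With accountsRound split from accountsHashRound (and updateAccountsRound from updateAccountsHashRound), callers that want both rounds now make two calls. A sketch, assuming it lives in the ledger package:

// readRounds fetches the balances round and the hash-tree round, which the
// pre-split accountsRound used to return together.
func readRounds(tx *sql.Tx) (balancesRnd, hashRnd basics.Round, err error) {
	balancesRnd, err = accountsRound(tx)
	if err != nil {
		return
	}
	hashRnd, err = accountsHashRound(tx)
	return
}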
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index 38dcf996b..ab64fe48f 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -35,320 +35,19 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-algorand/util/db"
)
-func randomAddress() basics.Address {
- var addr basics.Address
- crypto.RandBytes(addr[:])
- return addr
-}
-
-func randomNote() []byte {
- var note [16]byte
- crypto.RandBytes(note[:])
- return note[:]
-}
-
-func randomAccountData(rewardsLevel uint64) basics.AccountData {
- var data basics.AccountData
-
- // Avoid overflowing totals
- data.MicroAlgos.Raw = crypto.RandUint64() % (1 << 32)
-
- switch crypto.RandUint64() % 3 {
- case 0:
- data.Status = basics.Online
- case 1:
- data.Status = basics.Offline
- default:
- data.Status = basics.NotParticipating
- }
-
- data.RewardsBase = rewardsLevel
- data.VoteFirstValid = 0
- data.VoteLastValid = 1000
- return data
-}
-
-func randomFullAccountData(rewardsLevel, lastCreatableID uint64) (basics.AccountData, uint64) {
- data := randomAccountData(rewardsLevel)
-
- crypto.RandBytes(data.VoteID[:])
- crypto.RandBytes(data.SelectionID[:])
- data.VoteFirstValid = basics.Round(crypto.RandUint64())
- data.VoteLastValid = basics.Round(crypto.RandUint64())
- data.VoteKeyDilution = crypto.RandUint64()
- if 1 == (crypto.RandUint64() % 2) {
- // if account has created assets, have these defined.
- data.AssetParams = make(map[basics.AssetIndex]basics.AssetParams)
- createdAssetsCount := crypto.RandUint64()%20 + 1
- for i := uint64(0); i < createdAssetsCount; i++ {
- ap := basics.AssetParams{
- Total: crypto.RandUint64(),
- Decimals: uint32(crypto.RandUint64() % 20),
- DefaultFrozen: (crypto.RandUint64()%2 == 0),
- UnitName: fmt.Sprintf("un%x", uint32(crypto.RandUint64()%0x7fffffff)),
- AssetName: fmt.Sprintf("an%x", uint32(crypto.RandUint64()%0x7fffffff)),
- URL: fmt.Sprintf("url%x", uint32(crypto.RandUint64()%0x7fffffff)),
- }
- crypto.RandBytes(ap.MetadataHash[:])
- crypto.RandBytes(ap.Manager[:])
- crypto.RandBytes(ap.Reserve[:])
- crypto.RandBytes(ap.Freeze[:])
- crypto.RandBytes(ap.Clawback[:])
- lastCreatableID++
- data.AssetParams[basics.AssetIndex(lastCreatableID)] = ap
- }
- }
- if 1 == (crypto.RandUint64()%2) && lastCreatableID > 0 {
- // if account owns assets
- data.Assets = make(map[basics.AssetIndex]basics.AssetHolding)
- ownedAssetsCount := crypto.RandUint64()%20 + 1
- for i := uint64(0); i < ownedAssetsCount; i++ {
- ah := basics.AssetHolding{
- Amount: crypto.RandUint64(),
- Frozen: (crypto.RandUint64()%2 == 0),
- }
- data.Assets[basics.AssetIndex(crypto.RandUint64()%lastCreatableID)] = ah
- }
- }
- if 1 == (crypto.RandUint64() % 5) {
- crypto.RandBytes(data.AuthAddr[:])
- }
-
- if 1 == (crypto.RandUint64()%3) && lastCreatableID > 0 {
- data.AppLocalStates = make(map[basics.AppIndex]basics.AppLocalState)
- appStatesCount := crypto.RandUint64()%20 + 1
- for i := uint64(0); i < appStatesCount; i++ {
- ap := basics.AppLocalState{
- Schema: basics.StateSchema{
- NumUint: crypto.RandUint64()%5 + 1,
- NumByteSlice: crypto.RandUint64() % 5,
- },
- KeyValue: make(map[string]basics.TealValue),
- }
-
- for i := uint64(0); i < ap.Schema.NumUint; i++ {
- appName := fmt.Sprintf("lapp%x-%x", crypto.RandUint64(), i)
- ap.KeyValue[appName] = basics.TealValue{
- Type: basics.TealUintType,
- Uint: crypto.RandUint64(),
- }
- }
- for i := uint64(0); i < ap.Schema.NumByteSlice; i++ {
- appName := fmt.Sprintf("lapp%x-%x", crypto.RandUint64(), i)
- tv := basics.TealValue{
- Type: basics.TealBytesType,
- }
- bytes := make([]byte, crypto.RandUint64()%uint64(config.MaxBytesKeyValueLen-len(appName)))
- crypto.RandBytes(bytes[:])
- tv.Bytes = string(bytes)
- ap.KeyValue[appName] = tv
- }
- if len(ap.KeyValue) == 0 {
- ap.KeyValue = nil
- }
- data.AppLocalStates[basics.AppIndex(crypto.RandUint64()%lastCreatableID)] = ap
- }
- }
-
- if 1 == (crypto.RandUint64() % 3) {
- data.TotalAppSchema = basics.StateSchema{
- NumUint: crypto.RandUint64() % 50,
- NumByteSlice: crypto.RandUint64() % 50,
- }
- }
- if 1 == (crypto.RandUint64() % 3) {
- data.AppParams = make(map[basics.AppIndex]basics.AppParams)
- appParamsCount := crypto.RandUint64()%5 + 1
- for i := uint64(0); i < appParamsCount; i++ {
- ap := basics.AppParams{
- ApprovalProgram: make([]byte, int(crypto.RandUint63())%config.MaxAppProgramLen),
- ClearStateProgram: make([]byte, int(crypto.RandUint63())%config.MaxAppProgramLen),
- GlobalState: make(basics.TealKeyValue),
- StateSchemas: basics.StateSchemas{
- LocalStateSchema: basics.StateSchema{
- NumUint: crypto.RandUint64()%5 + 1,
- NumByteSlice: crypto.RandUint64() % 5,
- },
- GlobalStateSchema: basics.StateSchema{
- NumUint: crypto.RandUint64()%5 + 1,
- NumByteSlice: crypto.RandUint64() % 5,
- },
- },
- }
- if len(ap.ApprovalProgram) > 0 {
- crypto.RandBytes(ap.ApprovalProgram[:])
- } else {
- ap.ApprovalProgram = nil
- }
- if len(ap.ClearStateProgram) > 0 {
- crypto.RandBytes(ap.ClearStateProgram[:])
- } else {
- ap.ClearStateProgram = nil
- }
-
- for i := uint64(0); i < ap.StateSchemas.LocalStateSchema.NumUint+ap.StateSchemas.GlobalStateSchema.NumUint; i++ {
- appName := fmt.Sprintf("tapp%x-%x", crypto.RandUint64(), i)
- ap.GlobalState[appName] = basics.TealValue{
- Type: basics.TealUintType,
- Uint: crypto.RandUint64(),
- }
- }
- for i := uint64(0); i < ap.StateSchemas.LocalStateSchema.NumByteSlice+ap.StateSchemas.GlobalStateSchema.NumByteSlice; i++ {
- appName := fmt.Sprintf("tapp%x-%x", crypto.RandUint64(), i)
- tv := basics.TealValue{
- Type: basics.TealBytesType,
- }
- bytes := make([]byte, crypto.RandUint64()%uint64(config.MaxBytesKeyValueLen))
- crypto.RandBytes(bytes[:])
- tv.Bytes = string(bytes)
- ap.GlobalState[appName] = tv
- }
- if len(ap.GlobalState) == 0 {
- ap.GlobalState = nil
- }
- lastCreatableID++
- data.AppParams[basics.AppIndex(lastCreatableID)] = ap
- }
-
- }
- return data, lastCreatableID
-}
-
-func randomAccounts(niter int, simpleAccounts bool) map[basics.Address]basics.AccountData {
- res := make(map[basics.Address]basics.AccountData)
- if simpleAccounts {
- for i := 0; i < niter; i++ {
- res[randomAddress()] = randomAccountData(0)
- }
- } else {
- lastCreatableID := crypto.RandUint64() % 512
- for i := 0; i < niter; i++ {
- res[randomAddress()], lastCreatableID = randomFullAccountData(0, lastCreatableID)
- }
- }
- return res
-}
-
-func randomDeltas(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, imbalance int64) {
- updates, totals, imbalance, _ = randomDeltasImpl(niter, base, rewardsLevel, true, 0)
- return
-}
-
-func randomDeltasFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, imbalance int64, lastCreatableID uint64) {
- updates, totals, imbalance, lastCreatableID = randomDeltasImpl(niter, base, rewardsLevel, false, lastCreatableIDIn)
- return
-}
-
-func randomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, imbalance int64, lastCreatableID uint64) {
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
- totals = make(map[basics.Address]basics.AccountData)
-
- // copy base -> totals
- for addr, data := range base {
- totals[addr] = data
- }
-
- // if making a full delta then need to determine max asset/app id to get rid of conflicts
- lastCreatableID = lastCreatableIDIn
- if !simple {
- for _, ad := range base {
- for aid := range ad.AssetParams {
- if uint64(aid) > lastCreatableID {
- lastCreatableID = uint64(aid)
- }
- }
- for aid := range ad.AppParams {
- if uint64(aid) > lastCreatableID {
- lastCreatableID = uint64(aid)
- }
- }
- }
- }
-
- // Change some existing accounts
- {
- i := 0
- for addr, old := range base {
- if i >= len(base)/2 || i >= niter {
- break
- }
-
- if addr == testPoolAddr {
- continue
- }
- i++
-
- var new basics.AccountData
- if simple {
- new = randomAccountData(rewardsLevel)
- } else {
- new, lastCreatableID = randomFullAccountData(rewardsLevel, lastCreatableID)
- }
- updates.Upsert(addr, new)
- imbalance += int64(old.WithUpdatedRewards(proto, rewardsLevel).MicroAlgos.Raw - new.MicroAlgos.Raw)
- totals[addr] = new
- break
- }
- }
-
- // Change some new accounts
- for i := 0; i < niter; i++ {
- addr := randomAddress()
- old := totals[addr]
- var new basics.AccountData
- if simple {
- new = randomAccountData(rewardsLevel)
- } else {
- new, lastCreatableID = randomFullAccountData(rewardsLevel, lastCreatableID)
- }
- updates.Upsert(addr, new)
- imbalance += int64(old.WithUpdatedRewards(proto, rewardsLevel).MicroAlgos.Raw - new.MicroAlgos.Raw)
- totals[addr] = new
- }
-
- return
-}
-
-func randomDeltasBalanced(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData) {
- updates, totals, _ = randomDeltasBalancedImpl(niter, base, rewardsLevel, true, 0)
- return
-}
-
-func randomDeltasBalancedFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, lastCreatableID uint64) {
- updates, totals, lastCreatableID = randomDeltasBalancedImpl(niter, base, rewardsLevel, false, lastCreatableIDIn)
- return
-}
-
-func randomDeltasBalancedImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, lastCreatableID uint64) {
- var imbalance int64
- if simple {
- updates, totals, imbalance = randomDeltas(niter, base, rewardsLevel)
- } else {
- updates, totals, imbalance, lastCreatableID = randomDeltasFull(niter, base, rewardsLevel, lastCreatableIDIn)
- }
-
- oldPool := base[testPoolAddr]
- newPool := oldPool
- newPool.MicroAlgos.Raw += uint64(imbalance)
-
- updates.Upsert(testPoolAddr, newPool)
- totals[testPoolAddr] = newPool
-
- return updates, totals, lastCreatableID
-}
-
func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics.Address]basics.AccountData) {
- r, _, err := accountsRound(tx)
+ r, err := accountsRound(tx)
require.NoError(t, err)
require.Equal(t, r, rnd)
- aq, err := accountsDbInit(tx, tx)
+ aq, err := accountsInitDbQueries(tx, tx)
require.NoError(t, err)
defer aq.close()
@@ -382,18 +81,18 @@ func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics.
totals, err := accountsTotals(tx, false)
require.NoError(t, err)
- require.Equal(t, totals.Online.Money.Raw, totalOnline)
+ require.Equal(t, totals.Online.Money.Raw, totalOnline, "mismatching total online money")
require.Equal(t, totals.Offline.Money.Raw, totalOffline)
require.Equal(t, totals.NotParticipating.Money.Raw, totalNotPart)
require.Equal(t, totals.Participating().Raw, totalOnline+totalOffline)
require.Equal(t, totals.All().Raw, totalOnline+totalOffline+totalNotPart)
- d, err := aq.lookup(randomAddress())
+ d, err := aq.lookup(ledgertesting.RandomAddress())
require.NoError(t, err)
require.Equal(t, rnd, d.round)
require.Equal(t, d.accountData, basics.AccountData{})
- onlineAccounts := make(map[basics.Address]*onlineAccount)
+ onlineAccounts := make(map[basics.Address]*ledgercore.OnlineAccount)
for addr, data := range accts {
if data.Status == basics.Online {
onlineAccounts[addr] = accountDataToOnline(addr, &data, proto)
@@ -406,7 +105,7 @@ func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics.
require.Equal(t, i, len(dbtop))
// Compute the top-N accounts ourselves
- var testtop []onlineAccount
+ var testtop []ledgercore.OnlineAccount
for _, data := range onlineAccounts {
testtop = append(testtop, *data)
}
@@ -447,7 +146,7 @@ func TestAccountDBInit(t *testing.T) {
require.NoError(t, err)
defer tx.Rollback()
- accts := randomAccounts(20, true)
+ accts := ledgertesting.RandomAccounts(20, true)
newDB, err := accountsInit(tx, accts, proto)
require.NoError(t, err)
require.True(t, newDB)
@@ -540,10 +239,12 @@ func TestAccountDBRound(t *testing.T) {
require.NoError(t, err)
defer tx.Rollback()
- accts := randomAccounts(20, true)
+ accts := ledgertesting.RandomAccounts(20, true)
_, err = accountsInit(tx, accts, proto)
require.NoError(t, err)
checkAccounts(t, tx, 0, accts)
+ totals, err := accountsTotals(tx, false)
+ require.NoError(t, err)
// used to determine how many creatable elements will be in the test per iteration
numElementsPerSegement := 10
@@ -553,11 +254,12 @@ func TestAccountDBRound(t *testing.T) {
ctbsList, randomCtbs := randomCreatables(numElementsPerSegement)
expectedDbImage := make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable)
var baseAccounts lruAccounts
+ var newaccts map[basics.Address]basics.AccountData
baseAccounts.init(nil, 100, 80)
for i := 1; i < 10; i++ {
var updates ledgercore.AccountDeltas
- var newaccts map[basics.Address]basics.AccountData
- updates, newaccts, _, lastCreatableID = randomDeltasFull(20, accts, 0, lastCreatableID)
+ updates, newaccts, _, lastCreatableID = ledgertesting.RandomDeltasFull(20, accts, 0, lastCreatableID)
+ totals = ledgertesting.CalculateNewRoundAccountTotals(t, updates, 0, proto, accts, totals)
accts = newaccts
ctbsWithDeletes := randomCreatableSampling(i, ctbsList, randomCtbs,
expectedDbImage, numElementsPerSegement)
@@ -565,15 +267,26 @@ func TestAccountDBRound(t *testing.T) {
updatesCnt := makeCompactAccountDeltas([]ledgercore.AccountDeltas{updates}, baseAccounts)
err = updatesCnt.accountsLoadOld(tx)
require.NoError(t, err)
- err = totalsNewRounds(tx, []ledgercore.AccountDeltas{updates}, updatesCnt, []ledgercore.AccountTotals{{}}, proto)
+ err = accountsPutTotals(tx, totals, false)
require.NoError(t, err)
_, err = accountsNewRound(tx, updatesCnt, ctbsWithDeletes, proto, basics.Round(i))
require.NoError(t, err)
- err = updateAccountsRound(tx, basics.Round(i), 0)
+ err = updateAccountsRound(tx, basics.Round(i))
require.NoError(t, err)
checkAccounts(t, tx, basics.Round(i), accts)
checkCreatables(t, tx, i, expectedDbImage)
}
+
+ // test the accounts totals
+ var updates ledgercore.AccountDeltas
+ for addr, acctData := range newaccts {
+ updates.Upsert(addr, acctData)
+ }
+
+ expectedTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, 0, proto, nil, ledgercore.AccountTotals{})
+ actualTotals, err := accountsTotals(tx, false)
+ require.NoError(t, err)
+ require.Equal(t, expectedTotals, actualTotals)
}
// checkCreatables compares the expected database image to the actual database content
@@ -683,7 +396,7 @@ func randomCreatable(uniqueAssetIds map[basics.CreatableIndex]bool) (
creatable := ledgercore.ModifiedCreatable{
Ctype: ctype,
Created: (crypto.RandUint64() % 2) == 1,
- Creator: randomAddress(),
+ Creator: ledgertesting.RandomAddress(),
Ndeltas: 1,
}
@@ -705,7 +418,7 @@ func generateRandomTestingAccountBalances(numAccounts int) (updates map[basics.A
updates = make(map[basics.Address]basics.AccountData, numAccounts)
for i := 0; i < numAccounts; i++ {
- addr := randomAddress()
+ addr := ledgertesting.RandomAddress()
updates[addr] = basics.AccountData{
MicroAlgos: basics.MicroAlgos{Raw: 0x000ffffffffffffff / uint64(numAccounts)},
Status: basics.NotParticipating,
@@ -804,7 +517,7 @@ func benchmarkReadingRandomBalances(b *testing.B, inMemory bool) {
accounts := benchmarkInitBalances(b, b.N, dbs, proto)
- qs, err := accountsDbInit(dbs.Rdb.Handle, dbs.Wdb.Handle)
+ qs, err := accountsInitDbQueries(dbs.Rdb.Handle, dbs.Wdb.Handle)
require.NoError(b, err)
// read all the balances in the database, shuffled
@@ -977,14 +690,14 @@ func TestAccountsReencoding(t *testing.T) {
}
for _, oldAccData := range oldEncodedAccountsData {
- addr := randomAddress()
+ addr := ledgertesting.RandomAddress()
_, err = tx.ExecContext(ctx, "INSERT INTO accountbase (address, data) VALUES (?, ?)", addr[:], oldAccData)
if err != nil {
return err
}
}
for i := 0; i < 100; i++ {
- addr := randomAddress()
+ addr := ledgertesting.RandomAddress()
accData := basics.AccountData{
MicroAlgos: basics.MicroAlgos{Raw: 0x000ffffffffffffff},
Status: basics.NotParticipating,
@@ -1058,7 +771,7 @@ func TestAccountsDbQueriesCreateClose(t *testing.T) {
return nil
})
require.NoError(t, err)
- qs, err := accountsDbInit(dbs.Rdb.Handle, dbs.Wdb.Handle)
+ qs, err := accountsInitDbQueries(dbs.Rdb.Handle, dbs.Wdb.Handle)
require.NoError(t, err)
require.NotNil(t, qs.listCreatablesStmt)
qs.close()
@@ -1069,7 +782,7 @@ func TestAccountsDbQueriesCreateClose(t *testing.T) {
func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder bool) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesisInitState, _ := testGenerateInitState(b, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ := ledgertesting.GenerateInitState(b, protocol.ConsensusCurrentVersion, 100)
const inMem = false
log := logging.TestingLog(b)
cfg := config.GetDefaultLocal()
@@ -1170,7 +883,7 @@ func TestCompactAccountDeltas(t *testing.T) {
a.Equal(-1, idx)
a.Equal(accountDelta{}, data)
- addr := randomAddress()
+ addr := ledgertesting.RandomAddress()
data, idx = ad.get(addr)
a.Equal(-1, idx)
a.Equal(accountDelta{}, data)
@@ -1217,7 +930,7 @@ func TestCompactAccountDeltas(t *testing.T) {
a.Equal(addr, address)
a.Equal(accountDelta{new: sample2.new, old: old1}, data)
- addr1 := randomAddress()
+ addr1 := ledgertesting.RandomAddress()
old2 := persistedAccountData{addr: addr1, accountData: basics.AccountData{MicroAlgos: basics.MicroAlgos{Raw: 789}}}
ad.upsertOld(old2)
a.Equal(2, ad.len())
@@ -1235,7 +948,7 @@ func TestCompactAccountDeltas(t *testing.T) {
a.Equal(addr, address)
a.Equal(accountDelta{new: sample2.new, old: old2}, data)
- addr2 := randomAddress()
+ addr2 := ledgertesting.RandomAddress()
idx = ad.insert(addr2, sample2)
a.Equal(3, ad.len())
a.Equal(2, idx)
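The large deletion above moves the random account and delta generators into the exported ledger/testing package, so tests outside this file can reuse them. Roughly, per the call sites in this diff:

import ledgertesting "github.com/algorand/go-algorand/ledger/testing"

addr := ledgertesting.RandomAddress()
accts := ledgertesting.RandomAccounts(20, true) // 20 simple accounts
updates, newAccts, _, lastID := ledgertesting.RandomDeltasFull(20, accts, 0, 0)
_, _, _, _ = addr, updates, newAccts, lastID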
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index fe2719620..16d933fbb 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -20,13 +20,9 @@ import (
"container/heap"
"context"
"database/sql"
- "encoding/hex"
"fmt"
"io"
- "os"
- "path/filepath"
"sort"
- "strconv"
"sync"
"sync/atomic"
"time"
@@ -35,7 +31,6 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/merkletrie"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
@@ -52,24 +47,8 @@ const (
balancesFlushInterval = 5 * time.Second
// pendingDeltasFlushThreshold is the deltas count threshold above which we flush the pending balances regardless of the flush interval.
pendingDeltasFlushThreshold = 128
- // trieRebuildAccountChunkSize defines the number of accounts that would get read at a single chunk
- // before added to the trie during trie construction
- trieRebuildAccountChunkSize = 16384
- // trieRebuildCommitFrequency defines the number of accounts that would get added before we call evict to commit the changes and adjust the memory cache.
- trieRebuildCommitFrequency = 65536
- // trieAccumulatedChangesFlush defines the number of pending changes that would be applied to the merkle trie before
- // we attempt to commit them to disk while writing a batch of rounds balances to disk.
- trieAccumulatedChangesFlush = 256
)
-// trieCachedNodesCount defines how many balances trie nodes we would like to keep around in memory.
-// value was calibrated using BenchmarkCalibrateCacheNodeSize
-var trieCachedNodesCount = 9000
-
-// merkleCommitterNodesPerPage controls how many nodes will be stored in a single page
-// value was calibrated using BenchmarkCalibrateNodesPerPage
-var merkleCommitterNodesPerPage = int64(116)
-
// baseAccountsPendingAccountsBufferSize defines the size of the base account pending accounts buffer size.
// At the beginning of a new round, the entries from this buffer are being flushed into the base accounts map.
const baseAccountsPendingAccountsBufferSize = 100000
@@ -99,14 +78,6 @@ const initializingAccountCachesMessageTimeout = 3 * time.Second
// where we end up batching up to 1000 rounds in a single update.
const accountsUpdatePerRoundHighWatermark = 1 * time.Second
-// TrieMemoryConfig is the memory configuration setup used for the merkle trie.
-var TrieMemoryConfig = merkletrie.MemoryConfig{
- NodesCountPerPage: merkleCommitterNodesPerPage,
- CachedNodesCount: trieCachedNodesCount,
- PageFillFactor: 0.95,
- MaxChildrenPagesThreshold: 64,
-}
-
// A modifiedAccount represents an account that has been modified since
// the persistent state stored in the account DB (i.e., in the range of
// rounds covered by the accountUpdates tracker).
@@ -125,28 +96,9 @@ type modifiedAccount struct {
type accountUpdates struct {
// constant variables ( initialized on initialize, and never changed afterward )
- // initAccounts specifies initial account values for database.
- initAccounts map[basics.Address]basics.AccountData
-
- // initProto specifies the initial consensus parameters at the genesis block.
- initProto config.ConsensusParams
-
- // dbDirectory is the directory where the ledger and block sql file resides as well as the parent directory for the catchup files to be generated
- dbDirectory string
-
- // catchpointInterval is the configured interval at which the accountUpdates would generate catchpoint labels and catchpoint files.
- catchpointInterval uint64
-
// archivalLedger determines whether the associated ledger was configured as archival ledger or not.
archivalLedger bool
- // catchpointFileHistoryLength defines how many catchpoint files we want to store back.
- // 0 means don't store any, -1 mean unlimited and positive number suggest the number of most recent catchpoint files.
- catchpointFileHistoryLength int
-
- // vacuumOnStartup controls whether the accounts database would get vacuumed on startup.
- vacuumOnStartup bool
-
// dynamic variables
// Connection to the database.
@@ -155,9 +107,9 @@ type accountUpdates struct {
// Prepared SQL statements for fast accounts DB lookups.
accountsq *accountsDbQueries
- // dbRound is always exactly accountsRound(),
- // cached to avoid SQL queries.
- dbRound basics.Round
+ // cachedDBRound is always exactly the tracker DB round (and therefore accountsRound()),
+ // cached for use in lookup functions
+ cachedDBRound basics.Round
// deltas stores updates for every round after dbRound.
deltas []ledgercore.AccountDeltas
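
The dbRound → cachedDBRound rename matters because readers now treat the value as a cached snapshot rather than the authoritative DB round. A minimal, illustrative sketch (names invented; not the go-algorand code) of the snapshot-and-retry pattern the lookup functions in the hunks below follow:

package main

import (
	"fmt"
	"sync"
)

type tracker struct {
	mu            sync.RWMutex
	readCond      *sync.Cond
	cachedDBRound int
	deltas        []int
}

func newTracker() *tracker {
	t := &tracker{}
	t.readCond = sync.NewCond(t.mu.RLocker())
	return t
}

// lookup samples (cachedDBRound, len(deltas)), answers from that snapshot,
// and retries after waiting on the cond if the snapshot went stale.
func (t *tracker) lookup() int {
	t.mu.RLock()
	defer t.mu.RUnlock()
	for {
		currentDbRound := t.cachedDBRound
		currentDeltaLen := len(t.deltas)
		stale := false // would be set if the DB advanced mid-read
		if !stale {
			return currentDbRound + currentDeltaLen
		}
		// wait until either the cached round or the deltas change, then retry
		for currentDbRound >= t.cachedDBRound && currentDeltaLen == len(t.deltas) {
			t.readCond.Wait()
		}
	}
}

func main() {
	fmt.Println(newTracker().lookup())
}
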
@@ -181,75 +133,29 @@ type accountUpdates struct {
// i.e., totals is one longer than deltas.
roundTotals []ledgercore.AccountTotals
- // roundDigest stores the digest of the block for every round starting with dbRound and every round after it.
- roundDigest []crypto.Digest
-
// log copied from ledger
log logging.Logger
- // lastFlushTime is the time we last flushed updates to
- // the accounts DB (bumping dbRound).
- lastFlushTime time.Time
-
// ledger is the source ledger, which is used to synchronize
// the rounds at which we need to flush the balances to disk
// in favor of the catchpoint to be generated.
ledger ledgerForTracker
- // The Trie tracking the current account balances. Always matches the balances that were
- // written to the database.
- balancesTrie *merkletrie.Trie
-
- // The last catchpoint label that was written to the database. Should always align with what's in the database.
- // note that this is the last catchpoint *label* and not the catchpoint file.
- lastCatchpointLabel string
-
- // catchpointWriting help to synchronize the catchpoint file writing. When this atomic variable is 0, no writing is going on.
- // Any non-zero value indicates a catchpoint being written.
- catchpointWriting int32
-
- // catchpointSlowWriting suggest to the accounts writer that it should finish writing up the catchpoint file ASAP.
- // when this channel is closed, the accounts writer would try and complete the writing as soon as possible.
- // otherwise, it would take it's time and perform periodic sleeps between chunks processing.
- catchpointSlowWriting chan struct{}
-
- // ctx is the context for the committing go-routine. It's also used as the "parent" of the catchpoint generation operation.
- ctx context.Context
-
- // ctxCancel is the canceling function for canceling the committing go-routine ( i.e. signaling the committing go-routine that it's time to abort )
- ctxCancel context.CancelFunc
-
// deltasAccum stores the accumulated deltas for every round starting dbRound-1.
deltasAccum []int
- // committedOffset is the offset at which we'd like to persist all the previous account information to disk.
- committedOffset chan deferredCommit
-
// accountsMu is the synchronization mutex for accessing the various non-static variables.
accountsMu deadlock.RWMutex
// accountsReadCond used to synchronize read access to the internal data structures.
accountsReadCond *sync.Cond
- // accountsWriting provides synchronization around the background writing of account balances.
- accountsWriting sync.WaitGroup
-
- // commitSyncerClosed is the blocking channel for synchronizing closing the commitSyncer goroutine. Once it's closed, the
- // commitSyncer can be assumed to have aborted.
- commitSyncerClosed chan struct{}
-
// voters keeps track of Merkle trees of online accounts, used for compact certificates.
voters *votersTracker
// baseAccounts stores the most recently used accounts, at exactly dbRound
baseAccounts lruAccounts
- // the synchronous mode that would be used for the account database.
- synchronousMode db.SynchronousMode
-
- // the synchronous mode that would be used while the accounts database is being rebuilt.
- accountsRebuildSynchronousMode db.SynchronousMode
-
// logAccountUpdatesMetrics is a flag to enable/disable metrics logging
logAccountUpdatesMetrics bool
@@ -300,106 +206,37 @@ func (e *MismatchingDatabaseRoundError) Error() string {
}
// initialize initializes the accountUpdates structure
-func (au *accountUpdates) initialize(cfg config.Local, dbPathPrefix string, genesisProto config.ConsensusParams, genesisAccounts map[basics.Address]basics.AccountData) {
- au.initProto = genesisProto
- au.initAccounts = genesisAccounts
- au.dbDirectory = filepath.Dir(dbPathPrefix)
+func (au *accountUpdates) initialize(cfg config.Local) {
au.archivalLedger = cfg.Archival
- switch cfg.CatchpointTracking {
- case -1:
- au.catchpointInterval = 0
- default:
- // give a warning, then fall thought
- logging.Base().Warnf("accountUpdates: the CatchpointTracking field in the config.json file contains an invalid value (%d). The default value of 0 would be used instead.", cfg.CatchpointTracking)
- fallthrough
- case 0:
- if au.archivalLedger {
- au.catchpointInterval = cfg.CatchpointInterval
- } else {
- au.catchpointInterval = 0
- }
- case 1:
- au.catchpointInterval = cfg.CatchpointInterval
- }
- au.catchpointFileHistoryLength = cfg.CatchpointFileHistoryLength
- if cfg.CatchpointFileHistoryLength < -1 {
- au.catchpointFileHistoryLength = -1
- }
- au.vacuumOnStartup = cfg.OptimizeAccountsDatabaseOnStartup
- // initialize the commitSyncerClosed with a closed channel ( since the commitSyncer go-routine is not active )
- au.commitSyncerClosed = make(chan struct{})
- close(au.commitSyncerClosed)
au.accountsReadCond = sync.NewCond(au.accountsMu.RLocker())
- au.synchronousMode = db.SynchronousMode(cfg.LedgerSynchronousMode)
- au.accountsRebuildSynchronousMode = db.SynchronousMode(cfg.AccountsRebuildSynchronousMode)
// log metrics
au.logAccountUpdatesMetrics = cfg.EnableAccountUpdatesStats
au.logAccountUpdatesInterval = cfg.AccountUpdatesStatsInterval
-
}
// loadFromDisk is the 2nd level initialization, and is required before the accountUpdates becomes functional
// The close function is expected to be called in pair with loadFromDisk
-func (au *accountUpdates) loadFromDisk(l ledgerForTracker) error {
+func (au *accountUpdates) loadFromDisk(l ledgerForTracker, lastBalancesRound basics.Round) error {
au.accountsMu.Lock()
defer au.accountsMu.Unlock()
- var writingCatchpointRound uint64
- lastBalancesRound, lastestBlockRound, err := au.initializeFromDisk(l)
-
- if err != nil {
- return err
- }
-
- var writingCatchpointDigest crypto.Digest
- writingCatchpointRound, _, err = au.accountsq.readCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint)
+ au.cachedDBRound = lastBalancesRound
+ err := au.initializeFromDisk(l, lastBalancesRound)
if err != nil {
return err
}
-
- writingCatchpointDigest, err = au.initializeCaches(lastBalancesRound, lastestBlockRound, basics.Round(writingCatchpointRound))
- if err != nil {
- return err
- }
-
- if writingCatchpointRound != 0 && au.catchpointInterval != 0 {
- au.generateCatchpoint(basics.Round(writingCatchpointRound), au.lastCatchpointLabel, writingCatchpointDigest, time.Duration(0))
- }
-
- au.voters = &votersTracker{}
- err = au.voters.loadFromDisk(l, au)
- if err != nil {
- return err
- }
-
return nil
}
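
loadFromDisk now receives lastBalancesRound from its caller instead of discovering it, reflecting the move of catchpoint and DB-initialization duties out of accountUpdates. A rough sketch of that inverted flow, with all names invented:

package main

import "fmt"

type round uint64

type ledgerTracker interface {
	loadFromDisk(dbRound round) error
}

type accountUpdates struct{ cachedDBRound round }

func (au *accountUpdates) loadFromDisk(dbRound round) error {
	au.cachedDBRound = dbRound // cache once; lookups read this copy
	return nil
}

func main() {
	dbRound := round(1000) // as if read once from the tracker database
	trackers := []ledgerTracker{&accountUpdates{}}
	for _, t := range trackers {
		if err := t.loadFromDisk(dbRound); err != nil {
			fmt.Println("init failed:", err)
		}
	}
	fmt.Println("trackers initialized at round", dbRound)
}
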
-// waitAccountsWriting waits for all the pending ( or current ) account writing to be completed.
-func (au *accountUpdates) waitAccountsWriting() {
- au.accountsWriting.Wait()
-}
-
// close closes the accountUpdates, waiting for all the child go-routines to complete
func (au *accountUpdates) close() {
if au.voters != nil {
au.voters.close()
}
- if au.ctxCancel != nil {
- au.ctxCancel()
- }
- au.waitAccountsWriting()
- // this would block until the commitSyncerClosed channel get closed.
- <-au.commitSyncerClosed
- au.baseAccounts.prune(0)
-}
-// IsWritingCatchpointFile returns true when a catchpoint file is being generated. The function is used by the catchup service
-// to avoid memory pressure until the catchpoint file writing is complete.
-func (au *accountUpdates) IsWritingCatchpointFile() bool {
- return atomic.LoadInt32(&au.catchpointWriting) != 0
+ au.baseAccounts.prune(0)
}
// LookupWithRewards returns the account data for a given address at a given round.
@@ -428,7 +265,7 @@ func (au *accountUpdates) ListApplications(maxAppIdx basics.AppIndex, maxResults
func (au *accountUpdates) listCreatables(maxCreatableIdx basics.CreatableIndex, maxResults uint64, ctype basics.CreatableType) ([]basics.CreatableLocator, error) {
au.accountsMu.RLock()
for {
- currentDbRound := au.dbRound
+ currentDbRound := au.cachedDBRound
currentDeltaLen := len(au.deltas)
// Sort indices for creatables that have been created/deleted. If this
// turns out to be too inefficient, we could keep around a heap of
@@ -502,7 +339,7 @@ func (au *accountUpdates) listCreatables(maxCreatableIdx basics.CreatableIndex,
return []basics.CreatableLocator{}, &StaleDatabaseRoundError{databaseRound: dbRound, memoryRound: currentDbRound}
}
au.accountsMu.RLock()
- for currentDbRound >= au.dbRound && currentDeltaLen == len(au.deltas) {
+ for currentDbRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
au.accountsReadCond.Wait()
}
}
@@ -511,11 +348,11 @@ func (au *accountUpdates) listCreatables(maxCreatableIdx basics.CreatableIndex,
// onlineTop returns the top n online accounts, sorted by their normalized
// balance and address, whose voting keys are valid in voteRnd. See the
// normalization description in AccountData.NormalizedOnlineBalance().
-func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n uint64) ([]*onlineAccount, error) {
+func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n uint64) ([]*ledgercore.OnlineAccount, error) {
proto := au.ledger.GenesisProto()
au.accountsMu.RLock()
for {
- currentDbRound := au.dbRound
+ currentDbRound := au.cachedDBRound
currentDeltaLen := len(au.deltas)
offset, err := au.roundOffset(rnd)
if err != nil {
@@ -530,7 +367,7 @@ func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n ui
// is not valid in voteRnd. Otherwise, the *onlineAccount is the
// representation of the most recent state of the account, and it
// is online and can vote in voteRnd.
- modifiedAccounts := make(map[basics.Address]*onlineAccount)
+ modifiedAccounts := make(map[basics.Address]*ledgercore.OnlineAccount)
for o := uint64(0); o < offset; o++ {
for i := 0; i < au.deltas[o].Len(); i++ {
addr, d := au.deltas[o].GetByIdx(i)
@@ -558,12 +395,12 @@ func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n ui
//
// Keep asking for more accounts until we get the desired number,
// or there are no more accounts left.
- candidates := make(map[basics.Address]*onlineAccount)
+ candidates := make(map[basics.Address]*ledgercore.OnlineAccount)
batchOffset := uint64(0)
batchSize := uint64(1024)
var dbRound basics.Round
for uint64(len(candidates)) < n+uint64(len(modifiedAccounts)) {
- var accts map[basics.Address]*onlineAccount
+ var accts map[basics.Address]*ledgercore.OnlineAccount
start := time.Now()
ledgerAccountsonlinetopCount.Inc(nil)
err = au.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
@@ -571,7 +408,7 @@ func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n ui
if err != nil {
return
}
- dbRound, _, err = accountsRound(tx)
+ dbRound, err = accountsRound(tx)
return
})
ledgerAccountsonlinetopMicros.AddMicrosecondsSince(start, nil)
@@ -601,7 +438,7 @@ func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n ui
if dbRound != currentDbRound && dbRound != basics.Round(0) {
// database round doesn't match the last au.dbRound we sampled.
au.accountsMu.RLock()
- for currentDbRound >= au.dbRound && currentDeltaLen == len(au.deltas) {
+ for currentDbRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
au.accountsReadCond.Wait()
}
continue
@@ -627,9 +464,9 @@ func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n ui
heap.Push(topHeap, data)
}
- var res []*onlineAccount
+ var res []*ledgercore.OnlineAccount
for topHeap.Len() > 0 && uint64(len(res)) < n {
- acct := heap.Pop(topHeap).(*onlineAccount)
+ acct := heap.Pop(topHeap).(*ledgercore.OnlineAccount)
res = append(res, acct)
}
@@ -637,129 +474,72 @@ func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n ui
}
}
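
The onlineTop changes above are a type migration (onlineAccount → ledgercore.OnlineAccount); the surrounding batching logic is unchanged. For reference, an illustrative reduction of that batching loop (stand-in types and a fake store; not the real query):

package main

import "fmt"

// fetchBatch stands in for the paged online-accounts DB query.
func fetchBatch(offset, size uint64) []uint64 {
	const totalAccounts = 4096
	if offset >= totalAccounts {
		return nil
	}
	out := make([]uint64, 0, size)
	for i := uint64(0); i < size && offset+i < totalAccounts; i++ {
		out = append(out, offset+i)
	}
	return out
}

// collectCandidates keeps paging until it has n candidates beyond those
// shadowed by in-memory deltas, or the store runs out of accounts.
func collectCandidates(n, modified uint64) []uint64 {
	var candidates []uint64
	batchOffset, batchSize := uint64(0), uint64(1024)
	for uint64(len(candidates)) < n+modified {
		accts := fetchBatch(batchOffset, batchSize)
		if len(accts) == 0 {
			break // no more accounts left
		}
		candidates = append(candidates, accts...)
		batchOffset += batchSize
	}
	return candidates
}

func main() {
	fmt.Println(len(collectCandidates(1500, 10))) // 2048: two full batches
}
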
-// GetLastCatchpointLabel retrieves the last catchpoint label that was stored to the database.
-func (au *accountUpdates) GetLastCatchpointLabel() string {
- au.accountsMu.RLock()
- defer au.accountsMu.RUnlock()
- return au.lastCatchpointLabel
-}
-
// GetCreatorForRound returns the creator for a given asset/app index at a given round
func (au *accountUpdates) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) {
return au.getCreatorForRound(rnd, cidx, ctype, true /* take the lock */)
}
-// committedUpTo enqueues committing the balances for round committedRound-lookback.
+// committedUpTo implements the ledgerTracker interface for accountUpdates.
+// The method informs the tracker that committedRound and all its previous rounds have
+// been committed to the block database. The method returns the oldest round
+// number that can be removed from the blocks database, as well as the lookback that this
+// tracker maintains.
+func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound, lookback basics.Round) {
+ au.accountsMu.RLock()
+ defer au.accountsMu.RUnlock()
+
+ retRound = basics.Round(0)
+ lookback = basics.Round(config.Consensus[au.versions[len(au.versions)-1]].MaxBalLookback)
+ if committedRound < lookback {
+ return
+ }
+
+ retRound = au.cachedDBRound
+ return
+}
+
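
Under the new ledgerTracker contract, committedUpTo only reports bounds and no longer schedules work. A minimal sketch of the reported pair, using a fixed stand-in for the protocol's MaxBalLookback:

package main

import "fmt"

// maxBalLookback stands in for the per-protocol MaxBalLookback constant.
const maxBalLookback = 320

// committedUpTo reports the oldest round the blocks DB must keep and the
// lookback this tracker needs; it schedules no commit work of its own.
func committedUpTo(committedRound, cachedDBRound uint64) (retRound, lookback uint64) {
	lookback = maxBalLookback
	if committedRound < lookback {
		return 0, lookback // too early: nothing can be removed yet
	}
	return cachedDBRound, lookback
}

func main() {
	ret, lb := committedUpTo(5000, 4680)
	fmt.Println(ret, lb) // 4680 320
}
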
+// produceCommittingTask enqueues committing the balances for round committedRound-lookback.
// The deferred committing is done so that we could calculate the historical balances lookback rounds back.
// Since we don't want to hold the tracker's mutex for too long, we'll defer the database persistence of this
// operation to a syncer goroutine. The one caveat is that when storing a catchpoint round, we would want to
// wait until the catchpoint creation is done, so that the persistence of the catchpoint file would have an
// uninterrupted view of the balances at a given point of time.
-func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound basics.Round) {
- var isCatchpointRound, hasMultipleIntermediateCatchpoint bool
+func (au *accountUpdates) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
var offset uint64
- var dc deferredCommit
au.accountsMu.RLock()
- defer func() {
- au.accountsMu.RUnlock()
- if dc.offset != 0 {
- au.committedOffset <- dc
- }
- }()
- retRound = basics.Round(0)
- var pendingDeltas int
+ defer au.accountsMu.RUnlock()
- lookback := basics.Round(config.Consensus[au.versions[len(au.versions)-1]].MaxBalLookback)
- if committedRound < lookback {
- return
+ if committedRound < dcr.lookback {
+ return nil
}
- retRound = au.dbRound
- newBase := committedRound - lookback
- if newBase <= au.dbRound {
+ newBase := committedRound - dcr.lookback
+ if newBase <= dbRound {
// Already forgotten
- return
- }
-
- if newBase > au.dbRound+basics.Round(len(au.deltas)) {
- au.log.Panicf("committedUpTo: block %d too far in the future, lookback %d, dbRound %d, deltas %d", committedRound, lookback, au.dbRound, len(au.deltas))
- }
-
- hasIntermediateCatchpoint := false
- hasMultipleIntermediateCatchpoint = false
- // check if there was a catchpoint between au.dbRound+lookback and newBase+lookback
- if au.catchpointInterval > 0 {
- nextCatchpointRound := ((uint64(au.dbRound+lookback) + au.catchpointInterval) / au.catchpointInterval) * au.catchpointInterval
-
- if nextCatchpointRound < uint64(newBase+lookback) {
- mostRecentCatchpointRound := (uint64(committedRound) / au.catchpointInterval) * au.catchpointInterval
- newBase = basics.Round(nextCatchpointRound) - lookback
- if mostRecentCatchpointRound > nextCatchpointRound {
- hasMultipleIntermediateCatchpoint = true
- // skip if there is more than one catchpoint in queue
- newBase = basics.Round(mostRecentCatchpointRound) - lookback
- }
- hasIntermediateCatchpoint = true
- }
+ return nil
}
- // if we're still writing the previous balances, we can't move forward yet.
- if au.IsWritingCatchpointFile() {
- // if we hit this path, it means that we're still writing a catchpoint.
- // see if the new delta range contains another catchpoint.
- if hasIntermediateCatchpoint {
- // check if we're already attempting to perform fast-writing.
- select {
- case <-au.catchpointSlowWriting:
- // yes, we're already doing fast-writing.
- default:
- // no, we're not yet doing fast writing, make it so.
- close(au.catchpointSlowWriting)
- }
- }
- return
+ if newBase > dbRound+basics.Round(len(au.deltas)) {
+ au.log.Panicf("produceCommittingTask: block %d too far in the future, lookback %d, dbRound %d (cached %d), deltas %d", committedRound, dcr.lookback, dbRound, au.cachedDBRound, len(au.deltas))
}
if au.voters != nil {
newBase = au.voters.lowestRound(newBase)
}
- offset = uint64(newBase - au.dbRound)
+ offset = uint64(newBase - dbRound)
offset = au.consecutiveVersion(offset)
- // check to see if this is a catchpoint round
- isCatchpointRound = ((offset + uint64(lookback+au.dbRound)) > 0) && (au.catchpointInterval != 0) && (0 == (uint64((offset + uint64(lookback+au.dbRound))) % au.catchpointInterval))
-
// calculate the number of pending deltas
- pendingDeltas = au.deltasAccum[offset] - au.deltasAccum[0]
-
- // If we recently flushed, wait to aggregate some more blocks.
- // ( unless we're creating a catchpoint, in which case we want to flush it right away
- // so that all the instances of the catchpoint would contain exactly the same data )
- flushTime := time.Now()
- if !flushTime.After(au.lastFlushTime.Add(balancesFlushInterval)) && !isCatchpointRound && pendingDeltas < pendingDeltasFlushThreshold {
- return au.dbRound
- }
+ dcr.pendingDeltas = au.deltasAccum[offset] - au.deltasAccum[0]
- if isCatchpointRound && au.archivalLedger {
- // store non-zero ( all ones ) into the catchpointWriting atomic variable to indicate that a catchpoint is being written ( or, queued to be written )
- atomic.StoreInt32(&au.catchpointWriting, int32(-1))
- au.catchpointSlowWriting = make(chan struct{}, 1)
- if hasMultipleIntermediateCatchpoint {
- close(au.catchpointSlowWriting)
- }
- }
-
- dc = deferredCommit{
- offset: offset,
- dbRound: au.dbRound,
- lookback: lookback,
- }
- if offset != 0 {
- au.accountsWriting.Add(1)
- }
- return
+ // submit a committing task only if the offset is non-zero, and additionally:
+ // 1) there are no pending catchpoint writes
+ // 2) the batching requirements are met, or this is a catchpoint round
+ dcr.oldBase = dbRound
+ dcr.offset = offset
+ return dcr
}
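
produceCommittingTask now returns a description of the commit range instead of enqueueing a deferredCommit itself. The core arithmetic reduces to the sketch below (simplified struct; the voters-tracker lowering and consecutiveVersion trimming are omitted):

package main

import "fmt"

// deferredCommitRange mirrors the shape of the range descriptor; the real
// struct carries more fields (pendingDeltas, catchpoint flags, ...).
type deferredCommitRange struct {
	offset   uint64
	oldBase  uint64
	lookback uint64
}

func produceCommittingTask(committedRound, dbRound uint64, dcr *deferredCommitRange) *deferredCommitRange {
	if committedRound < dcr.lookback {
		return nil
	}
	newBase := committedRound - dcr.lookback
	if newBase <= dbRound {
		return nil // already forgotten
	}
	dcr.oldBase = dbRound
	dcr.offset = newBase - dbRound
	return dcr
}

func main() {
	dcr := &deferredCommitRange{lookback: 320}
	if out := produceCommittingTask(5000, 4600, dcr); out != nil {
		fmt.Printf("commit rounds (%d, %d]\n", out.oldBase, out.oldBase+out.offset) // (4600, 4680]
	}
}
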
func (au *accountUpdates) consecutiveVersion(offset uint64) uint64 {
@@ -794,6 +574,13 @@ func (au *accountUpdates) Totals(rnd basics.Round) (totals ledgercore.AccountTot
return au.totalsImpl(rnd)
}
+// LatestTotals returns the totals of all accounts for the most recent round, as well as the round number
+func (au *accountUpdates) LatestTotals() (basics.Round, ledgercore.AccountTotals, error) {
+ au.accountsMu.RLock()
+ defer au.accountsMu.RUnlock()
+ return au.latestTotalsImpl()
+}
+
// ReadCloseSizer interface implements the standard io.Reader and io.Closer as well
// as supporting the Size() function that lets the caller know what the size of the stream would be (in bytes).
type ReadCloseSizer interface {
@@ -815,64 +602,6 @@ func (r *readCloseSizer) Size() (int64, error) {
return r.size, nil
}
-// GetCatchpointStream returns a ReadCloseSizer to the catchpoint file associated with the provided round
-func (au *accountUpdates) GetCatchpointStream(round basics.Round) (ReadCloseSizer, error) {
- dbFileName := ""
- fileSize := int64(0)
- start := time.Now()
- ledgerGetcatchpointCount.Inc(nil)
- err := au.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- dbFileName, _, fileSize, err = getCatchpoint(tx, round)
- return
- })
- ledgerGetcatchpointMicros.AddMicrosecondsSince(start, nil)
- if err != nil && err != sql.ErrNoRows {
- // we had some sql error.
- return nil, fmt.Errorf("accountUpdates: getCatchpointStream: unable to lookup catchpoint %d: %v", round, err)
- }
- if dbFileName != "" {
- catchpointPath := filepath.Join(au.dbDirectory, dbFileName)
- file, err := os.OpenFile(catchpointPath, os.O_RDONLY, 0666)
- if err == nil && file != nil {
- return &readCloseSizer{ReadCloser: file, size: fileSize}, nil
- }
- // else, see if this is a file-not-found error
- if os.IsNotExist(err) {
- // the database told us that we have this file.. but we couldn't find it.
- // delete it from the database.
- err := au.saveCatchpointFile(round, "", 0, "")
- if err != nil {
- au.log.Warnf("accountUpdates: getCatchpointStream: unable to delete missing catchpoint entry: %v", err)
- return nil, err
- }
-
- return nil, ledgercore.ErrNoEntry{}
- }
- // it's some other error.
- return nil, fmt.Errorf("accountUpdates: getCatchpointStream: unable to open catchpoint file '%s' %v", catchpointPath, err)
- }
-
- // if the database doesn't know about that round, see if we have that file anyway:
- fileName := filepath.Join("catchpoints", catchpointRoundToPath(round))
- catchpointPath := filepath.Join(au.dbDirectory, fileName)
- file, err := os.OpenFile(catchpointPath, os.O_RDONLY, 0666)
- if err == nil && file != nil {
- // great, if found that we should have had this in the database.. add this one now :
- fileInfo, err := file.Stat()
- if err != nil {
- // we couldn't get the stat, so just return with the file.
- return &readCloseSizer{ReadCloser: file, size: -1}, nil
- }
-
- err = au.saveCatchpointFile(round, fileName, fileInfo.Size(), "")
- if err != nil {
- au.log.Warnf("accountUpdates: getCatchpointStream: unable to save missing catchpoint entry: %v", err)
- }
- return &readCloseSizer{ReadCloser: file, size: fileInfo.Size()}, nil
- }
- return nil, ledgercore.ErrNoEntry{}
-}
-
// functions below this line are all internal functions
// accountUpdatesLedgerEvaluator is a "ledger emulator" which is used *only* by initializeCaches, as a way to shortcut
@@ -896,7 +625,7 @@ func (aul *accountUpdatesLedgerEvaluator) GenesisHash() crypto.Digest {
}
// CompactCertVoters returns the top online accounts at round rnd.
-func (aul *accountUpdatesLedgerEvaluator) CompactCertVoters(rnd basics.Round) (voters *VotersForRound, err error) {
+func (aul *accountUpdatesLedgerEvaluator) CompactCertVoters(rnd basics.Round) (voters *ledgercore.VotersForRound, err error) {
return aul.au.voters.getVoters(rnd)
}
@@ -909,13 +638,13 @@ func (aul *accountUpdatesLedgerEvaluator) BlockHdr(r basics.Round) (bookkeeping.
return bookkeeping.BlockHeader{}, ledgercore.ErrNoEntry{}
}
-// Totals returns the totals for a given round
-func (aul *accountUpdatesLedgerEvaluator) Totals(rnd basics.Round) (ledgercore.AccountTotals, error) {
- return aul.au.totalsImpl(rnd)
+// LatestTotals returns the totals of all accounts for the most recent round, as well as the round number
+func (aul *accountUpdatesLedgerEvaluator) LatestTotals() (basics.Round, ledgercore.AccountTotals, error) {
+ return aul.au.latestTotalsImpl()
}
// CheckDup tests to see if the given transaction id/lease already exists. It's not needed by the accountUpdatesLedgerEvaluator and implemented as a stub.
-func (aul *accountUpdatesLedgerEvaluator) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, TxLease) error {
+func (aul *accountUpdatesLedgerEvaluator) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
// this is a non-issue since this call will never be made on non-validating evaluation
return fmt.Errorf("accountUpdatesLedgerEvaluator: tried to check for dup during accountUpdates initialization ")
}
@@ -941,201 +670,23 @@ func (au *accountUpdates) totalsImpl(rnd basics.Round) (totals ledgercore.Accoun
return
}
-// initializeCaches fills up the accountUpdates cache with the most recent ~320 blocks ( on normal execution ).
-// the method also support balances recovery in cases where the difference between the lastBalancesRound and the lastestBlockRound
-// is far greater than 320; in these cases, it would flush to disk periodically in order to avoid high memory consumption.
-func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, writingCatchpointRound basics.Round) (catchpointBlockDigest crypto.Digest, err error) {
- var blk bookkeeping.Block
- var delta ledgercore.StateDelta
-
- accLedgerEval := accountUpdatesLedgerEvaluator{
- au: au,
- }
- if lastBalancesRound < lastestBlockRound {
- accLedgerEval.prevHeader, err = au.ledger.BlockHdr(lastBalancesRound)
- if err != nil {
- return
- }
- }
-
- skipAccountCacheMessage := make(chan struct{})
- writeAccountCacheMessageCompleted := make(chan struct{})
- defer func() {
- close(skipAccountCacheMessage)
- select {
- case <-writeAccountCacheMessageCompleted:
- if err == nil {
- au.log.Infof("initializeCaches completed initializing account data caches")
- }
- default:
- }
- }()
-
- // this goroutine logs a message once if the parent function have not completed in initializingAccountCachesMessageTimeout seconds.
- // the message is important, since we're blocking on the ledger block database here, and we want to make sure that we log a message
- // within the above timeout.
- go func() {
- select {
- case <-time.After(initializingAccountCachesMessageTimeout):
- au.log.Infof("initializeCaches is initializing account data caches")
- close(writeAccountCacheMessageCompleted)
- case <-skipAccountCacheMessage:
- }
- }()
-
- blocksStream := make(chan bookkeeping.Block, initializeCachesReadaheadBlocksStream)
- blockEvalFailed := make(chan struct{}, 1)
- var blockRetrievalError error
- go func() {
- defer close(blocksStream)
- for roundNumber := lastBalancesRound + 1; roundNumber <= lastestBlockRound; roundNumber++ {
- blk, blockRetrievalError = au.ledger.Block(roundNumber)
- if blockRetrievalError != nil {
- return
- }
- select {
- case blocksStream <- blk:
- case <-blockEvalFailed:
- return
- }
- }
- }()
-
- lastFlushedRound := lastBalancesRound
- const accountsCacheLoadingMessageInterval = 5 * time.Second
- lastProgressMessage := time.Now().Add(-accountsCacheLoadingMessageInterval / 2)
-
- // rollbackSynchronousMode ensures that we switch to "fast writing mode" when we start flushing out rounds to disk, and that
- // we exit this mode when we're done.
- rollbackSynchronousMode := false
- defer func() {
- if rollbackSynchronousMode {
- // restore default synchronous mode
- au.dbs.Wdb.SetSynchronousMode(context.Background(), au.synchronousMode, au.synchronousMode >= db.SynchronousModeFull)
- }
- }()
-
- for blk := range blocksStream {
- delta, err = au.ledger.trackerEvalVerified(blk, &accLedgerEval)
- if err != nil {
- close(blockEvalFailed)
- return
- }
-
- au.newBlockImpl(blk, delta)
-
- if blk.Round() == basics.Round(writingCatchpointRound) {
- catchpointBlockDigest = blk.Digest()
- }
-
- // flush to disk if any of the following applies:
- // 1. if we have loaded up more than initializeCachesRoundFlushInterval rounds since the last time we flushed the data to disk
- // 2. if we completed the loading and we loaded up more than 320 rounds.
- flushIntervalExceed := blk.Round()-lastFlushedRound > initializeCachesRoundFlushInterval
- loadCompleted := (lastestBlockRound == blk.Round() && lastBalancesRound+basics.Round(blk.ConsensusProtocol().MaxBalLookback) < lastestBlockRound)
- if flushIntervalExceed || loadCompleted {
- // adjust the last flush time, so that we would not hold off the flushing due to "working too fast"
- au.lastFlushTime = time.Now().Add(-balancesFlushInterval)
-
- if !rollbackSynchronousMode {
- // switch to rebuild synchronous mode to improve performance
- au.dbs.Wdb.SetSynchronousMode(context.Background(), au.accountsRebuildSynchronousMode, au.accountsRebuildSynchronousMode >= db.SynchronousModeFull)
-
- // flip the switch to rollback the synchronous mode once we're done.
- rollbackSynchronousMode = true
- }
-
- // The unlocking/relocking here isn't very elegant, but it does get the work done :
- // this method is called on either startup or when fast catchup is complete. In the former usecase, the
- // locking here is not really needed since the system is only starting up, and there are no other
- // consumers for the accounts update. On the latter usecase, the function would always have exactly 320 rounds,
- // and therefore this wouldn't be an issue.
- // However, to make sure we're not missing any other future codepath, unlocking here and re-locking later on is a pretty
- // safe bet.
- au.accountsMu.Unlock()
-
- // flush the account data
- au.committedUpTo(blk.Round())
-
- // wait for the writing to complete.
- au.waitAccountsWriting()
-
- // The au.dbRound after writing should be ~320 behind the block round.
- roundsBehind := blk.Round() - au.dbRound
-
- au.accountsMu.Lock()
-
- // are we too far behind ? ( taking into consideration the catchpoint writing, which can stall the writing for quite a bit )
- if roundsBehind > initializeCachesRoundFlushInterval+basics.Round(au.catchpointInterval) {
- // we're unable to persist changes. This is unexpected, but there is no point in keep trying batching additional changes since any further changes
- // would just accumulate in memory.
- close(blockEvalFailed)
- au.log.Errorf("initializeCaches was unable to fill up the account caches accounts round = %d, block round = %d. See above error for more details.", au.dbRound, blk.Round())
- err = fmt.Errorf("initializeCaches failed to initialize the account data caches")
- return
- }
-
- // and once we flushed it to disk, update the lastFlushedRound
- lastFlushedRound = blk.Round()
- }
-
- // if enough time have passed since the last time we wrote a message to the log file then give the user an update about the progess.
- if time.Now().Sub(lastProgressMessage) > accountsCacheLoadingMessageInterval {
- // drop the initial message if we're got to this point since a message saying "still initializing" that comes after "is initializing" doesn't seems to be right.
- select {
- case skipAccountCacheMessage <- struct{}{}:
- // if we got to this point, we should be able to close the writeAccountCacheMessageCompleted channel to have the "completed initializing" message written.
- close(writeAccountCacheMessageCompleted)
- default:
- }
- au.log.Infof("initializeCaches is still initializing account data caches, %d rounds loaded out of %d rounds", blk.Round()-lastBalancesRound, lastestBlockRound-lastBalancesRound)
- lastProgressMessage = time.Now()
- }
-
- // prepare for the next iteration.
- accLedgerEval.prevHeader = *delta.Hdr
- }
-
- if blockRetrievalError != nil {
- err = blockRetrievalError
- }
- return
+// latestTotalsImpl returns the totals of all accounts for the most recent round, as well as the round number
+func (au *accountUpdates) latestTotalsImpl() (basics.Round, ledgercore.AccountTotals, error) {
+ offset := len(au.deltas)
+ rnd := au.cachedDBRound + basics.Round(len(au.deltas))
+ return rnd, au.roundTotals[offset], nil
}
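
The indexing in latestTotalsImpl relies on roundTotals being one entry longer than deltas. A tiny worked example, with made-up numbers:

package main

import "fmt"

func main() {
	cachedDBRound := 1000
	deltas := []string{"r1001", "r1002", "r1003"} // three in-memory rounds
	roundTotals := []int{10, 11, 12, 13}          // always len(deltas)+1 entries

	offset := len(deltas)
	rnd := cachedDBRound + offset
	fmt.Println(rnd, roundTotals[offset]) // 1003 13
}
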
// initializeFromDisk performs the atomic operation of loading the accounts data information from disk
-// and preparing the accountUpdates for operation, including initializing the commitSyncer goroutine.
-func (au *accountUpdates) initializeFromDisk(l ledgerForTracker) (lastBalancesRound, lastestBlockRound basics.Round, err error) {
+// and preparing the accountUpdates for operation.
+func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRound basics.Round) (err error) {
au.dbs = l.trackerDB()
au.log = l.trackerLog()
au.ledger = l
- if au.initAccounts == nil {
- err = fmt.Errorf("accountUpdates.initializeFromDisk: initAccounts not set")
- return
- }
-
- lastestBlockRound = l.Latest()
start := time.Now()
ledgerAccountsinitCount.Inc(nil)
err = au.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- var err0 error
- au.dbRound, err0 = au.accountsInitialize(ctx, tx)
- if err0 != nil {
- return err0
- }
- // Check for blocks DB and tracker DB un-sync
- if au.dbRound > lastestBlockRound {
- au.log.Warnf("accountUpdates.initializeFromDisk: resetting accounts DB (on round %v, but blocks DB's latest is %v)", au.dbRound, lastestBlockRound)
- err0 = accountsReset(tx)
- if err0 != nil {
- return err0
- }
- au.dbRound, err0 = au.accountsInitialize(ctx, tx)
- if err0 != nil {
- return err0
- }
- }
-
totals, err0 := accountsTotals(tx, false)
if err0 != nil {
return err0
@@ -1150,19 +701,12 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker) (lastBalancesRo
return
}
- // the VacuumDatabase would be a no-op if au.vacuumOnStartup is cleared.
- au.vacuumDatabase(context.Background())
- if err != nil {
- return
- }
-
- au.accountsq, err = accountsDbInit(au.dbs.Rdb.Handle, au.dbs.Wdb.Handle)
- au.lastCatchpointLabel, _, err = au.accountsq.readCatchpointStateString(context.Background(), catchpointStateLastCatchpoint)
+ au.accountsq, err = accountsInitDbQueries(au.dbs.Rdb.Handle, au.dbs.Wdb.Handle)
if err != nil {
return
}
- hdr, err := l.BlockHdr(au.dbRound)
+ hdr, err := l.BlockHdr(lastBalancesRound)
if err != nil {
return
}
@@ -1173,505 +717,14 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker) (lastBalancesRo
au.accounts = make(map[basics.Address]modifiedAccount)
au.creatables = make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable)
au.deltasAccum = []int{0}
- au.roundDigest = nil
-
- au.catchpointWriting = 0
- // keep these channel closed if we're not generating catchpoint
- au.catchpointSlowWriting = make(chan struct{}, 1)
- close(au.catchpointSlowWriting)
- au.ctx, au.ctxCancel = context.WithCancel(context.Background())
- au.committedOffset = make(chan deferredCommit, 1)
- au.commitSyncerClosed = make(chan struct{})
- go au.commitSyncer(au.committedOffset)
-
- lastBalancesRound = au.dbRound
- au.baseAccounts.init(au.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold)
- return
-}
-
-// accountHashBuilder calculates the hash key used for the trie by combining the account address and the account data
-func accountHashBuilder(addr basics.Address, accountData basics.AccountData, encodedAccountData []byte) []byte {
- hash := make([]byte, 4+crypto.DigestSize)
- // write out the lowest 32 bits of the reward base. This should improve the caching of the trie by allowing
- // recent updated to be in-cache, and "older" nodes will be left alone.
- for i, rewards := 3, accountData.RewardsBase; i >= 0; i, rewards = i-1, rewards>>8 {
- // the following takes the rewards & 255 -> hash[i]
- hash[i] = byte(rewards)
- }
- entryHash := crypto.Hash(append(addr[:], encodedAccountData[:]...))
- copy(hash[4:], entryHash[:])
- return hash[:]
-}
-
-// accountsInitialize initializes the accounts DB if needed and return current account round.
-// as part of the initialization, it tests the current database schema version, and perform upgrade
-// procedures to bring it up to the database schema supported by the binary.
-func (au *accountUpdates) accountsInitialize(ctx context.Context, tx *sql.Tx) (basics.Round, error) {
- // check current database version.
- dbVersion, err := db.GetUserVersion(ctx, tx)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to read database schema version : %v", err)
- }
-
- // if database version is greater than supported by current binary, write a warning. This would keep the existing
- // fallback behavior where we could use an older binary iff the schema happen to be backward compatible.
- if dbVersion > accountDBVersion {
- au.log.Warnf("accountsInitialize database schema version is %d, but algod supports only %d", dbVersion, accountDBVersion)
- }
-
- if dbVersion < accountDBVersion {
- au.log.Infof("accountsInitialize upgrading database schema from version %d to version %d", dbVersion, accountDBVersion)
- // newDatabase is determined during the tables creations. If we're filling the database with accounts,
- // then we set this variable to true, allowing some of the upgrades to be skipped.
- var newDatabase bool
- for dbVersion < accountDBVersion {
- au.log.Infof("accountsInitialize performing upgrade from version %d", dbVersion)
- // perform the initialization/upgrade
- switch dbVersion {
- case 0:
- dbVersion, newDatabase, err = au.upgradeDatabaseSchema0(ctx, tx)
- if err != nil {
- au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 0 : %v", err)
- return 0, err
- }
- case 1:
- dbVersion, err = au.upgradeDatabaseSchema1(ctx, tx, newDatabase)
- if err != nil {
- au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 1 : %v", err)
- return 0, err
- }
- case 2:
- dbVersion, err = au.upgradeDatabaseSchema2(ctx, tx, newDatabase)
- if err != nil {
- au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 2 : %v", err)
- return 0, err
- }
- case 3:
- dbVersion, err = au.upgradeDatabaseSchema3(ctx, tx, newDatabase)
- if err != nil {
- au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 3 : %v", err)
- return 0, err
- }
- case 4:
- dbVersion, err = au.upgradeDatabaseSchema4(ctx, tx, newDatabase)
- if err != nil {
- au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 4 : %v", err)
- return 0, err
- }
- default:
- return 0, fmt.Errorf("accountsInitialize unable to upgrade database from schema version %d", dbVersion)
- }
- }
-
- au.log.Infof("accountsInitialize database schema upgrade complete")
- }
-
- rnd, hashRound, err := accountsRound(tx)
- if err != nil {
- return 0, err
- }
-
- if hashRound != rnd {
- // if the hashed round is different then the base round, something was modified, and the accounts aren't in sync
- // with the hashes.
- err = resetAccountHashes(tx)
- if err != nil {
- return 0, err
- }
- // if catchpoint is disabled on this node, we could complete the initialization right here.
- if au.catchpointInterval == 0 {
- return rnd, nil
- }
- }
-
- // create the merkle trie for the balances
- committer, err := MakeMerkleCommitter(tx, false)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize was unable to makeMerkleCommitter: %v", err)
- }
-
- trie, err := merkletrie.MakeTrie(committer, TrieMemoryConfig)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize was unable to MakeTrie: %v", err)
- }
-
- // we might have a database that was previously initialized, and now we're adding the balances trie. In that case, we need to add all the existing balances to this trie.
- // we can figure this out by examining the hash of the root:
- rootHash, err := trie.RootHash()
- if err != nil {
- return rnd, fmt.Errorf("accountsInitialize was unable to retrieve trie root hash: %v", err)
- }
-
- if rootHash.IsZero() {
- au.log.Infof("accountsInitialize rebuilding merkle trie for round %d", rnd)
- accountBuilderIt := makeOrderedAccountsIter(tx, trieRebuildAccountChunkSize)
- defer accountBuilderIt.Close(ctx)
- startTrieBuildTime := time.Now()
- accountsCount := 0
- lastRebuildTime := startTrieBuildTime
- pendingAccounts := 0
- totalOrderedAccounts := 0
- for {
- accts, processedRows, err := accountBuilderIt.Next(ctx)
- if err == sql.ErrNoRows {
- // the account builder would return sql.ErrNoRows when no more data is available.
- break
- } else if err != nil {
- return rnd, err
- }
-
- if len(accts) > 0 {
- accountsCount += len(accts)
- pendingAccounts += len(accts)
- for _, acct := range accts {
- added, err := trie.Add(acct.digest)
- if err != nil {
- return rnd, fmt.Errorf("accountsInitialize was unable to add changes to trie: %v", err)
- }
- if !added {
- au.log.Warnf("accountsInitialize attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(acct.digest), acct.address)
- }
- }
-
- if pendingAccounts >= trieRebuildCommitFrequency {
- // this trie Evict will commit using the current transaction.
- // if anything goes wrong, it will still get rolled back.
- _, err = trie.Evict(true)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err)
- }
- pendingAccounts = 0
- }
-
- if time.Now().Sub(lastRebuildTime) > 5*time.Second {
- // let the user know that the trie is still being rebuilt.
- au.log.Infof("accountsInitialize still building the trie, and processed so far %d accounts", accountsCount)
- lastRebuildTime = time.Now()
- }
- } else if processedRows > 0 {
- totalOrderedAccounts += processedRows
- // if it's not ordered, we can ignore it for now; we'll just increase the counters and emit logs periodically.
- if time.Now().Sub(lastRebuildTime) > 5*time.Second {
- // let the user know that the trie is still being rebuilt.
- au.log.Infof("accountsInitialize still building the trie, and hashed so far %d accounts", totalOrderedAccounts)
- lastRebuildTime = time.Now()
- }
- }
- }
-
- // this trie Evict will commit using the current transaction.
- // if anything goes wrong, it will still get rolled back.
- _, err = trie.Evict(true)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err)
- }
-
- // we've just updated the merkle trie, update the hashRound to reflect that.
- err = updateAccountsRound(tx, rnd, rnd)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize was unable to update the account round to %d: %v", rnd, err)
- }
-
- au.log.Infof("accountsInitialize rebuilt the merkle trie with %d entries in %v", accountsCount, time.Now().Sub(startTrieBuildTime))
- }
- au.balancesTrie = trie
- return rnd, nil
-}
-
-// upgradeDatabaseSchema0 upgrades the database schema from version 0 to version 1
-//
-// Schema of version 0 is expected to be aligned with the schema used on version 2.0.8 or before.
-// Any database of version 2.0.8 would be of version 0. At this point, the database might
-// have the following tables : ( i.e. a newly created database would not have these )
-// * acctrounds
-// * accounttotals
-// * accountbase
-// * assetcreators
-// * storedcatchpoints
-// * accounthashes
-// * catchpointstate
-//
-// As the first step of the upgrade, the above tables are being created if they do not already exists.
-// Following that, the assetcreators table is being altered by adding a new column to it (ctype).
-// Last, in case the database was just created, it would get initialized with the following:
-// The accountbase would get initialized with the au.initAccounts
-// The accounttotals would get initialized to align with the initialization account added to accountbase
-// The acctrounds would get updated to indicate that the balance matches round 0
-//
-func (au *accountUpdates) upgradeDatabaseSchema0(ctx context.Context, tx *sql.Tx) (updatedDBVersion int32, newDatabase bool, err error) {
- au.log.Infof("accountsInitialize initializing schema")
- newDatabase, err = accountsInit(tx, au.initAccounts, au.initProto)
- if err != nil {
- return 0, newDatabase, fmt.Errorf("accountsInitialize unable to initialize schema : %v", err)
- }
- _, err = db.SetUserVersion(ctx, tx, 1)
- if err != nil {
- return 0, newDatabase, fmt.Errorf("accountsInitialize unable to update database schema version from 0 to 1: %v", err)
- }
- return 1, newDatabase, nil
-}
-
-// upgradeDatabaseSchema1 upgrades the database schema from version 1 to version 2
-//
-// The schema updated to version 2 intended to ensure that the encoding of all the accounts data is
-// both canonical and identical across the entire network. On release 2.0.5 we released an upgrade to the messagepack.
-// the upgraded messagepack was decoding the account data correctly, but would have different
-// encoding compared to it's predecessor. As a result, some of the account data that was previously stored
-// would have different encoded representation than the one on disk.
-// To address this, this startup procedure would attempt to scan all the accounts data. for each account data, we would
-// see if it's encoding aligns with the current messagepack encoder. If it doesn't we would update it's encoding.
-// then, depending if we found any such account data, we would reset the merkle trie and stored catchpoints.
-// once the upgrade is complete, the accountsInitialize would (if needed) rebuild the merkle trie using the new
-// encoded accounts.
-//
-// This upgrade doesn't change any of the actual database schema ( i.e. tables, indexes ) but rather just performing
-// a functional update to it's content.
-//
-func (au *accountUpdates) upgradeDatabaseSchema1(ctx context.Context, tx *sql.Tx, newDatabase bool) (updatedDBVersion int32, err error) {
- var modifiedAccounts uint
- if newDatabase {
- goto schemaUpdateComplete
- }
-
- // update accounts encoding.
- au.log.Infof("accountsInitialize verifying accounts data encoding")
- modifiedAccounts, err = reencodeAccounts(ctx, tx)
- if err != nil {
- return 0, err
- }
-
- if modifiedAccounts > 0 {
- au.log.Infof("accountsInitialize reencoded %d accounts", modifiedAccounts)
-
- au.log.Infof("accountsInitialize resetting account hashes")
- // reset the merkle trie
- err = resetAccountHashes(tx)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to reset account hashes : %v", err)
- }
-
- au.log.Infof("accountsInitialize preparing queries")
- // initialize a new accountsq with the incoming transaction.
- accountsq, err := accountsDbInit(tx, tx)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to prepare queries : %v", err)
- }
-
- // close the prepared statements when we're done with them.
- defer accountsq.close()
-
- au.log.Infof("accountsInitialize resetting prior catchpoints")
- // delete the last catchpoint label if we have any.
- _, err = accountsq.writeCatchpointStateString(ctx, catchpointStateLastCatchpoint, "")
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to clear prior catchpoint : %v", err)
- }
-
- au.log.Infof("accountsInitialize deleting stored catchpoints")
- // delete catchpoints.
- err = au.deleteStoredCatchpoints(ctx, accountsq)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to delete stored catchpoints : %v", err)
- }
- } else {
- au.log.Infof("accountsInitialize found that no accounts needed to be reencoded")
- }
-
-schemaUpdateComplete:
- // update version
- _, err = db.SetUserVersion(ctx, tx, 2)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 1 to 2: %v", err)
- }
- return 2, nil
-}
-
-// upgradeDatabaseSchema2 upgrades the database schema from version 2 to version 3
-//
-// This upgrade only enables the database vacuuming which will take place once the upgrade process is complete.
-// If the user has already specified the OptimizeAccountsDatabaseOnStartup flag in the configuration file, this
-// step becomes a no-op.
-//
-func (au *accountUpdates) upgradeDatabaseSchema2(ctx context.Context, tx *sql.Tx, newDatabase bool) (updatedDBVersion int32, err error) {
- if !newDatabase {
- au.vacuumOnStartup = true
- }
-
- // update version
- _, err = db.SetUserVersion(ctx, tx, 3)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 2 to 3: %v", err)
- }
- return 3, nil
-}
-
-// upgradeDatabaseSchema3 upgrades the database schema from version 3 to version 4,
-// adding the normalizedonlinebalance column to the accountbase table.
-func (au *accountUpdates) upgradeDatabaseSchema3(ctx context.Context, tx *sql.Tx, newDatabase bool) (updatedDBVersion int32, err error) {
- err = accountsAddNormalizedBalance(tx, au.ledger.GenesisProto())
- if err != nil {
- return 0, err
- }
-
- // update version
- _, err = db.SetUserVersion(ctx, tx, 4)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 3 to 4: %v", err)
- }
- return 4, nil
-}
-
-// upgradeDatabaseSchema4 does not change the schema but migrates data:
-// remove empty AccountData entries from accountbase table
-func (au *accountUpdates) upgradeDatabaseSchema4(ctx context.Context, tx *sql.Tx, newDatabase bool) (updatedDBVersion int32, err error) {
- queryAddresses := au.catchpointInterval != 0
- var numDeleted int64
- var addresses []basics.Address
-
- if newDatabase {
- goto done
- }
-
- numDeleted, addresses, err = removeEmptyAccountData(tx, queryAddresses)
- if err != nil {
- return 0, err
- }
-
- if queryAddresses && len(addresses) > 0 {
- mc, err := MakeMerkleCommitter(tx, false)
- if err != nil {
- // at this point record deleted and DB is pruned for account data
- // if hash deletion fails just log it and do not abort startup
- au.log.Errorf("upgradeDatabaseSchema4: failed to create merkle committer: %v", err)
- goto done
- }
- trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig)
- if err != nil {
- au.log.Errorf("upgradeDatabaseSchema4: failed to create merkle trie: %v", err)
- goto done
- }
-
- var totalHashesDeleted int
- for _, addr := range addresses {
- hash := accountHashBuilder(addr, basics.AccountData{}, []byte{0x80})
- deleted, err := trie.Delete(hash)
- if err != nil {
- au.log.Errorf("upgradeDatabaseSchema4: failed to delete hash '%s' from merkle trie for account %v: %v", hex.EncodeToString(hash), addr, err)
- } else {
- if !deleted {
- au.log.Warnf("upgradeDatabaseSchema4: failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(hash), addr)
- } else {
- totalHashesDeleted++
- }
- }
- }
-
- if _, err = trie.Commit(); err != nil {
- au.log.Errorf("upgradeDatabaseSchema4: failed to commit changes to merkle trie: %v", err)
- }
-
- au.log.Infof("upgradeDatabaseSchema4: deleted %d hashes", totalHashesDeleted)
- }
-
-done:
- au.log.Infof("upgradeDatabaseSchema4: deleted %d rows", numDeleted)
-
- // update version
- _, err = db.SetUserVersion(ctx, tx, 5)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 4 to 5: %v", err)
- }
- return 5, nil
-}
-
-// deleteStoredCatchpoints iterates over the storedcatchpoints table and deletes all the files stored on disk.
-// once all the files have been deleted, it would go ahead and remove the entries from the table.
-func (au *accountUpdates) deleteStoredCatchpoints(ctx context.Context, dbQueries *accountsDbQueries) (err error) {
- catchpointsFilesChunkSize := 50
- for {
- fileNames, err := dbQueries.getOldestCatchpointFiles(ctx, catchpointsFilesChunkSize, 0)
- if err != nil {
- return err
- }
- if len(fileNames) == 0 {
- break
- }
-
- for round, fileName := range fileNames {
- absCatchpointFileName := filepath.Join(au.dbDirectory, fileName)
- err = os.Remove(absCatchpointFileName)
- if err == nil || os.IsNotExist(err) {
- // it's ok if the file doesn't exist. just remove it from the database and we'll be good to go.
- } else {
- // we can't delete the file, abort -
- return fmt.Errorf("unable to delete old catchpoint file '%s' : %v", absCatchpointFileName, err)
- }
- // clear the entry from the database
- err = dbQueries.storeCatchpoint(ctx, round, "", "", 0)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// accountsUpdateBalances applies the given compactAccountDeltas to the merkle trie
-func (au *accountUpdates) accountsUpdateBalances(accountsDeltas compactAccountDeltas) (err error) {
- if au.catchpointInterval == 0 {
- return nil
- }
- var added, deleted bool
- accumulatedChanges := 0
-
- for i := 0; i < accountsDeltas.len(); i++ {
- addr, delta := accountsDeltas.getByIdx(i)
- if !delta.old.accountData.IsZero() {
- deleteHash := accountHashBuilder(addr, delta.old.accountData, protocol.Encode(&delta.old.accountData))
- deleted, err = au.balancesTrie.Delete(deleteHash)
- if err != nil {
- return fmt.Errorf("failed to delete hash '%s' from merkle trie for account %v: %w", hex.EncodeToString(deleteHash), addr, err)
- }
- if !deleted {
- au.log.Warnf("failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(deleteHash), addr)
- } else {
- accumulatedChanges++
- }
- }
-
- if !delta.new.IsZero() {
- addHash := accountHashBuilder(addr, delta.new, protocol.Encode(&delta.new))
- added, err = au.balancesTrie.Add(addHash)
- if err != nil {
- return fmt.Errorf("attempted to add duplicate hash '%s' to merkle trie for account %v: %w", hex.EncodeToString(addHash), addr, err)
- }
- if !added {
- au.log.Warnf("attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(addHash), addr)
- } else {
- accumulatedChanges++
- }
- }
- }
- if accumulatedChanges >= trieAccumulatedChangesFlush {
- accumulatedChanges = 0
- _, err = au.balancesTrie.Commit()
- if err != nil {
- return
- }
- }
-
- // write it all to disk.
- if accumulatedChanges > 0 {
- _, err = au.balancesTrie.Commit()
- }
+ au.baseAccounts.init(au.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold)
return
}
// newBlockImpl is the accountUpdates implementation of the ledgerTracker interface. This is the "internal" facing function
// which assumes that no lock needs to be taken.
func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.StateDelta) {
- proto := config.Consensus[blk.CurrentProtocol]
rnd := blk.Round()
if rnd <= au.latest() {
@@ -1680,41 +733,17 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
}
if rnd != au.latest()+1 {
- au.log.Panicf("accountUpdates: newBlockImpl %d too far in the future, dbRound %d, deltas %d", rnd, au.dbRound, len(au.deltas))
+ au.log.Panicf("accountUpdates: newBlockImpl %d too far in the future, dbRound %d, deltas %d", rnd, au.cachedDBRound, len(au.deltas))
}
au.deltas = append(au.deltas, delta.Accts)
au.versions = append(au.versions, blk.CurrentProtocol)
au.creatableDeltas = append(au.creatableDeltas, delta.Creatables)
- au.roundDigest = append(au.roundDigest, blk.Digest())
au.deltasAccum = append(au.deltasAccum, delta.Accts.Len()+au.deltasAccum[len(au.deltasAccum)-1])
- var ot basics.OverflowTracker
- newTotals := au.roundTotals[len(au.roundTotals)-1]
- allBefore := newTotals.All()
- newTotals.ApplyRewards(delta.Hdr.RewardsLevel, &ot)
-
au.baseAccounts.flushPendingWrites()
- var previousAccountData basics.AccountData
for i := 0; i < delta.Accts.Len(); i++ {
addr, data := delta.Accts.GetByIdx(i)
- if latestAcctData, has := au.accounts[addr]; has {
- previousAccountData = latestAcctData.data
- } else if baseAccountData, has := au.baseAccounts.read(addr); has {
- previousAccountData = baseAccountData.accountData
- } else {
- // it's missing from the base accounts, so we'll try to load it from disk.
- if acctData, err := au.accountsq.lookup(addr); err != nil {
- au.log.Panicf("accountUpdates: newBlockImpl failed to lookup account %v when processing round %d : %v", addr, rnd, err)
- } else {
- previousAccountData = acctData.accountData
- au.baseAccounts.write(acctData)
- }
- }
-
- newTotals.DelAccount(proto, previousAccountData, &ot)
- newTotals.AddAccount(proto, data, &ot)
-
macct := au.accounts[addr]
macct.ndeltas++
macct.data = data
@@ -1730,15 +759,7 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
au.creatables[cidx] = mcreat
}
- if ot.Overflowed {
- au.log.Panicf("accountUpdates: newBlockImpl %d overflowed totals", rnd)
- }
- allAfter := newTotals.All()
- if allBefore != allAfter {
- au.log.Panicf("accountUpdates: newBlockImpl sum of money changed from %d to %d", allBefore.Raw, allAfter.Raw)
- }
-
- au.roundTotals = append(au.roundTotals, newTotals)
+ au.roundTotals = append(au.roundTotals, delta.Totals)
// calling prune would drop old entries from the base accounts.
newBaseAccountSize := (len(au.accounts) + 1) + baseAccountsPendingAccountsBufferSize
@@ -1766,7 +787,7 @@ func (au *accountUpdates) lookupWithRewards(rnd basics.Round, addr basics.Addres
var persistedData persistedAccountData
withRewards := true
for {
- currentDbRound := au.dbRound
+ currentDbRound := au.cachedDBRound
currentDeltaLen := len(au.deltas)
offset, err = au.roundOffset(rnd)
if err != nil {
@@ -1836,7 +857,7 @@ func (au *accountUpdates) lookupWithRewards(rnd basics.Round, addr basics.Addres
}
au.accountsMu.RLock()
needUnlock = true
- for currentDbRound >= au.dbRound && currentDeltaLen == len(au.deltas) {
+ for currentDbRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
au.accountsReadCond.Wait()
}
}
@@ -1857,7 +878,7 @@ func (au *accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add
var offset uint64
var persistedData persistedAccountData
for {
- currentDbRound := au.dbRound
+ currentDbRound := au.cachedDBRound
currentDeltaLen := len(au.deltas)
offset, err = au.roundOffset(rnd)
if err != nil {
@@ -1922,7 +943,7 @@ func (au *accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add
}
au.accountsMu.RLock()
needUnlock = true
- for currentDbRound >= au.dbRound && currentDeltaLen == len(au.deltas) {
+ for currentDbRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
au.accountsReadCond.Wait()
}
} else {
@@ -1948,7 +969,7 @@ func (au *accountUpdates) getCreatorForRound(rnd basics.Round, cidx basics.Creat
var dbRound basics.Round
var offset uint64
for {
- currentDbRound := au.dbRound
+ currentDbRound := au.cachedDBRound
currentDeltaLen := len(au.deltas)
offset, err = au.roundOffset(rnd)
if err != nil {
@@ -1996,7 +1017,7 @@ func (au *accountUpdates) getCreatorForRound(rnd basics.Round, cidx basics.Creat
}
au.accountsMu.RLock()
unlock = true
- for currentDbRound >= au.dbRound && currentDeltaLen == len(au.deltas) {
+ for currentDbRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
au.accountsReadCond.Wait()
}
} else {
@@ -2006,274 +1027,156 @@ func (au *accountUpdates) getCreatorForRound(rnd basics.Round, cidx basics.Creat
}
}
-// accountsCreateCatchpointLabel creates a catchpoint label and write it.
-func (au *accountUpdates) accountsCreateCatchpointLabel(committedRound basics.Round, totals ledgercore.AccountTotals, ledgerBlockDigest crypto.Digest, trieBalancesHash crypto.Digest) (label string, err error) {
- cpLabel := ledgercore.MakeCatchpointLabel(committedRound, ledgerBlockDigest, trieBalancesHash, totals)
- label = cpLabel.String()
- _, err = au.accountsq.writeCatchpointStateString(context.Background(), catchpointStateLastCatchpoint, label)
- return
-}
-
// roundOffset calculates the offset of the given round relative to the current dbRound. Requires that the lock is held.
func (au *accountUpdates) roundOffset(rnd basics.Round) (offset uint64, err error) {
- if rnd < au.dbRound {
+ if rnd < au.cachedDBRound {
err = &RoundOffsetError{
round: rnd,
- dbRound: au.dbRound,
+ dbRound: au.cachedDBRound,
}
return
}
- off := uint64(rnd - au.dbRound)
+ off := uint64(rnd - au.cachedDBRound)
if off > uint64(len(au.deltas)) {
- err = fmt.Errorf("round %d too high: dbRound %d, deltas %d", rnd, au.dbRound, len(au.deltas))
+ err = fmt.Errorf("round %d too high: dbRound %d, deltas %d", rnd, au.cachedDBRound, len(au.deltas))
return
}
return off, nil
}
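For intuition, a minimal standalone sketch of the same bounds checks, with plain integers standing in for basics.Round; the helper name is hypothetical, and this is not the tracker's code:

package main

import "fmt"

// roundOffsetSketch mirrors the checks above: rounds below the cached DB
// round were already flushed to disk, and rounds beyond the in-memory
// deltas window are not known yet.
func roundOffsetSketch(rnd, cachedDBRound uint64, deltasLen int) (uint64, error) {
	if rnd < cachedDBRound {
		return 0, fmt.Errorf("round %d precedes the cached db round %d", rnd, cachedDBRound)
	}
	off := rnd - cachedDBRound
	if off > uint64(deltasLen) {
		return 0, fmt.Errorf("round %d too high: dbRound %d, deltas %d", rnd, cachedDBRound, deltasLen)
	}
	return off, nil
}

func main() {
	// with cachedDBRound=100 and 5 in-memory deltas, rounds 100..105 resolve.
	off, err := roundOffsetSketch(103, 100, 5)
	fmt.Println(off, err) // 3 <nil>
}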
-// commitSyncer is the syncer go-routine function which perform the database updates. Internally, it dequeues deferredCommits and
-// send the tasks to commitRound for completing the operation.
-func (au *accountUpdates) commitSyncer(deferredCommits chan deferredCommit) {
- defer close(au.commitSyncerClosed)
- for {
- select {
- case committedOffset, ok := <-deferredCommits:
- if !ok {
- return
- }
- au.commitRound(committedOffset.offset, committedOffset.dbRound, committedOffset.lookback)
- case <-au.ctx.Done():
- // drain the pending commits queue:
- drained := false
- for !drained {
- select {
- case <-deferredCommits:
- au.accountsWriting.Done()
- default:
- drained = true
- }
- }
- return
- }
- }
-}
+func (au *accountUpdates) handleUnorderedCommit(offset uint64, dbRound basics.Round, lookback basics.Round) {
-// commitRound write to the database a "chunk" of rounds, and update the dbRound accordingly.
-func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookback basics.Round) {
- var stats telemetryspec.AccountsUpdateMetrics
- var updateStats bool
+}
+// prepareCommit prepares the data needed to write a "chunk" of rounds to the database, and updates the cached dbRound accordingly.
+func (au *accountUpdates) prepareCommit(dcc *deferredCommitContext) error {
if au.logAccountUpdatesMetrics {
now := time.Now()
if now.Sub(au.lastMetricsLogTime) >= au.logAccountUpdatesInterval {
- updateStats = true
+ dcc.updateStats = true
au.lastMetricsLogTime = now
}
}
- defer au.accountsWriting.Done()
- au.accountsMu.RLock()
-
- // we can exit right away, as this is the result of mis-ordered call to committedUpTo.
- if au.dbRound < dbRound || offset < uint64(au.dbRound-dbRound) {
- // if this is an archival ledger, we might need to update the catchpointWriting variable.
- if au.archivalLedger {
- // determine if this was a catchpoint round
- isCatchpointRound := ((offset + uint64(lookback+dbRound)) > 0) && (au.catchpointInterval != 0) && (0 == (uint64((offset + uint64(lookback+dbRound))) % au.catchpointInterval))
- if isCatchpointRound {
- // it was a catchpoint round, so update the catchpointWriting to indicate that we're done.
- atomic.StoreInt32(&au.catchpointWriting, 0)
- }
- }
- au.accountsMu.RUnlock()
- return
- }
-
- // adjust the offset according to what happened meanwhile..
- offset -= uint64(au.dbRound - dbRound)
-
- // if this iteration need to flush out zero rounds, just return right away.
- // this usecase can happen when two subsequent calls to committedUpTo concludes that the same rounds range need to be
- // flush, without the commitRound have a chance of committing these rounds.
- if offset == 0 {
- au.accountsMu.RUnlock()
- return
- }
-
- dbRound = au.dbRound
+ offset := dcc.offset
- newBase := basics.Round(offset) + dbRound
- flushTime := time.Now()
- isCatchpointRound := ((offset + uint64(lookback+dbRound)) > 0) && (au.catchpointInterval != 0) && (0 == (uint64((offset + uint64(lookback+dbRound))) % au.catchpointInterval))
+ au.accountsMu.RLock()
// create a copy of the deltas, round totals and protos for the range we're going to flush.
- deltas := make([]ledgercore.AccountDeltas, offset, offset)
- creatableDeltas := make([]map[basics.CreatableIndex]ledgercore.ModifiedCreatable, offset, offset)
- roundTotals := make([]ledgercore.AccountTotals, offset+1, offset+1)
- copy(deltas, au.deltas[:offset])
+ dcc.deltas = make([]ledgercore.AccountDeltas, offset)
+ creatableDeltas := make([]map[basics.CreatableIndex]ledgercore.ModifiedCreatable, offset)
+ dcc.roundTotals = au.roundTotals[offset]
+ copy(dcc.deltas, au.deltas[:offset])
copy(creatableDeltas, au.creatableDeltas[:offset])
- copy(roundTotals, au.roundTotals[:offset+1])
// verify version correctness: all the entries in au.versions[1:offset+1] should have the *same* version, and committedUpTo should be enforcing that.
if au.versions[1] != au.versions[offset] {
au.accountsMu.RUnlock()
- au.log.Errorf("attempted to commit series of rounds with non-uniform consensus versions")
- return
- }
- consensusVersion := au.versions[1]
-
- var committedRoundDigest crypto.Digest
- if isCatchpointRound {
- committedRoundDigest = au.roundDigest[offset+uint64(lookback)-1]
+ // in scheduleCommit, we expect this function to update the catchpointWriting flag when
+ // it's a catchpoint round and it's an archival ledger. Clearing it on this error path
+ // keeps us from "forgetting" to update the variable later on.
+ // The same is repeated in commitRound on errors.
+ if dcc.isCatchpointRound && au.archivalLedger {
+ atomic.StoreInt32(dcc.catchpointWriting, 0)
+ }
+ return fmt.Errorf("attempted to commit series of rounds with non-uniform consensus versions")
}
// compact all the deltas - when we're trying to persist multiple rounds, we might have the same account
// being updated multiple times. When that happens, we can safely omit the intermediate updates.
- compactDeltas := makeCompactAccountDeltas(deltas, au.baseAccounts)
- compactCreatableDeltas := compactCreatableDeltas(creatableDeltas)
+ dcc.compactAccountDeltas = makeCompactAccountDeltas(dcc.deltas, au.baseAccounts)
+ dcc.compactCreatableDeltas = compactCreatableDeltas(creatableDeltas)
au.accountsMu.RUnlock()
- // in committedUpTo, we expect that this function to update the catchpointWriting when
- // it's on a catchpoint round and it's an archival ledger. Doing this in a deferred function
- // here would prevent us from "forgetting" to update this variable later on.
- defer func() {
- if isCatchpointRound && au.archivalLedger {
- atomic.StoreInt32(&au.catchpointWriting, 0)
- }
- }()
-
- var catchpointLabel string
- beforeUpdatingBalancesTime := time.Now()
- var trieBalancesHash crypto.Digest
-
- genesisProto := au.ledger.GenesisProto()
+ dcc.genesisProto = au.ledger.GenesisProto()
- start := time.Now()
- ledgerCommitroundCount.Inc(nil)
- var updatedPersistedAccounts []persistedAccountData
- if updateStats {
- stats.DatabaseCommitDuration = time.Duration(time.Now().UnixNano())
+ if dcc.updateStats {
+ dcc.stats.DatabaseCommitDuration = time.Duration(time.Now().UnixNano())
}
- err := au.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- treeTargetRound := basics.Round(0)
- if au.catchpointInterval > 0 {
- mc, err0 := MakeMerkleCommitter(tx, false)
- if err0 != nil {
- return err0
- }
- if au.balancesTrie == nil {
- trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig)
- if err != nil {
- au.log.Warnf("unable to create merkle trie during committedUpTo: %v", err)
- return err
- }
- au.balancesTrie = trie
- } else {
- au.balancesTrie.SetCommitter(mc)
- }
- treeTargetRound = dbRound + basics.Round(offset)
- }
-
- db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(accountsUpdatePerRoundHighWatermark*time.Duration(offset)))
- if updateStats {
- stats.OldAccountPreloadDuration = time.Duration(time.Now().UnixNano())
- }
-
- err = compactDeltas.accountsLoadOld(tx)
- if err != nil {
- return err
- }
-
- if updateStats {
- stats.OldAccountPreloadDuration = time.Duration(time.Now().UnixNano()) - stats.OldAccountPreloadDuration
- }
-
- err = totalsNewRounds(tx, deltas[:offset], compactDeltas, roundTotals[1:offset+1], config.Consensus[consensusVersion])
- if err != nil {
- return err
- }
+ return nil
+}
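The compaction mentioned above collapses several rounds of per-account updates into a single write per account, since only the newest value needs to reach disk. A simplified, map-based sketch of the idea; this is not the actual makeCompactAccountDeltas, which also tracks old values and per-account delta counts:

package main

import "fmt"

// stand-in for one round of account deltas: address -> new balance.
type roundDeltas map[string]uint64

// compact merges several rounds of deltas, keeping only the latest value
// written for each account.
func compact(rounds []roundDeltas) roundDeltas {
	out := make(roundDeltas)
	for _, rd := range rounds {
		for addr, bal := range rd {
			out[addr] = bal // later rounds overwrite earlier ones
		}
	}
	return out
}

func main() {
	rounds := []roundDeltas{
		{"addr1": 10, "addr2": 5},
		{"addr1": 12}, // addr1 updated again; the intermediate 10 is dropped
	}
	fmt.Println(compact(rounds)) // map[addr1:12 addr2:5]
}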
- if updateStats {
- stats.MerkleTrieUpdateDuration = time.Duration(time.Now().UnixNano())
- }
+// commitRound is a closure called within the same transaction for all trackers;
+// it receives the current offset and dbRound.
+func (au *accountUpdates) commitRound(ctx context.Context, tx *sql.Tx, dcc *deferredCommitContext) (err error) {
+ offset := dcc.offset
+ dbRound := dcc.oldBase
- err = au.accountsUpdateBalances(compactDeltas)
+ defer func() {
if err != nil {
- return err
+ if dcc.isCatchpointRound && au.archivalLedger {
+ atomic.StoreInt32(dcc.catchpointWriting, 0)
+ }
}
+ }()
- if updateStats {
- now := time.Duration(time.Now().UnixNano())
- stats.MerkleTrieUpdateDuration = now - stats.MerkleTrieUpdateDuration
- stats.AccountsWritingDuration = now
- }
+ _, err = db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(accountsUpdatePerRoundHighWatermark*time.Duration(offset)))
+ if err != nil {
+ return err
+ }
- // the updates of the actual account data is done last since the accountsNewRound would modify the compactDeltas old values
- // so that we can update the base account back.
- updatedPersistedAccounts, err = accountsNewRound(tx, compactDeltas, compactCreatableDeltas, genesisProto, dbRound+basics.Round(offset))
- if err != nil {
- return err
- }
+ if dcc.updateStats {
+ dcc.stats.OldAccountPreloadDuration = time.Duration(time.Now().UnixNano())
+ }
- if updateStats {
- stats.AccountsWritingDuration = time.Duration(time.Now().UnixNano()) - stats.AccountsWritingDuration
- }
+ err = dcc.compactAccountDeltas.accountsLoadOld(tx)
+ if err != nil {
+ return err
+ }
- err = updateAccountsRound(tx, dbRound+basics.Round(offset), treeTargetRound)
- if err != nil {
- return err
- }
+ if dcc.updateStats {
+ dcc.stats.OldAccountPreloadDuration = time.Duration(time.Now().UnixNano()) - dcc.stats.OldAccountPreloadDuration
+ }
- if isCatchpointRound {
- trieBalancesHash, err = au.balancesTrie.RootHash()
- if err != nil {
- return
- }
- }
- return nil
- })
- ledgerCommitroundMicros.AddMicrosecondsSince(start, nil)
+ err = accountsPutTotals(tx, dcc.roundTotals, false)
if err != nil {
- au.balancesTrie = nil
- au.log.Warnf("unable to advance account snapshot (%d-%d): %v", dbRound, dbRound+basics.Round(offset), err)
- return
+ return err
}
- if updateStats {
- stats.DatabaseCommitDuration = time.Duration(time.Now().UnixNano()) - stats.DatabaseCommitDuration - stats.AccountsWritingDuration - stats.MerkleTrieUpdateDuration - stats.OldAccountPreloadDuration
+ if dcc.updateStats {
+ dcc.stats.AccountsWritingDuration = time.Duration(time.Now().UnixNano())
}
- if isCatchpointRound {
- catchpointLabel, err = au.accountsCreateCatchpointLabel(dbRound+basics.Round(offset)+lookback, roundTotals[offset], committedRoundDigest, trieBalancesHash)
- if err != nil {
- au.log.Warnf("commitRound : unable to create a catchpoint label: %v", err)
- }
+ // the update of the actual account data is done last, since accountsNewRound modifies the old values in compactDeltas
+ // so that we can update the base accounts afterward.
+ dcc.updatedPersistedAccounts, err = accountsNewRound(tx, dcc.compactAccountDeltas, dcc.compactCreatableDeltas, dcc.genesisProto, dbRound+basics.Round(offset))
+ if err != nil {
+ return err
}
- if au.balancesTrie != nil {
- _, err = au.balancesTrie.Evict(false)
- if err != nil {
- au.log.Warnf("merkle trie failed to evict: %v", err)
- }
+
+ if dcc.updateStats {
+ dcc.stats.AccountsWritingDuration = time.Duration(time.Now().UnixNano()) - dcc.stats.AccountsWritingDuration
}
- if isCatchpointRound && catchpointLabel != "" {
- au.lastCatchpointLabel = catchpointLabel
+ return
+}
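Both prepareCommit and commitRound clear the catchpointWriting flag when they fail; commitRound does it with a deferred closure over the named error return, so every early return is covered. A minimal sketch of that idiom with stand-in names:

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var writing int32 = 1 // stand-in for dcc.catchpointWriting

// doCommit clears the flag only on failure: the deferred closure observes
// the final value of the named return err.
func doCommit(fail bool) (err error) {
	defer func() {
		if err != nil {
			atomic.StoreInt32(&writing, 0)
		}
	}()
	if fail {
		return errors.New("commit failed")
	}
	return nil
}

func main() {
	_ = doCommit(true)
	fmt.Println(atomic.LoadInt32(&writing)) // 0: flag cleared on the error path
}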
+
+func (au *accountUpdates) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+ if dcc.updateStats {
+ spentDuration := dcc.stats.DatabaseCommitDuration + dcc.stats.AccountsWritingDuration + dcc.stats.MerkleTrieUpdateDuration + dcc.stats.OldAccountPreloadDuration
+ dcc.stats.DatabaseCommitDuration = time.Duration(time.Now().UnixNano()) - spentDuration
}
- updatingBalancesDuration := time.Now().Sub(beforeUpdatingBalancesTime)
- if updateStats {
- stats.MemoryUpdatesDuration = time.Duration(time.Now().UnixNano())
+ offset := dcc.offset
+ dbRound := dcc.oldBase
+ newBase := dcc.newBase
+
+ dcc.updatingBalancesDuration = time.Since(dcc.flushTime)
+
+ if dcc.updateStats {
+ dcc.stats.MemoryUpdatesDuration = time.Duration(time.Now().UnixNano())
}
+
au.accountsMu.Lock()
// Drop reference counts to modified accounts, and evict them
// from in-memory cache when no references remain.
- for i := 0; i < compactDeltas.len(); i++ {
- addr, acctUpdate := compactDeltas.getByIdx(i)
+ for i := 0; i < dcc.compactAccountDeltas.len(); i++ {
+ addr, acctUpdate := dcc.compactAccountDeltas.getByIdx(i)
cnt := acctUpdate.ndeltas
macct, ok := au.accounts[addr]
if !ok {
@@ -2290,11 +1193,11 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb
}
}
- for _, persistedAcct := range updatedPersistedAccounts {
+ for _, persistedAcct := range dcc.updatedPersistedAccounts {
au.baseAccounts.write(persistedAcct)
}
- for cidx, modCrt := range compactCreatableDeltas {
+ for cidx, modCrt := range dcc.compactCreatableDeltas {
cnt := modCrt.Ndeltas
mcreat, ok := au.creatables[cidx]
if !ok {
@@ -2313,39 +1216,29 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb
au.deltas = au.deltas[offset:]
au.deltasAccum = au.deltasAccum[offset:]
- au.roundDigest = au.roundDigest[offset:]
au.versions = au.versions[offset:]
au.roundTotals = au.roundTotals[offset:]
au.creatableDeltas = au.creatableDeltas[offset:]
- au.dbRound = newBase
- au.lastFlushTime = flushTime
+ au.cachedDBRound = newBase
au.accountsMu.Unlock()
- if updateStats {
- stats.MemoryUpdatesDuration = time.Duration(time.Now().UnixNano()) - stats.MemoryUpdatesDuration
+ if dcc.updateStats {
+ dcc.stats.MemoryUpdatesDuration = time.Duration(time.Now().UnixNano()) - dcc.stats.MemoryUpdatesDuration
}
au.accountsReadCond.Broadcast()
- if isCatchpointRound && au.archivalLedger && catchpointLabel != "" {
- // generate the catchpoint file. This need to be done inline so that it will block any new accounts that from being written.
- // the generateCatchpoint expects that the accounts data would not be modified in the background during it's execution.
- au.generateCatchpoint(basics.Round(offset)+dbRound+lookback, catchpointLabel, committedRoundDigest, updatingBalancesDuration)
- }
-
// log telemetry event
- if updateStats {
- stats.StartRound = uint64(dbRound)
- stats.RoundsCount = offset
- stats.UpdatedAccountsCount = uint64(len(updatedPersistedAccounts))
- stats.UpdatedCreatablesCount = uint64(len(compactCreatableDeltas))
+ if dcc.updateStats {
+ dcc.stats.StartRound = uint64(dbRound)
+ dcc.stats.RoundsCount = offset
+ dcc.stats.UpdatedAccountsCount = uint64(len(dcc.updatedPersistedAccounts))
+ dcc.stats.UpdatedCreatablesCount = uint64(len(dcc.compactCreatableDeltas))
- var details struct {
- }
- au.log.Metrics(telemetryspec.Accounts, stats, details)
+ var details struct{}
+ au.log.Metrics(telemetryspec.Accounts, dcc.stats, details)
}
-
}
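The stats bookkeeping above relies on a small idiom: a duration field first holds an absolute timestamp (the current time in nanoseconds, stored as a time.Duration), and is later overwritten with the difference, leaving the elapsed time. A tiny sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	// store the start as a Duration-typed timestamp...
	d := time.Duration(time.Now().UnixNano())
	time.Sleep(10 * time.Millisecond)
	// ...then overwrite it with elapsed = now - start.
	d = time.Duration(time.Now().UnixNano()) - d
	fmt.Println(d >= 10*time.Millisecond) // true (assuming no wall-clock step)
}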
// compactCreatableDeltas takes an array of creatable map deltas (one array entry per round), and compacts the array into a single
@@ -2381,189 +1274,11 @@ func compactCreatableDeltas(creatableDeltas []map[basics.CreatableIndex]ledgerco
// latest returns the latest round
func (au *accountUpdates) latest() basics.Round {
- return au.dbRound + basics.Round(len(au.deltas))
-}
-
-// generateCatchpoint generates a single catchpoint file
-func (au *accountUpdates) generateCatchpoint(committedRound basics.Round, label string, committedRoundDigest crypto.Digest, updatingBalancesDuration time.Duration) {
- beforeGeneratingCatchpointTime := time.Now()
- catchpointGenerationStats := telemetryspec.CatchpointGenerationEventDetails{
- BalancesWriteTime: uint64(updatingBalancesDuration.Nanoseconds()),
- }
-
- // the retryCatchpointCreation is used to repeat the catchpoint file generation in case the node crashed / aborted during startup
- // before the catchpoint file generation could be completed.
- retryCatchpointCreation := false
- au.log.Debugf("accountUpdates: generateCatchpoint: generating catchpoint for round %d", committedRound)
- defer func() {
- if !retryCatchpointCreation {
- // clear the writingCatchpoint flag
- _, err := au.accountsq.writeCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint, uint64(0))
- if err != nil {
- au.log.Warnf("accountUpdates: generateCatchpoint unable to clear catchpoint state '%s' for round %d: %v", catchpointStateWritingCatchpoint, committedRound, err)
- }
- }
- }()
-
- _, err := au.accountsq.writeCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint, uint64(committedRound))
- if err != nil {
- au.log.Warnf("accountUpdates: generateCatchpoint unable to write catchpoint state '%s' for round %d: %v", catchpointStateWritingCatchpoint, committedRound, err)
- return
- }
-
- relCatchpointFileName := filepath.Join("catchpoints", catchpointRoundToPath(committedRound))
- absCatchpointFileName := filepath.Join(au.dbDirectory, relCatchpointFileName)
-
- more := true
- const shortChunkExecutionDuration = 50 * time.Millisecond
- const longChunkExecutionDuration = 1 * time.Second
- var chunkExecutionDuration time.Duration
- select {
- case <-au.catchpointSlowWriting:
- chunkExecutionDuration = longChunkExecutionDuration
- default:
- chunkExecutionDuration = shortChunkExecutionDuration
- }
-
- var catchpointWriter *catchpointWriter
- start := time.Now()
- ledgerGeneratecatchpointCount.Inc(nil)
- err = au.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- catchpointWriter = makeCatchpointWriter(au.ctx, absCatchpointFileName, tx, committedRound, committedRoundDigest, label)
- for more {
- stepCtx, stepCancelFunction := context.WithTimeout(au.ctx, chunkExecutionDuration)
- writeStepStartTime := time.Now()
- more, err = catchpointWriter.WriteStep(stepCtx)
- // accumulate the actual time we've spent writing in this step.
- catchpointGenerationStats.CPUTime += uint64(time.Now().Sub(writeStepStartTime).Nanoseconds())
- stepCancelFunction()
- if more && err == nil {
- // we just wrote some data, but there is more to be written.
- // go to sleep for while.
- // before going to sleep, extend the transaction timeout so that we won't get warnings:
- db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(1*time.Second))
- select {
- case <-time.After(100 * time.Millisecond):
- // increase the time slot allocated for writing the catchpoint, but stop when we get to the longChunkExecutionDuration limit.
- // this would allow the catchpoint writing speed to ramp up while still leaving some cpu available.
- chunkExecutionDuration *= 2
- if chunkExecutionDuration > longChunkExecutionDuration {
- chunkExecutionDuration = longChunkExecutionDuration
- }
- case <-au.ctx.Done():
- retryCatchpointCreation = true
- err2 := catchpointWriter.Abort()
- if err2 != nil {
- return fmt.Errorf("error removing catchpoint file : %v", err2)
- }
- return nil
- case <-au.catchpointSlowWriting:
- chunkExecutionDuration = longChunkExecutionDuration
- }
- }
- if err != nil {
- err = fmt.Errorf("unable to create catchpoint : %v", err)
- err2 := catchpointWriter.Abort()
- if err2 != nil {
- au.log.Warnf("accountUpdates: generateCatchpoint: error removing catchpoint file : %v", err2)
- }
- return
- }
- }
- return
- })
- ledgerGeneratecatchpointMicros.AddMicrosecondsSince(start, nil)
-
- if err != nil {
- au.log.Warnf("accountUpdates: generateCatchpoint: %v", err)
- return
- }
- if catchpointWriter == nil {
- au.log.Warnf("accountUpdates: generateCatchpoint: nil catchpointWriter")
- return
- }
-
- err = au.saveCatchpointFile(committedRound, relCatchpointFileName, catchpointWriter.GetSize(), catchpointWriter.GetCatchpoint())
- if err != nil {
- au.log.Warnf("accountUpdates: generateCatchpoint: unable to save catchpoint: %v", err)
- return
- }
- catchpointGenerationStats.FileSize = uint64(catchpointWriter.GetSize())
- catchpointGenerationStats.WritingDuration = uint64(time.Now().Sub(beforeGeneratingCatchpointTime).Nanoseconds())
- catchpointGenerationStats.AccountsCount = catchpointWriter.GetTotalAccounts()
- catchpointGenerationStats.CatchpointLabel = catchpointWriter.GetCatchpoint()
- au.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.CatchpointGenerationEvent, catchpointGenerationStats)
- au.log.With("writingDuration", catchpointGenerationStats.WritingDuration).
- With("CPUTime", catchpointGenerationStats.CPUTime).
- With("balancesWriteTime", catchpointGenerationStats.BalancesWriteTime).
- With("accountsCount", catchpointGenerationStats.AccountsCount).
- With("fileSize", catchpointGenerationStats.FileSize).
- With("catchpointLabel", catchpointGenerationStats.CatchpointLabel).
- Infof("Catchpoint file was generated")
-}
-
-// catchpointRoundToPath calculate the catchpoint file path for a given round
-func catchpointRoundToPath(rnd basics.Round) string {
- irnd := int64(rnd) / 256
- outStr := ""
- for irnd > 0 {
- outStr = filepath.Join(outStr, fmt.Sprintf("%02x", irnd%256))
- irnd = irnd / 256
- }
- outStr = filepath.Join(outStr, strconv.FormatInt(int64(rnd), 10)+".catchpoint")
- return outStr
-}
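The deleted catchpointRoundToPath shards catchpoint files into nested directories, one level per base-256 digit of rnd/256, so no single directory accumulates too many files. A standalone copy of the scheme with a worked example (a sketch mirroring the removed code):

package main

import (
	"fmt"
	"path/filepath"
	"strconv"
)

// one directory level per base-256 digit of rnd/256, least significant first.
func catchpointRoundToPath(rnd int64) string {
	irnd := rnd / 256
	outStr := ""
	for irnd > 0 {
		outStr = filepath.Join(outStr, fmt.Sprintf("%02x", irnd%256))
		irnd /= 256
	}
	return filepath.Join(outStr, strconv.FormatInt(rnd, 10)+".catchpoint")
}

func main() {
	// 70000/256 = 273 = 0x0111, digits 0x11 then 0x01.
	fmt.Println(catchpointRoundToPath(70000)) // 11/01/70000.catchpoint
}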
-
-// saveCatchpointFile stores the provided fileName as the stored catchpoint for the given round.
-// after a successful insert operation to the database, it would delete up to 2 old entries, as needed.
-// deleting 2 entries while inserting single entry allow us to adjust the size of the backing storage and have the
-// database and storage realign.
-func (au *accountUpdates) saveCatchpointFile(round basics.Round, fileName string, fileSize int64, catchpoint string) (err error) {
- if au.catchpointFileHistoryLength != 0 {
- err = au.accountsq.storeCatchpoint(context.Background(), round, fileName, catchpoint, fileSize)
- if err != nil {
- au.log.Warnf("accountUpdates: saveCatchpoint: unable to save catchpoint: %v", err)
- return
- }
- } else {
- err = os.Remove(fileName)
- if err != nil {
- au.log.Warnf("accountUpdates: saveCatchpoint: unable to remove file (%s): %v", fileName, err)
- return
- }
- }
- if au.catchpointFileHistoryLength == -1 {
- return
- }
- var filesToDelete map[basics.Round]string
- filesToDelete, err = au.accountsq.getOldestCatchpointFiles(context.Background(), 2, au.catchpointFileHistoryLength)
- if err != nil {
- return fmt.Errorf("unable to delete catchpoint file, getOldestCatchpointFiles failed : %v", err)
- }
- for round, fileToDelete := range filesToDelete {
- absCatchpointFileName := filepath.Join(au.dbDirectory, fileToDelete)
- err = os.Remove(absCatchpointFileName)
- if err == nil || os.IsNotExist(err) {
- // it's ok if the file doesn't exist. just remove it from the database and we'll be good to go.
- err = nil
- } else {
- // we can't delete the file, abort -
- return fmt.Errorf("unable to delete old catchpoint file '%s' : %v", absCatchpointFileName, err)
- }
- err = au.accountsq.storeCatchpoint(context.Background(), round, "", "", 0)
- if err != nil {
- return fmt.Errorf("unable to delete old catchpoint entry '%s' : %v", fileToDelete, err)
- }
- }
- return
+ return au.cachedDBRound + basics.Round(len(au.deltas))
}
// vacuumDatabase performs a full vacuum of the accounts database.
func (au *accountUpdates) vacuumDatabase(ctx context.Context) (err error) {
- if !au.vacuumOnStartup {
- return
- }
-
// vacuuming the database would modify some of the tables' rowids, so we need to make sure any in-memory
// rowids are flushed.
au.baseAccounts.prune(0)
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index 54fd6d4f0..27306af53 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -24,7 +24,6 @@ import (
"fmt"
"io/ioutil"
"os"
- "path/filepath"
"runtime"
"strings"
"sync"
@@ -37,13 +36,18 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/internal"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-algorand/util/db"
)
+var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
+
type mockLedgerForTracker struct {
dbs db.Pair
blocks []blockEntry
@@ -52,9 +56,26 @@ type mockLedgerForTracker struct {
filename string
inMemory bool
consensusParams config.ConsensusParams
+ accts map[basics.Address]basics.AccountData
+
+ // trackerRegistry manages persistence into the DB, so we need it here even for a single-tracker test
+ trackers trackerRegistry
+}
+
+func accumulateTotals(t testing.TB, consensusVersion protocol.ConsensusVersion, accts []map[basics.Address]basics.AccountData, rewardLevel uint64) (totals ledgercore.AccountTotals) {
+ var ot basics.OverflowTracker
+ proto := config.Consensus[consensusVersion]
+ totals.RewardsLevel = rewardLevel
+ for _, ar := range accts {
+ for _, data := range ar {
+ totals.AddAccount(proto, data, &ot)
+ }
+ }
+ require.False(t, ot.Overflowed)
+ return
}
-func makeMockLedgerForTracker(t testing.TB, inMemory bool, initialBlocksCount int, consensusVersion protocol.ConsensusVersion) *mockLedgerForTracker {
+func makeMockLedgerForTracker(t testing.TB, inMemory bool, initialBlocksCount int, consensusVersion protocol.ConsensusVersion, accts []map[basics.Address]basics.AccountData) *mockLedgerForTracker {
dbs, fileName := dbOpenTest(t, inMemory)
dblogger := logging.TestingLog(t)
dblogger.SetLevel(logging.Info)
@@ -63,11 +84,15 @@ func makeMockLedgerForTracker(t testing.TB, inMemory bool, initialBlocksCount in
blocks := randomInitChain(consensusVersion, initialBlocksCount)
deltas := make([]ledgercore.StateDelta, initialBlocksCount)
+ totals := accumulateTotals(t, consensusVersion, accts, 0)
for i := range deltas {
- deltas[i] = ledgercore.StateDelta{Hdr: &bookkeeping.BlockHeader{}}
+ deltas[i] = ledgercore.StateDelta{
+ Hdr: &bookkeeping.BlockHeader{},
+ Totals: totals,
+ }
}
consensusParams := config.Consensus[consensusVersion]
- return &mockLedgerForTracker{dbs: dbs, log: dblogger, filename: fileName, inMemory: inMemory, blocks: blocks, deltas: deltas, consensusParams: consensusParams}
+ return &mockLedgerForTracker{dbs: dbs, log: dblogger, filename: fileName, inMemory: inMemory, blocks: blocks, deltas: deltas, consensusParams: consensusParams, accts: accts[0]}
}
// fork creates another database which has the same content as the current one. Works only for non-memory databases.
@@ -85,8 +110,12 @@ func (ml *mockLedgerForTracker) fork(t testing.TB) *mockLedgerForTracker {
log: dblogger,
blocks: make([]blockEntry, len(ml.blocks)),
deltas: make([]ledgercore.StateDelta, len(ml.deltas)),
+ accts: make(map[basics.Address]basics.AccountData),
filename: fn,
}
+ for k, v := range ml.accts {
+ newLedgerTracker.accts[k] = v
+ }
copy(newLedgerTracker.blocks, ml.blocks)
copy(newLedgerTracker.deltas, ml.deltas)
@@ -109,6 +138,8 @@ func (ml *mockLedgerForTracker) fork(t testing.TB) *mockLedgerForTracker {
}
func (ml *mockLedgerForTracker) Close() {
+ ml.trackers.close()
+
ml.dbs.Close()
// delete the database files of non-memory instances.
if !ml.inMemory {
@@ -128,7 +159,7 @@ func (ml *mockLedgerForTracker) addMockBlock(be blockEntry, delta ledgercore.Sta
return nil
}
-func (ml *mockLedgerForTracker) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger ledgerForEvaluator) (ledgercore.StateDelta, error) {
+func (ml *mockLedgerForTracker) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger internal.LedgerForEvaluator) (ledgercore.StateDelta, error) {
// support returning the deltas if the client explicitly provided them by calling addMockBlock, otherwise,
// just return an empty state delta (since the client clearly didn't care about these)
if len(ml.deltas) > int(blk.Round()) {
@@ -178,6 +209,10 @@ func (ml *mockLedgerForTracker) GenesisProto() config.ConsensusParams {
return ml.consensusParams
}
+func (ml *mockLedgerForTracker) GenesisAccounts() map[basics.Address]basics.AccountData {
+ return ml.accts
+}
+
// this function used to be in acctupdates.go, but we were never using it for production purposes. This
// function has a conceptual flaw in that it attempts to load all the balances into memory. This might
// not work if we have a large number of balances. In these unit tests, however, that's not the case, and it's
@@ -209,15 +244,28 @@ func (au *accountUpdates) allBalances(rnd basics.Round) (bals map[basics.Address
return
}
+func newAcctUpdates(tb testing.TB, l *mockLedgerForTracker, conf config.Local, dbPathPrefix string) *accountUpdates {
+ au := &accountUpdates{}
+ au.initialize(conf)
+ _, err := trackerDBInitialize(l, false, ".")
+ require.NoError(tb, err)
+
+ l.trackers.initialize(l, []ledgerTracker{au}, conf)
+ err = l.trackers.loadFromDisk(l)
+ require.NoError(tb, err)
+
+ return au
+}
+
func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, latestRnd basics.Round, accts []map[basics.Address]basics.AccountData, rewards []uint64, proto config.ConsensusParams) {
latest := au.latest()
- require.Equal(t, latest, latestRnd)
+ require.Equal(t, latestRnd, latest)
_, err := au.Totals(latest + 1)
require.Error(t, err)
var validThrough basics.Round
- _, validThrough, err = au.LookupWithoutRewards(latest+1, randomAddress())
+ _, validThrough, err = au.LookupWithoutRewards(latest+1, ledgertesting.RandomAddress())
require.Error(t, err)
require.Equal(t, basics.Round(0), validThrough)
@@ -225,7 +273,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates
_, err := au.Totals(base - 1)
require.Error(t, err)
- _, validThrough, err = au.LookupWithoutRewards(base-1, randomAddress())
+ _, validThrough, err = au.LookupWithoutRewards(base-1, ledgertesting.RandomAddress())
require.Error(t, err)
require.Equal(t, basics.Round(0), validThrough)
}
@@ -284,7 +332,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates
require.Equal(t, totals.Participating().Raw, totalOnline+totalOffline)
require.Equal(t, totals.All().Raw, totalOnline+totalOffline+totalNotPart)
- d, validThrough, err := au.LookupWithoutRewards(rnd, randomAddress())
+ d, validThrough, err := au.LookupWithoutRewards(rnd, ledgertesting.RandomAddress())
require.NoError(t, err)
require.GreaterOrEqualf(t, uint64(validThrough), uint64(rnd), fmt.Sprintf("validThrough :%v\nrnd :%v\n", validThrough, rnd))
require.Equal(t, d, basics.AccountData{})
@@ -317,10 +365,7 @@ func TestAcctUpdates(t *testing.T) {
}
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion)
- defer ml.Close()
-
- accts := []map[basics.Address]basics.AccountData{randomAccounts(20, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
rewardsLevels := []uint64{0}
pooldata := basics.AccountData{}
@@ -333,12 +378,12 @@ func TestAcctUpdates(t *testing.T) {
sinkdata.Status = basics.NotParticipating
accts[0][testSinkAddr] = sinkdata
- au := &accountUpdates{}
- au.initialize(config.GetDefaultLocal(), ".", proto, accts[0])
- defer au.close()
+ ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
- err := au.loadFromDisk(ml)
- require.NoError(t, err)
+ conf := config.GetDefaultLocal()
+ au := newAcctUpdates(t, ml, conf, ".")
+ defer au.close()
// cover 10 genesis blocks
rewardLevel := uint64(0)
@@ -352,13 +397,14 @@ func TestAcctUpdates(t *testing.T) {
// lastCreatableID stores the maximum used asset or app index, to avoid conflicts
lastCreatableID := crypto.RandUint64() % 512
knownCreatables := make(map[basics.CreatableIndex]bool)
+
for i := basics.Round(10); i < basics.Round(proto.MaxBalLookback+15); i++ {
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
var updates ledgercore.AccountDeltas
var totals map[basics.Address]basics.AccountData
base := accts[i-1]
- updates, totals, lastCreatableID = randomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
+ updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -378,6 +424,7 @@ func TestAcctUpdates(t *testing.T) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables)
+ delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]basics.AccountData{totals}, rewardLevel)
au.newBlock(blk, delta)
accts = append(accts, totals)
rewardsLevels = append(rewardsLevels, rewardLevel)
@@ -387,14 +434,35 @@ func TestAcctUpdates(t *testing.T) {
for i := basics.Round(0); i < 15; i++ {
// Clear the timer to ensure a flush
- au.lastFlushTime = time.Time{}
+ ml.trackers.lastFlushTime = time.Time{}
- au.committedUpTo(basics.Round(proto.MaxBalLookback) + i)
- au.waitAccountsWriting()
+ ml.trackers.committedUpTo(basics.Round(proto.MaxBalLookback) + i)
+ ml.trackers.waitAccountsWriting()
checkAcctUpdates(t, au, i, basics.Round(proto.MaxBalLookback+14), accts, rewardsLevels, proto)
}
-}
+ // check the account totals.
+ var dbRound basics.Round
+ err := ml.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ dbRound, err = accountsRound(tx)
+ return
+ })
+ require.NoError(t, err)
+
+ var updates ledgercore.AccountDeltas
+ for addr, acctData := range accts[dbRound] {
+ updates.Upsert(addr, acctData)
+ }
+
+ expectedTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, rewardsLevels[dbRound], proto, nil, ledgercore.AccountTotals{})
+ var actualTotals ledgercore.AccountTotals
+ err = ml.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ actualTotals, err = accountsTotals(tx, false)
+ return
+ })
+ require.NoError(t, err)
+ require.Equal(t, expectedTotals, actualTotals)
+}
func TestAcctUpdatesFastUpdates(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -403,10 +471,7 @@ func TestAcctUpdatesFastUpdates(t *testing.T) {
}
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion)
- defer ml.Close()
-
- accts := []map[basics.Address]basics.AccountData{randomAccounts(20, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
rewardsLevels := []uint64{0}
pooldata := basics.AccountData{}
@@ -419,15 +484,14 @@ func TestAcctUpdatesFastUpdates(t *testing.T) {
sinkdata.Status = basics.NotParticipating
accts[0][testSinkAddr] = sinkdata
- au := &accountUpdates{}
+ ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
+
conf := config.GetDefaultLocal()
conf.CatchpointInterval = 1
- au.initialize(conf, ".", proto, accts[0])
+ au := newAcctUpdates(t, ml, conf, ".")
defer au.close()
- err := au.loadFromDisk(ml)
- require.NoError(t, err)
-
// cover 10 genesis blocks
rewardLevel := uint64(0)
for i := 1; i < 10; i++ {
@@ -442,7 +506,7 @@ func TestAcctUpdatesFastUpdates(t *testing.T) {
for i := basics.Round(10); i < basics.Round(proto.MaxBalLookback+15); i++ {
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
- updates, totals := randomDeltasBalanced(1, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(1, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -469,7 +533,7 @@ func TestAcctUpdatesFastUpdates(t *testing.T) {
wg.Add(1)
go func(round basics.Round) {
defer wg.Done()
- au.committedUpTo(round)
+ ml.trackers.committedUpTo(round)
}(i)
}
wg.Wait()
@@ -494,11 +558,8 @@ func BenchmarkBalancesChanges(b *testing.B) {
initialRounds := uint64(1)
- ml := makeMockLedgerForTracker(b, true, int(initialRounds), protocolVersion)
- defer ml.Close()
-
accountsCount := 5000
- accts := []map[basics.Address]basics.AccountData{randomAccounts(accountsCount, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(accountsCount, true)}
rewardsLevels := []uint64{0}
pooldata := basics.AccountData{}
@@ -511,10 +572,11 @@ func BenchmarkBalancesChanges(b *testing.B) {
sinkdata.Status = basics.NotParticipating
accts[0][testSinkAddr] = sinkdata
- au := &accountUpdates{}
- au.initialize(config.GetDefaultLocal(), ".", proto, accts[0])
- err := au.loadFromDisk(ml)
- require.NoError(b, err)
+ ml := makeMockLedgerForTracker(b, true, int(initialRounds), protocolVersion, accts)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ au := newAcctUpdates(b, ml, conf, ".")
defer au.close()
// cover initialRounds genesis blocks
@@ -532,7 +594,7 @@ func BenchmarkBalancesChanges(b *testing.B) {
accountChanges = accountsCount - 2 - int(basics.Round(proto.MaxBalLookback+uint64(b.N))+i)
}
- updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(b, err)
@@ -557,18 +619,18 @@ func BenchmarkBalancesChanges(b *testing.B) {
}
for i := proto.MaxBalLookback; i < proto.MaxBalLookback+initialRounds; i++ {
// Clear the timer to ensure a flush
- au.lastFlushTime = time.Time{}
- au.committedUpTo(basics.Round(i))
+ ml.trackers.lastFlushTime = time.Time{}
+ ml.trackers.committedUpTo(basics.Round(i))
}
- au.waitAccountsWriting()
+ ml.trackers.waitAccountsWriting()
b.ResetTimer()
startTime := time.Now()
for i := proto.MaxBalLookback + initialRounds; i < proto.MaxBalLookback+uint64(b.N); i++ {
// Clear the timer to ensure a flush
- au.lastFlushTime = time.Time{}
- au.committedUpTo(basics.Round(i))
+ ml.trackers.lastFlushTime = time.Time{}
+ ml.trackers.committedUpTo(basics.Round(i))
}
- au.waitAccountsWriting()
+ ml.trackers.waitAccountsWriting()
deltaTime := time.Now().Sub(startTime)
if deltaTime > time.Second {
return
@@ -626,9 +688,7 @@ func TestLargeAccountCountCatchpointGeneration(t *testing.T) {
os.RemoveAll("./catchpoints")
}()
- ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion)
- defer ml.Close()
- accts := []map[basics.Address]basics.AccountData{randomAccounts(100000, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(100000, true)}
rewardsLevels := []uint64{0}
pooldata := basics.AccountData{}
@@ -641,14 +701,14 @@ func TestLargeAccountCountCatchpointGeneration(t *testing.T) {
sinkdata.Status = basics.NotParticipating
accts[0][testSinkAddr] = sinkdata
- au := &accountUpdates{}
+ ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, accts)
+ defer ml.Close()
+
conf := config.GetDefaultLocal()
conf.CatchpointInterval = 1
conf.Archival = true
- au.initialize(conf, ".", protoParams, accts[0])
+ au := newAcctUpdates(t, ml, conf, ".")
defer au.close()
- err := au.loadFromDisk(ml)
- require.NoError(t, err)
// cover 10 genesis blocks
rewardLevel := uint64(0)
@@ -660,7 +720,7 @@ func TestLargeAccountCountCatchpointGeneration(t *testing.T) {
for i := basics.Round(10); i < basics.Round(protoParams.MaxBalLookback+5); i++ {
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
- updates, totals := randomDeltasBalanced(1, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(1, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -684,9 +744,9 @@ func TestLargeAccountCountCatchpointGeneration(t *testing.T) {
accts = append(accts, totals)
rewardsLevels = append(rewardsLevels, rewardLevel)
- au.committedUpTo(i)
+ ml.trackers.committedUpTo(i)
if i%2 == 1 {
- au.waitAccountsWriting()
+ ml.trackers.waitAccountsWriting()
}
}
}
@@ -717,10 +777,7 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) {
inMemory := true
testFunction := func(t *testing.T) {
- ml := makeMockLedgerForTracker(t, inMemory, 10, testProtocolVersion)
- defer ml.Close()
-
- accts := []map[basics.Address]basics.AccountData{randomAccounts(9, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(9, true)}
pooldata := basics.AccountData{}
pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
@@ -732,6 +789,9 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) {
sinkdata.Status = basics.NotParticipating
accts[0][testSinkAddr] = sinkdata
+ ml := makeMockLedgerForTracker(t, inMemory, 10, testProtocolVersion, accts)
+ defer ml.Close()
+
var moneyAccounts []basics.Address
for addr := range accts[0] {
@@ -749,13 +809,10 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) {
accts[0][addr] = accountData
}
- au := &accountUpdates{}
- au.initialize(config.GetDefaultLocal(), ".", protoParams, accts[0])
+ conf := config.GetDefaultLocal()
+ au := newAcctUpdates(t, ml, conf, ".")
defer au.close()
- err := au.loadFromDisk(ml)
- require.NoError(t, err)
-
// cover 10 genesis blocks
rewardLevel := uint64(0)
for i := 1; i < 10; i++ {
@@ -846,10 +903,10 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) {
delta.Accts.Upsert(addr, ad)
}
au.newBlock(blk, delta)
- au.committedUpTo(i)
+ ml.trackers.committedUpTo(i)
}
lastRound := i - 1
- au.waitAccountsWriting()
+ ml.trackers.waitAccountsWriting()
for idx, addr := range moneyAccounts {
balance, validThrough, err := au.LookupWithoutRewards(lastRound, addr)
@@ -869,55 +926,6 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) {
t.Run("DiskDB", testFunction)
}
-// TestAcctUpdatesDeleteStoredCatchpoints - The goal of this test is to verify that the deleteStoredCatchpoints function works correctly.
-// it doing so by filling up the storedcatchpoints with dummy catchpoint file entries, as well as creating these dummy files on disk.
-// ( the term dummy is only because these aren't real catchpoint files, but rather a zero-length file ). Then, the test call the function
-// and ensures that it did not errored, the catchpoint files were correctly deleted, and that deleteStoredCatchpoints contains no more
-// entries.
-func TestAcctUpdatesDeleteStoredCatchpoints(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
-
- ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion)
- defer ml.Close()
-
- accts := []map[basics.Address]basics.AccountData{randomAccounts(20, true)}
- au := &accountUpdates{}
- conf := config.GetDefaultLocal()
- conf.CatchpointInterval = 1
- au.initialize(conf, ".", proto, accts[0])
- defer au.close()
-
- err := au.loadFromDisk(ml)
- require.NoError(t, err)
-
- dummyCatchpointFilesToCreate := 42
-
- for i := 0; i < dummyCatchpointFilesToCreate; i++ {
- f, err := os.Create(fmt.Sprintf("./dummy_catchpoint_file-%d", i))
- require.NoError(t, err)
- err = f.Close()
- require.NoError(t, err)
- }
-
- for i := 0; i < dummyCatchpointFilesToCreate; i++ {
- err := au.accountsq.storeCatchpoint(context.Background(), basics.Round(i), fmt.Sprintf("./dummy_catchpoint_file-%d", i), "", 0)
- require.NoError(t, err)
- }
- err = au.deleteStoredCatchpoints(context.Background(), au.accountsq)
- require.NoError(t, err)
-
- for i := 0; i < dummyCatchpointFilesToCreate; i++ {
- // ensure that all the files were deleted.
- _, err := os.Open(fmt.Sprintf("./dummy_catchpoint_file-%d", i))
- require.True(t, os.IsNotExist(err))
- }
- fileNames, err := au.accountsq.getOldestCatchpointFiles(context.Background(), dummyCatchpointFilesToCreate, 0)
- require.NoError(t, err)
- require.Equal(t, 0, len(fileNames))
-}
-
// listAndCompareComb lists the assets/applications and then compares against the expected
// It repeats with different combinations of the limit parameters
func listAndCompareComb(t *testing.T, au *accountUpdates, expected map[basics.CreatableIndex]ledgercore.ModifiedCreatable) {
@@ -1055,7 +1063,7 @@ func TestListCreatables(t *testing.T) {
require.NoError(t, err)
au := &accountUpdates{}
- au.accountsq, err = accountsDbInit(tx, tx)
+ au.accountsq, err = accountsInitDbQueries(tx, tx)
require.NoError(t, err)
// ******* All results are obtained from the cache. Empty database *******
@@ -1097,96 +1105,6 @@ func TestListCreatables(t *testing.T) {
listAndCompareComb(t, au, expectedDbImage)
}
-func TestIsWritingCatchpointFile(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- au := &accountUpdates{}
-
- au.catchpointWriting = -1
- ans := au.IsWritingCatchpointFile()
- require.True(t, ans)
-
- au.catchpointWriting = 0
- ans = au.IsWritingCatchpointFile()
- require.False(t, ans)
-}
-
-func TestGetCatchpointStream(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
-
- ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion)
- defer ml.Close()
-
- accts := []map[basics.Address]basics.AccountData{randomAccounts(20, true)}
- au := &accountUpdates{}
- conf := config.GetDefaultLocal()
- conf.CatchpointInterval = 1
- au.initialize(conf, ".", proto, accts[0])
- defer au.close()
-
- err := au.loadFromDisk(ml)
- require.NoError(t, err)
-
- filesToCreate := 4
-
- temporaryDirectroy, err := ioutil.TempDir(os.TempDir(), "catchpoints")
- require.NoError(t, err)
- defer func() {
- os.RemoveAll(temporaryDirectroy)
- }()
- catchpointsDirectory := filepath.Join(temporaryDirectroy, "catchpoints")
- err = os.Mkdir(catchpointsDirectory, 0777)
- require.NoError(t, err)
-
- au.dbDirectory = temporaryDirectroy
-
- // Create the catchpoint files with dummy data
- for i := 0; i < filesToCreate; i++ {
- fileName := filepath.Join("catchpoints", fmt.Sprintf("%d.catchpoint", i))
- data := []byte{byte(i), byte(i + 1), byte(i + 2)}
- err = ioutil.WriteFile(filepath.Join(temporaryDirectroy, fileName), data, 0666)
- require.NoError(t, err)
-
- // Store the catchpoint into the database
- err := au.accountsq.storeCatchpoint(context.Background(), basics.Round(i), fileName, "", int64(len(data)))
- require.NoError(t, err)
- }
-
- dataRead := make([]byte, 3)
- var n int
-
- // File on disk, and database has the record
- reader, err := au.GetCatchpointStream(basics.Round(1))
- n, err = reader.Read(dataRead)
- require.NoError(t, err)
- require.Equal(t, 3, n)
- outData := []byte{1, 2, 3}
- require.Equal(t, outData, dataRead)
- len, err := reader.Size()
- require.NoError(t, err)
- require.Equal(t, int64(3), len)
-
- // File deleted, but record in the database
- err = os.Remove(filepath.Join(temporaryDirectroy, "catchpoints", "2.catchpoint"))
- reader, err = au.GetCatchpointStream(basics.Round(2))
- require.Equal(t, ledgercore.ErrNoEntry{}, err)
- require.Nil(t, reader)
-
- // File on disk, but database lost the record
- err = au.accountsq.storeCatchpoint(context.Background(), basics.Round(3), "", "", 0)
- reader, err = au.GetCatchpointStream(basics.Round(3))
- n, err = reader.Read(dataRead)
- require.NoError(t, err)
- require.Equal(t, 3, n)
- outData = []byte{3, 4, 5}
- require.Equal(t, outData, dataRead)
-
- err = au.deleteStoredCatchpoints(context.Background(), au.accountsq)
- require.NoError(t, err)
-}
-
func accountsAll(tx *sql.Tx) (bals map[basics.Address]basics.AccountData, err error) {
rows, err := tx.Query("SELECT address, data FROM accountbase")
if err != nil {
@@ -1226,10 +1144,7 @@ func accountsAll(tx *sql.Tx) (bals map[basics.Address]basics.AccountData, err er
func BenchmarkLargeMerkleTrieRebuild(b *testing.B) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- ml := makeMockLedgerForTracker(b, true, 10, protocol.ConsensusCurrentVersion)
- defer ml.Close()
-
- accts := []map[basics.Address]basics.AccountData{randomAccounts(5, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(5, true)}
pooldata := basics.AccountData{}
pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
@@ -1241,21 +1156,20 @@ func BenchmarkLargeMerkleTrieRebuild(b *testing.B) {
sinkdata.Status = basics.NotParticipating
accts[0][testSinkAddr] = sinkdata
- au := &accountUpdates{}
+ ml := makeMockLedgerForTracker(b, true, 10, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
+
cfg := config.GetDefaultLocal()
cfg.Archival = true
- au.initialize(cfg, ".", proto, accts[0])
+ au := newAcctUpdates(b, ml, cfg, ".")
defer au.close()
- err := au.loadFromDisk(ml)
- require.NoError(b, err)
-
// at this point, the database has been created. We want to fill in the account data
accountsNumber := 6000000 * b.N
for i := 0; i < accountsNumber-5-2; { // subtract the account we've already created above, plus the sink/reward
var updates compactAccountDeltas
for k := 0; i < accountsNumber-5-2 && k < 1024; k++ {
- addr := randomAddress()
+ addr := ledgertesting.RandomAddress()
acctData := basics.AccountData{}
acctData.MicroAlgos.Raw = 1
updates.upsert(addr, accountDelta{new: acctData})
@@ -1269,87 +1183,20 @@ func BenchmarkLargeMerkleTrieRebuild(b *testing.B) {
require.NoError(b, err)
}
- err = ml.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- return updateAccountsRound(tx, 0, 1)
+ err := ml.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ return updateAccountsHashRound(tx, 1)
})
require.NoError(b, err)
au.close()
b.ResetTimer()
- err = au.loadFromDisk(ml)
+ err = au.loadFromDisk(ml, 0)
require.NoError(b, err)
b.StopTimer()
b.ReportMetric(float64(accountsNumber), "entries/trie")
}
-func BenchmarkLargeCatchpointWriting(b *testing.B) {
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
-
- ml := makeMockLedgerForTracker(b, true, 10, protocol.ConsensusCurrentVersion)
- defer ml.Close()
-
- accts := []map[basics.Address]basics.AccountData{randomAccounts(5, true)}
-
- pooldata := basics.AccountData{}
- pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
- pooldata.Status = basics.NotParticipating
- accts[0][testPoolAddr] = pooldata
-
- sinkdata := basics.AccountData{}
- sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
- sinkdata.Status = basics.NotParticipating
- accts[0][testSinkAddr] = sinkdata
-
- au := &accountUpdates{}
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- au.initialize(cfg, ".", proto, accts[0])
- defer au.close()
-
- temporaryDirectroy, err := ioutil.TempDir(os.TempDir(), "catchpoints")
- require.NoError(b, err)
- defer func() {
- os.RemoveAll(temporaryDirectroy)
- }()
- catchpointsDirectory := filepath.Join(temporaryDirectroy, "catchpoints")
- err = os.Mkdir(catchpointsDirectory, 0777)
- require.NoError(b, err)
-
- au.dbDirectory = temporaryDirectroy
-
- err = au.loadFromDisk(ml)
- require.NoError(b, err)
-
- // at this point, the database was created. We want to fill the accounts data
- accountsNumber := 6000000 * b.N
- err = ml.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- for i := 0; i < accountsNumber-5-2; { // subtract the account we've already created above, plus the sink/reward
- var updates compactAccountDeltas
- for k := 0; i < accountsNumber-5-2 && k < 1024; k++ {
- addr := randomAddress()
- acctData := basics.AccountData{}
- acctData.MicroAlgos.Raw = 1
- updates.upsert(addr, accountDelta{new: acctData})
- i++
- }
-
- _, err = accountsNewRound(tx, updates, nil, proto, basics.Round(1))
- if err != nil {
- return
- }
- }
-
- return updateAccountsRound(tx, 0, 1)
- })
- require.NoError(b, err)
-
- b.ResetTimer()
- au.generateCatchpoint(basics.Round(0), "0#ABCD", crypto.Digest{}, time.Second)
- b.StopTimer()
- b.ReportMetric(float64(accountsNumber), "accounts")
-}
-
func BenchmarkCompactDeltas(b *testing.B) {
b.Run("account-deltas", func(b *testing.B) {
if b.N < 500 {
@@ -1441,129 +1288,6 @@ func TestCompactDeltas(t *testing.T) {
}
-func TestReproducibleCatchpointLabels(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
- t.Skip("This test is too slow on ARM and causes travis builds to time out")
- }
- // create new protocol version, which has lower lookback
- testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestReproducibleCatchpointLabels")
- protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
- protoParams.MaxBalLookback = 32
- protoParams.SeedLookback = 2
- protoParams.SeedRefreshInterval = 8
- config.Consensus[testProtocolVersion] = protoParams
- defer func() {
- delete(config.Consensus, testProtocolVersion)
- }()
-
- ml := makeMockLedgerForTracker(t, false, 1, testProtocolVersion)
- defer ml.Close()
-
- accts := []map[basics.Address]basics.AccountData{randomAccounts(20, true)}
- rewardsLevels := []uint64{0}
-
- pooldata := basics.AccountData{}
- pooldata.MicroAlgos.Raw = 100 * 1000 * 1000 * 1000 * 1000
- pooldata.Status = basics.NotParticipating
- accts[0][testPoolAddr] = pooldata
-
- sinkdata := basics.AccountData{}
- sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
- sinkdata.Status = basics.NotParticipating
- accts[0][testSinkAddr] = sinkdata
-
- au := &accountUpdates{}
- cfg := config.GetDefaultLocal()
- cfg.CatchpointInterval = 50
- cfg.CatchpointTracking = 1
- au.initialize(cfg, ".", protoParams, accts[0])
- defer au.close()
-
- err := au.loadFromDisk(ml)
- require.NoError(t, err)
-
- rewardLevel := uint64(0)
-
- const testCatchpointLabelsCount = 5
-
- // lastCreatableID stores asset or app max used index to get rid of conflicts
- lastCreatableID := crypto.RandUint64() % 512
- knownCreatables := make(map[basics.CreatableIndex]bool)
- catchpointLabels := make(map[basics.Round]string)
- ledgerHistory := make(map[basics.Round]*mockLedgerForTracker)
- roundDeltas := make(map[basics.Round]ledgercore.StateDelta)
- for i := basics.Round(1); i <= basics.Round(testCatchpointLabelsCount*cfg.CatchpointInterval); i++ {
- rewardLevelDelta := crypto.RandUint64() % 5
- rewardLevel += rewardLevelDelta
- var updates ledgercore.AccountDeltas
- var totals map[basics.Address]basics.AccountData
- base := accts[i-1]
- updates, totals, lastCreatableID = randomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
- prevTotals, err := au.Totals(basics.Round(i - 1))
- require.NoError(t, err)
-
- newPool := totals[testPoolAddr]
- newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta
- updates.Upsert(testPoolAddr, newPool)
- totals[testPoolAddr] = newPool
-
- blk := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- Round: basics.Round(i),
- },
- }
- blk.RewardsLevel = rewardLevel
- blk.CurrentProtocol = testProtocolVersion
- delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
- delta.Accts.MergeAccounts(updates)
- delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables)
- au.newBlock(blk, delta)
- au.committedUpTo(i)
- ml.addMockBlock(blockEntry{block: blk}, delta)
- accts = append(accts, totals)
- rewardsLevels = append(rewardsLevels, rewardLevel)
- roundDeltas[i] = delta
-
- // if this is a catchpoint round, save the label.
- if uint64(i)%cfg.CatchpointInterval == 0 {
- au.waitAccountsWriting()
- catchpointLabels[i] = au.GetLastCatchpointLabel()
- ledgerHistory[i] = ml.fork(t)
- defer ledgerHistory[i].Close()
- }
- }
-
- // test in revese what happens when we try to repeat the exact same blocks.
- // start off with the catchpoint before the last one
- startingRound := basics.Round((testCatchpointLabelsCount - 1) * cfg.CatchpointInterval)
- for ; startingRound > basics.Round(cfg.CatchpointInterval); startingRound -= basics.Round(cfg.CatchpointInterval) {
- au.close()
- err := au.loadFromDisk(ledgerHistory[startingRound])
- require.NoError(t, err)
-
- for i := startingRound + 1; i <= basics.Round(testCatchpointLabelsCount*cfg.CatchpointInterval); i++ {
- blk := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- Round: basics.Round(i),
- },
- }
- blk.RewardsLevel = rewardsLevels[i]
- blk.CurrentProtocol = testProtocolVersion
- delta := roundDeltas[i]
- au.newBlock(blk, delta)
- au.committedUpTo(i)
-
- // if this is a catchpoint round, check the label.
- if uint64(i)%cfg.CatchpointInterval == 0 {
- au.waitAccountsWriting()
- require.Equal(t, catchpointLabels[i], au.GetLastCatchpointLabel())
- }
- }
- }
-}
-
// TestCachesInitialization tests the functionality of the initializeCaches cache.
func TestCachesInitialization(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -1572,13 +1296,9 @@ func TestCachesInitialization(t *testing.T) {
proto := config.Consensus[protocolVersion]
initialRounds := uint64(1)
-
- ml := makeMockLedgerForTracker(t, true, int(initialRounds), protocolVersion)
- ml.log.SetLevel(logging.Warn)
- defer ml.Close()
-
accountsCount := 5
- accts := []map[basics.Address]basics.AccountData{randomAccounts(accountsCount, true)}
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(accountsCount, true)}
rewardsLevels := []uint64{0}
pooldata := basics.AccountData{}
@@ -1591,10 +1311,12 @@ func TestCachesInitialization(t *testing.T) {
sinkdata.Status = basics.NotParticipating
accts[0][testSinkAddr] = sinkdata
- au := &accountUpdates{}
- au.initialize(config.GetDefaultLocal(), ".", proto, accts[0])
- err := au.loadFromDisk(ml)
- require.NoError(t, err)
+ ml := makeMockLedgerForTracker(t, true, int(initialRounds), protocolVersion, accts)
+ ml.log.SetLevel(logging.Warn)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ au := newAcctUpdates(t, ml, conf, ".")
// cover initialRounds genesis blocks
rewardLevel := uint64(0)
@@ -1610,7 +1332,7 @@ func TestCachesInitialization(t *testing.T) {
rewardLevel += rewardLevelDelta
accountChanges := 2
- updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -1629,17 +1351,21 @@ func TestCachesInitialization(t *testing.T) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
+ delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]basics.AccountData{totals}, rewardLevel)
ml.addMockBlock(blockEntry{block: blk}, delta)
au.newBlock(blk, delta)
- au.committedUpTo(basics.Round(i))
- au.waitAccountsWriting()
+ ml.trackers.committedUpTo(basics.Round(i))
+ ml.trackers.waitAccountsWriting()
accts = append(accts, totals)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
au.close()
+ // reset the accounts, since their balances are now changed due to the rewards.
+ accts = []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(accountsCount, true)}
+
// create another mocked ledger, but this time with a fresh new tracker database.
- ml2 := makeMockLedgerForTracker(t, true, int(initialRounds), protocolVersion)
+ ml2 := makeMockLedgerForTracker(t, true, int(initialRounds), protocolVersion, accts)
ml2.log.SetLevel(logging.Warn)
defer ml2.Close()
@@ -1647,15 +1373,13 @@ func TestCachesInitialization(t *testing.T) {
ml2.blocks = ml.blocks
ml2.deltas = ml.deltas
- au = &accountUpdates{}
- au.initialize(config.GetDefaultLocal(), ".", proto, accts[0])
- err = au.loadFromDisk(ml2)
- require.NoError(t, err)
+ conf = config.GetDefaultLocal()
+ au = newAcctUpdates(t, ml2, conf, ".")
defer au.close()
// make sure the deltas array end up containing only the most recent 320 rounds.
require.Equal(t, int(proto.MaxBalLookback), len(au.deltas))
- require.Equal(t, recoveredLedgerRound-basics.Round(proto.MaxBalLookback), au.dbRound)
+ require.Equal(t, recoveredLedgerRound-basics.Round(proto.MaxBalLookback), au.cachedDBRound)
}
// TestSplittingConsensusVersionCommits tests that a sequence of commits spanning multiple consensus versions works correctly.
@@ -1667,12 +1391,8 @@ func TestSplittingConsensusVersionCommits(t *testing.T) {
initialRounds := uint64(1)
- ml := makeMockLedgerForTracker(t, true, int(initialRounds), initProtocolVersion)
- ml.log.SetLevel(logging.Warn)
- defer ml.Close()
-
accountsCount := 5
- accts := []map[basics.Address]basics.AccountData{randomAccounts(accountsCount, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(accountsCount, true)}
rewardsLevels := []uint64{0}
pooldata := basics.AccountData{}
@@ -1685,9 +1405,14 @@ func TestSplittingConsensusVersionCommits(t *testing.T) {
sinkdata.Status = basics.NotParticipating
accts[0][testSinkAddr] = sinkdata
- au := &accountUpdates{}
- au.initialize(config.GetDefaultLocal(), ".", initialProtoParams, accts[0])
- err := au.loadFromDisk(ml)
+ ml := makeMockLedgerForTracker(t, true, int(initialRounds), initProtocolVersion, accts)
+ ml.log.SetLevel(logging.Warn)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ au := newAcctUpdates(t, ml, conf, ".")
+
+ err := au.loadFromDisk(ml, 0)
require.NoError(t, err)
defer au.close()
@@ -1706,7 +1431,7 @@ func TestSplittingConsensusVersionCommits(t *testing.T) {
rewardLevel += rewardLevelDelta
accountChanges := 2
- updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -1725,6 +1450,7 @@ func TestSplittingConsensusVersionCommits(t *testing.T) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
+ delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]basics.AccountData{totals}, rewardLevel)
ml.addMockBlock(blockEntry{block: blk}, delta)
au.newBlock(blk, delta)
accts = append(accts, totals)
@@ -1740,7 +1466,7 @@ func TestSplittingConsensusVersionCommits(t *testing.T) {
rewardLevel += rewardLevelDelta
accountChanges := 2
- updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -1759,15 +1485,16 @@ func TestSplittingConsensusVersionCommits(t *testing.T) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
+ delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]basics.AccountData{totals}, rewardLevel)
ml.addMockBlock(blockEntry{block: blk}, delta)
au.newBlock(blk, delta)
accts = append(accts, totals)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
- // now, commit and verify that the committedUpTo method broken the range correctly.
- au.committedUpTo(lastRoundToWrite)
- au.waitAccountsWriting()
- require.Equal(t, basics.Round(initialRounds+extraRounds)-1, au.dbRound)
+	// now, commit and verify that the produceCommittingTask method split the range correctly.
+ ml.trackers.committedUpTo(lastRoundToWrite)
+ ml.trackers.waitAccountsWriting()
+ require.Equal(t, basics.Round(initialRounds+extraRounds)-1, au.cachedDBRound)
}
@@ -1781,12 +1508,8 @@ func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) {
initialRounds := uint64(1)
- ml := makeMockLedgerForTracker(t, true, int(initialRounds), initProtocolVersion)
- ml.log.SetLevel(logging.Warn)
- defer ml.Close()
-
accountsCount := 5
- accts := []map[basics.Address]basics.AccountData{randomAccounts(accountsCount, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(accountsCount, true)}
rewardsLevels := []uint64{0}
pooldata := basics.AccountData{}
@@ -1799,9 +1522,14 @@ func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) {
sinkdata.Status = basics.NotParticipating
accts[0][testSinkAddr] = sinkdata
- au := &accountUpdates{}
- au.initialize(config.GetDefaultLocal(), ".", initialProtoParams, accts[0])
- err := au.loadFromDisk(ml)
+ ml := makeMockLedgerForTracker(t, true, int(initialRounds), initProtocolVersion, accts)
+ ml.log.SetLevel(logging.Warn)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ au := newAcctUpdates(t, ml, conf, ".")
+
+ err := au.loadFromDisk(ml, 0)
require.NoError(t, err)
defer au.close()
@@ -1820,7 +1548,7 @@ func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) {
rewardLevel += rewardLevelDelta
accountChanges := 2
- updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -1839,6 +1567,7 @@ func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
+ delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]basics.AccountData{totals}, rewardLevel)
ml.addMockBlock(blockEntry{block: blk}, delta)
au.newBlock(blk, delta)
accts = append(accts, totals)
@@ -1853,7 +1582,7 @@ func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) {
rewardLevel += rewardLevelDelta
accountChanges := 2
- updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -1872,15 +1601,16 @@ func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
+ delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]basics.AccountData{totals}, rewardLevel)
ml.addMockBlock(blockEntry{block: blk}, delta)
au.newBlock(blk, delta)
accts = append(accts, totals)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
- // now, commit and verify that the committedUpTo method broken the range correctly.
- au.committedUpTo(endOfFirstNewProtocolSegment)
- au.waitAccountsWriting()
- require.Equal(t, basics.Round(initialRounds+extraRounds)-1, au.dbRound)
+	// now, commit and verify that the produceCommittingTask method split the range correctly.
+ ml.trackers.committedUpTo(endOfFirstNewProtocolSegment)
+ ml.trackers.waitAccountsWriting()
+ require.Equal(t, basics.Round(initialRounds+extraRounds)-1, au.cachedDBRound)
// write additional extraRounds elements and verify these can be flushed.
for i := endOfFirstNewProtocolSegment + 1; i <= basics.Round(initialRounds+2*extraRounds+initialProtoParams.MaxBalLookback); i++ {
@@ -1888,7 +1618,7 @@ func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) {
rewardLevel += rewardLevelDelta
accountChanges := 2
- updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -1907,14 +1637,15 @@ func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
+ delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]basics.AccountData{totals}, rewardLevel)
ml.addMockBlock(blockEntry{block: blk}, delta)
au.newBlock(blk, delta)
accts = append(accts, totals)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
- au.committedUpTo(endOfFirstNewProtocolSegment + basics.Round(extraRounds))
- au.waitAccountsWriting()
- require.Equal(t, basics.Round(initialRounds+2*extraRounds), au.dbRound)
+ ml.trackers.committedUpTo(endOfFirstNewProtocolSegment + basics.Round(extraRounds))
+ ml.trackers.waitAccountsWriting()
+ require.Equal(t, basics.Round(initialRounds+2*extraRounds), au.cachedDBRound)
}
// TestConsecutiveVersion tests the consecutiveVersion method correctness.
diff --git a/ledger/applications_test.go b/ledger/applications_test.go
index 038207749..776832968 100644
--- a/ledger/applications_test.go
+++ b/ledger/applications_test.go
@@ -17,356 +17,26 @@
package ledger
import (
- "crypto/rand"
"encoding/hex"
- "fmt"
"testing"
+ "time"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
-func getRandomAddress(a *require.Assertions) basics.Address {
- const rl = 16
- b := make([]byte, rl)
- n, err := rand.Read(b)
- a.NoError(err)
- a.Equal(rl, n)
-
- address := crypto.Hash(b)
- return basics.Address(address)
-}
-
-type creatableLocator struct {
- cidx basics.CreatableIndex
- ctype basics.CreatableType
-}
-type storeLocator struct {
- addr basics.Address
- aidx basics.AppIndex
- global bool
-}
-type mockCowForLogicLedger struct {
- rnd basics.Round
- ts int64
- cr map[creatableLocator]basics.Address
- brs map[basics.Address]basics.AccountData
- stores map[storeLocator]basics.TealKeyValue
- tcs map[int]basics.CreatableIndex
- txc uint64
-}
-
-func (c *mockCowForLogicLedger) Get(addr basics.Address, withPendingRewards bool) (basics.AccountData, error) {
- br, ok := c.brs[addr]
- if !ok {
- return basics.AccountData{}, fmt.Errorf("addr %s not in mock cow", addr.String())
- }
- return br, nil
-}
-
-func (c *mockCowForLogicLedger) GetCreatableID(groupIdx int) basics.CreatableIndex {
- return c.tcs[groupIdx]
-}
-
-func (c *mockCowForLogicLedger) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
- addr, found := c.cr[creatableLocator{cidx, ctype}]
- return addr, found, nil
-}
-
-func (c *mockCowForLogicLedger) GetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) {
- kv, ok := c.stores[storeLocator{addr, aidx, global}]
- if !ok {
- return basics.TealValue{}, false, fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
- }
- tv, found := kv[key]
- return tv, found, nil
-}
-
-func (c *mockCowForLogicLedger) BuildEvalDelta(aidx basics.AppIndex, txn *transactions.Transaction) (evalDelta transactions.EvalDelta, err error) {
- return transactions.EvalDelta{}, nil
-}
-
-func (c *mockCowForLogicLedger) SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error {
- kv, ok := c.stores[storeLocator{addr, aidx, global}]
- if !ok {
- return fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
- }
- kv[key] = value
- c.stores[storeLocator{addr, aidx, global}] = kv
- return nil
-}
-
-func (c *mockCowForLogicLedger) DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error {
- kv, ok := c.stores[storeLocator{addr, aidx, global}]
- if !ok {
- return fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
- }
- delete(kv, key)
- c.stores[storeLocator{addr, aidx, global}] = kv
- return nil
-}
-
-func (c *mockCowForLogicLedger) round() basics.Round {
- return c.rnd
-}
-
-func (c *mockCowForLogicLedger) prevTimestamp() int64 {
- return c.ts
-}
-
-func (c *mockCowForLogicLedger) allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error) {
- _, found := c.stores[storeLocator{addr, aidx, global}]
- return found, nil
-}
-
-func (c *mockCowForLogicLedger) incTxnCount() {
- c.txc++
-}
-
-func (c *mockCowForLogicLedger) txnCounter() uint64 {
- return c.txc
-}
-
-func newCowMock(creatables []modsData) *mockCowForLogicLedger {
- var m mockCowForLogicLedger
- m.cr = make(map[creatableLocator]basics.Address, len(creatables))
- for _, e := range creatables {
- m.cr[creatableLocator{e.cidx, e.ctype}] = e.addr
- }
- return &m
-}
-
-func TestLogicLedgerMake(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- _, err := newLogicLedger(nil, 0)
- a.Error(err)
- a.Contains(err.Error(), "cannot make logic ledger for app index 0")
-
- addr := getRandomAddress(a)
- aidx := basics.AppIndex(1)
-
- c := &mockCowForLogicLedger{}
- _, err = newLogicLedger(c, 0)
- a.Error(err)
- a.Contains(err.Error(), "cannot make logic ledger for app index 0")
-
- _, err = newLogicLedger(c, aidx)
- a.Error(err)
- a.Contains(err.Error(), fmt.Sprintf("app %d does not exist", aidx))
-
- c = newCowMock([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
- l, err := newLogicLedger(c, aidx)
- a.NoError(err)
- a.NotNil(l)
- a.Equal(aidx, l.aidx)
- a.Equal(c, l.cow)
-}
-
-func TestLogicLedgerBalances(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := getRandomAddress(a)
- aidx := basics.AppIndex(1)
- c := newCowMock([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
- l, err := newLogicLedger(c, aidx)
- a.NoError(err)
- a.NotNil(l)
-
- addr1 := getRandomAddress(a)
- ble := basics.MicroAlgos{Raw: 100}
- c.brs = map[basics.Address]basics.AccountData{addr1: {MicroAlgos: ble}}
- bla, err := l.Balance(addr1)
- a.NoError(err)
- a.Equal(ble, bla)
-}
-
-func TestLogicLedgerGetters(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := getRandomAddress(a)
- aidx := basics.AppIndex(1)
- c := newCowMock([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
- l, err := newLogicLedger(c, aidx)
- a.NoError(err)
- a.NotNil(l)
-
- round := basics.Round(1234)
- c.rnd = round
- ts := int64(11223344)
- c.ts = ts
-
- addr1 := getRandomAddress(a)
- c.stores = map[storeLocator]basics.TealKeyValue{{addr1, aidx, false}: {}}
- a.Equal(aidx, l.ApplicationID())
- a.Equal(round, l.Round())
- a.Equal(ts, l.LatestTimestamp())
- a.True(l.OptedIn(addr1, 0))
- a.True(l.OptedIn(addr1, aidx))
- a.False(l.OptedIn(addr, 0))
- a.False(l.OptedIn(addr, aidx))
-}
-
-func TestLogicLedgerAsset(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := getRandomAddress(a)
- addr1 := getRandomAddress(a)
- aidx := basics.AppIndex(1)
- assetIdx := basics.AssetIndex(2)
- c := newCowMock([]modsData{
- {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
- {addr1, basics.CreatableIndex(assetIdx), basics.AssetCreatable},
- })
- l, err := newLogicLedger(c, aidx)
- a.NoError(err)
- a.NotNil(l)
-
- _, _, err = l.AssetParams(basics.AssetIndex(aidx))
- a.Error(err)
- a.Contains(err.Error(), fmt.Sprintf("asset %d does not exist", aidx))
-
- c.brs = map[basics.Address]basics.AccountData{
- addr1: {AssetParams: map[basics.AssetIndex]basics.AssetParams{assetIdx: {Total: 1000}}},
- }
-
- ap, creator, err := l.AssetParams(assetIdx)
- a.NoError(err)
- a.Equal(addr1, creator)
- a.Equal(uint64(1000), ap.Total)
-
- _, err = l.AssetHolding(addr1, assetIdx)
- a.Error(err)
- a.Contains(err.Error(), "has not opted in to asset")
-
- c.brs = map[basics.Address]basics.AccountData{
- addr1: {
- AssetParams: map[basics.AssetIndex]basics.AssetParams{assetIdx: {Total: 1000}},
- Assets: map[basics.AssetIndex]basics.AssetHolding{assetIdx: {Amount: 99}},
- },
- }
-
- ah, err := l.AssetHolding(addr1, assetIdx)
- a.NoError(err)
- a.Equal(uint64(99), ah.Amount)
-}
-
-func TestLogicLedgerGetKey(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := getRandomAddress(a)
- addr1 := getRandomAddress(a)
- aidx := basics.AppIndex(1)
- assetIdx := basics.AssetIndex(2)
- c := newCowMock([]modsData{
- {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
- {addr1, basics.CreatableIndex(assetIdx), basics.AssetCreatable},
- })
- l, err := newLogicLedger(c, aidx)
- a.NoError(err)
- a.NotNil(l)
-
- _, ok, err := l.GetGlobal(basics.AppIndex(assetIdx), "gkey")
- a.Error(err)
- a.False(ok)
- a.Contains(err.Error(), fmt.Sprintf("app %d does not exist", assetIdx))
-
- tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx + 1, true}: {"gkey": tv}}
- val, ok, err := l.GetGlobal(aidx, "gkey")
- a.Error(err)
- a.False(ok)
- a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
-
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
- val, ok, err = l.GetGlobal(aidx, "gkey")
- a.NoError(err)
- a.True(ok)
- a.Equal(tv, val)
-
- // check local
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, false}: {"lkey": tv}}
- val, ok, err = l.GetLocal(addr, aidx, "lkey", 0)
- a.NoError(err)
- a.True(ok)
- a.Equal(tv, val)
-}
-
-func TestLogicLedgerSetKey(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := getRandomAddress(a)
- aidx := basics.AppIndex(1)
- c := newCowMock([]modsData{
- {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
- })
- l, err := newLogicLedger(c, aidx)
- a.NoError(err)
- a.NotNil(l)
-
- tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
- err = l.SetGlobal("gkey", tv)
- a.Error(err)
- a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
-
- tv2 := basics.TealValue{Type: basics.TealUintType, Uint: 2}
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
- err = l.SetGlobal("gkey", tv2)
- a.NoError(err)
-
- // check local
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, false}: {"lkey": tv}}
- err = l.SetLocal(addr, "lkey", tv2, 0)
- a.NoError(err)
-}
-
-func TestLogicLedgerDelKey(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := getRandomAddress(a)
- aidx := basics.AppIndex(1)
- c := newCowMock([]modsData{
- {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
- })
- l, err := newLogicLedger(c, aidx)
- a.NoError(err)
- a.NotNil(l)
-
- err = l.DelGlobal("gkey")
- a.Error(err)
- a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
-
- tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
- err = l.DelGlobal("gkey")
- a.NoError(err)
-
- addr1 := getRandomAddress(a)
- c.stores = map[storeLocator]basics.TealKeyValue{{addr1, aidx, false}: {"lkey": tv}}
- err = l.DelLocal(addr1, "lkey", 0)
- a.NoError(err)
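+
+// commitRound clears the tracker flush timer and schedules a commit so that
+// rounds up through dbRound+offset are flushed, then blocks until the
+// account writes finish. The tests below call it in place of driving
+// commitRound/accountsWriting by hand.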
+func commitRound(offset uint64, dbRound basics.Round, l *Ledger) {
+ l.trackers.lastFlushTime = time.Time{}
+ l.trackers.scheduleCommit(l.Latest(), l.Latest()-(dbRound+basics.Round(offset)))
+ l.trackers.waitAccountsWriting()
}
// test ensures that
@@ -374,6 +44,7 @@ func TestLogicLedgerDelKey(t *testing.T) {
// before and after application code refactoring
// 2) writing into empty (opted-in) local state's KeyValue works after reloading
// Hardcoded values are from commit 9a0b439 (pre app refactor commit)
func TestAppAccountDataStorage(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -433,7 +104,7 @@ return`
program := ops.Program
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
a.NoError(err)
@@ -460,14 +131,6 @@ return`
l, err := OpenLedger(logging.Base(), "TestAppAccountData", true, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
- l.accts.ctxCancel() // force commitSyncer to exit
-
- // wait commitSyncer to exit
- // the test calls commitRound directly and does not need commitSyncer/committedUpTo
- select {
- case <-l.accts.commitSyncerClosed:
- break
- }
txHeader := transactions.Header{
Sender: creator,
@@ -522,9 +185,7 @@ return`
a.NoError(err)
// save data into DB and write into local state
- l.accts.accountsWriting.Add(1)
- l.accts.commitRound(3, 0, 0)
- l.accts.accountsWriting.Wait()
+ commitRound(3, 0, l)
appCallFields = transactions.ApplicationCallTxnFields{
OnCompletion: 0,
@@ -543,9 +204,7 @@ return`
a.NoError(err)
// save data into DB
- l.accts.accountsWriting.Add(1)
- l.accts.commitRound(1, 3, 0)
- l.accts.accountsWriting.Wait()
+ commitRound(1, 3, l)
// dump accounts
var rowid int64
@@ -553,18 +212,23 @@ return`
var buf []byte
err = l.accts.accountsq.lookupStmt.QueryRow(creator[:]).Scan(&rowid, &dbRound, &buf)
a.NoError(err)
+ a.Equal(basics.Round(4), dbRound)
a.Equal(expectedCreator, buf)
err = l.accts.accountsq.lookupStmt.QueryRow(userOptin[:]).Scan(&rowid, &dbRound, &buf)
a.NoError(err)
+ a.Equal(basics.Round(4), dbRound)
a.Equal(expectedUserOptIn, buf)
pad, err := l.accts.accountsq.lookup(userOptin)
+ a.NoError(err)
a.Nil(pad.accountData.AppLocalStates[appIdx].KeyValue)
ad, err := l.Lookup(dbRound, userOptin)
+ a.NoError(err)
a.Nil(ad.AppLocalStates[appIdx].KeyValue)
err = l.accts.accountsq.lookupStmt.QueryRow(userLocal[:]).Scan(&rowid, &dbRound, &buf)
a.NoError(err)
+ a.Equal(basics.Round(4), dbRound)
a.Equal(expectedUserLocal, buf)
ad, err = l.Lookup(dbRound, userLocal)
@@ -664,7 +328,7 @@ return`
program := ops.Program
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
a.NoError(err)
@@ -678,14 +342,6 @@ return`
l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
- l.accts.ctxCancel() // force commitSyncer to exit
-
- // wait commitSyncer to exit
- // the test calls commitRound directly and does not need commitSyncer/committedUpTo
- select {
- case <-l.accts.commitSyncerClosed:
- break
- }
genesisID := t.Name()
txHeader := transactions.Header{
@@ -751,9 +407,7 @@ return`
a.NoError(err)
// save data into DB and write into local state
- l.accts.accountsWriting.Add(1)
- l.accts.commitRound(3, 0, 0)
- l.accts.accountsWriting.Wait()
+ commitRound(3, 0, l)
// check first write
blk, err := l.Block(2)
@@ -807,9 +461,7 @@ return`
a.NoError(err)
// save data into DB
- l.accts.accountsWriting.Add(1)
- l.accts.commitRound(2, 3, 0)
- l.accts.accountsWriting.Wait()
+ commitRound(2, 3, l)
// check first write
blk, err = l.Block(4)
@@ -919,7 +571,7 @@ return`
program := ops.Program
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
a.NoError(err)
@@ -933,14 +585,6 @@ return`
l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
- l.accts.ctxCancel() // force commitSyncer to exit
-
- // wait commitSyncer to exit
- // the test calls commitRound directly and does not need commitSyncer/committedUpTo
- select {
- case <-l.accts.commitSyncerClosed:
- break
- }
genesisID := t.Name()
txHeader := transactions.Header{
@@ -1034,9 +678,7 @@ return`
l.WaitForCommit(3)
// save data into DB and write into local state
- l.accts.accountsWriting.Add(1)
- l.accts.commitRound(3, 0, 0)
- l.accts.accountsWriting.Wait()
+ commitRound(3, 0, l)
// check first write
blk, err = l.Block(2)
@@ -1078,7 +720,7 @@ return`
program := ops.Program
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
a.NoError(err)
@@ -1092,14 +734,6 @@ return`
l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
- l.accts.ctxCancel() // force commitSyncer to exit
-
- // wait commitSyncer to exit
- // the test calls commitRound directly and does not need commitSyncer/committedUpTo
- select {
- case <-l.accts.commitSyncerClosed:
- break
- }
genesisID := t.Name()
txHeader := transactions.Header{
@@ -1174,9 +808,7 @@ return`
a.NoError(err)
// save data into DB and write into local state
- l.accts.accountsWriting.Add(1)
- l.accts.commitRound(2, 0, 0)
- l.accts.accountsWriting.Wait()
+ commitRound(2, 0, l)
// check first write
blk, err = l.Block(1)
@@ -1279,7 +911,7 @@ func testAppAccountDeltaIndicesCompatibility(t *testing.T, source string, accoun
// explicitly trigger compatibility mode
proto := config.Consensus[protocol.ConsensusV24]
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusV24, 100)
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusV24, 100)
creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
a.NoError(err)
@@ -1293,14 +925,6 @@ func testAppAccountDeltaIndicesCompatibility(t *testing.T, source string, accoun
l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
- l.accts.ctxCancel() // force commitSyncer to exit
-
- // wait commitSyncer to exit
- // the test calls commitRound directly and does not need commitSyncer/committedUpTo
- select {
- case <-l.accts.commitSyncerClosed:
- break
- }
genesisID := t.Name()
txHeader := transactions.Header{
@@ -1361,9 +985,7 @@ func testAppAccountDeltaIndicesCompatibility(t *testing.T, source string, accoun
a.NoError(err)
// save data into DB and write into local state
- l.accts.accountsWriting.Add(1)
- l.accts.commitRound(2, 0, 0)
- l.accts.accountsWriting.Wait()
+ commitRound(2, 0, l)
// check first write
blk, err := l.Block(2)
diff --git a/ledger/apply/apply.go b/ledger/apply/apply.go
index 988ae8cb9..f6a91ed8e 100644
--- a/ledger/apply/apply.go
+++ b/ledger/apply/apply.go
@@ -64,3 +64,26 @@ type Balances interface {
// to a ConsensusParams. This returns those parameters.
ConsensusParams() config.ConsensusParams
}
+
+// Rekey updates tx.Sender's AuthAddr to tx.RekeyTo, if provided
+func Rekey(balances Balances, tx *transactions.Transaction) error {
+ if (tx.RekeyTo != basics.Address{}) {
+ acct, err := balances.Get(tx.Sender, false)
+ if err != nil {
+ return err
+ }
+ // Special case: rekeying to the account's actual address just sets acct.AuthAddr to 0
+ // This saves 32 bytes in your balance record if you want to go back to using your original key
+ if tx.RekeyTo == tx.Sender {
+ acct.AuthAddr = basics.Address{}
+ } else {
+ acct.AuthAddr = tx.RekeyTo
+ }
+
+ err = balances.Put(tx.Sender, acct)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
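+
+// A minimal caller sketch (illustrative only, not part of this change):
+// a handler for a rekey-capable transaction type would apply its own
+// balance changes first and then delegate AuthAddr maintenance to Rekey,
+// along the lines of:
+//
+//	func applyPayment(balances Balances, tx *transactions.Transaction) error {
+//		// ...debit tx.Sender, credit tx.Receiver...
+//		return Rekey(balances, tx) // set or clear Sender's AuthAddr
+//	}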
diff --git a/ledger/apptxn_test.go b/ledger/apptxn_test.go
index 845bb54c4..6166c5d68 100644
--- a/ledger/apptxn_test.go
+++ b/ledger/apptxn_test.go
@@ -20,10 +20,21 @@ import (
"fmt"
"testing"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
)
// main wraps up some TEAL source in a header and footer so that it is
@@ -37,11 +48,152 @@ func main(source string) string {
end: int 1`, source)
}
+// newTestLedger creates an in-memory Ledger that is as realistic as
+// possible. It has Rewards and FeeSink properly configured.
+func newTestLedger(t testing.TB, balances bookkeeping.GenesisBalances) *Ledger {
+ var genHash crypto.Digest
+ crypto.RandBytes(genHash[:])
+ genBlock, err := bookkeeping.MakeGenesisBlock(protocol.ConsensusFuture, balances, "test", genHash)
+ require.NoError(t, err)
+ require.False(t, genBlock.FeeSink.IsZero())
+ require.False(t, genBlock.RewardsPool.IsZero())
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ l, err := OpenLedger(logging.Base(), dbName, true, ledgercore.InitState{
+ Block: genBlock,
+ Accounts: balances.Balances,
+ GenesisHash: genHash,
+ }, cfg)
+ require.NoError(t, err)
+ return l
+}
+
+// nextBlock begins evaluation of a new block, after ledger creation or endBlock()
+func (ledger *Ledger) nextBlock(t testing.TB) *internal.BlockEvaluator {
+ rnd := ledger.Latest()
+ hdr, err := ledger.BlockHdr(rnd)
+ require.NoError(t, err)
+
+ nextHdr := bookkeeping.MakeBlock(hdr).BlockHeader
+ eval, err := ledger.StartEvaluator(nextHdr, 0, 0)
+ require.NoError(t, err)
+ return eval
+}
+
+// endBlock completes the block being created and returns the ValidatedBlock for inspection
+func (ledger *Ledger) endBlock(t testing.TB, eval testingEvaluator) *ledgercore.ValidatedBlock {
+ validatedBlock, err := eval.BlockEvaluator.GenerateBlock()
+ require.NoError(t, err)
+ err = ledger.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+ require.NoError(t, err)
+ return validatedBlock
+}
+
+// lookup gets the current accountdata for an address
+func (ledger *Ledger) lookup(t testing.TB, addr basics.Address) basics.AccountData {
+ rnd := ledger.Latest()
+ ad, err := ledger.Lookup(rnd, addr)
+ require.NoError(t, err)
+ return ad
+}
+
+// micros gets the current microAlgo balance for an address
+func (ledger *Ledger) micros(t testing.TB, addr basics.Address) uint64 {
+ return ledger.lookup(t, addr).MicroAlgos.Raw
+}
+
+// asa gets an address's current balance and opt-in status for a given ASA
+func (ledger *Ledger) asa(t testing.TB, addr basics.Address, asset basics.AssetIndex) (uint64, bool) {
+ if holding, ok := ledger.lookup(t, addr).Assets[asset]; ok {
+ return holding.Amount, true
+ }
+ return 0, false
+}
+
+// asaParams gets the asset params for a given asa index
+func (ledger *Ledger) asaParams(t testing.TB, asset basics.AssetIndex) (basics.AssetParams, error) {
+ creator, ok, err := ledger.GetCreator(basics.CreatableIndex(asset), basics.AssetCreatable)
+ if err != nil {
+ return basics.AssetParams{}, err
+ }
+ if !ok {
+ return basics.AssetParams{}, fmt.Errorf("no asset (%d)", asset)
+ }
+ if params, ok := ledger.lookup(t, creator).AssetParams[asset]; ok {
+ return params, nil
+ }
+ return basics.AssetParams{}, fmt.Errorf("bad lookup (%d)", asset)
+}
+
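+// testingEvaluator pairs an internal.BlockEvaluator with its Ledger so the
+// helpers below can fill transaction defaults before evaluation.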
+type testingEvaluator struct {
+ *internal.BlockEvaluator
+ ledger *Ledger
+}
+
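+// fillDefaults supplies the ledger's GenesisHash and the evaluator's round
+// as FirstValid when unset, then fills the remaining txntest defaults.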
+func (eval *testingEvaluator) fillDefaults(txn *txntest.Txn) {
+ if txn.GenesisHash.IsZero() {
+ txn.GenesisHash = eval.ledger.GenesisHash()
+ }
+ if txn.FirstValid == 0 {
+ txn.FirstValid = eval.Round()
+ }
+ txn.FillDefaults(eval.ledger.genesisProto)
+}
+
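+// txn evaluates one transaction. With no problem string it must succeed;
+// with one, evaluation must fail with an error containing that string.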
+func (eval *testingEvaluator) txn(t testing.TB, txn *txntest.Txn, problem ...string) {
+ t.Helper()
+ eval.fillDefaults(txn)
+ stxn := txn.SignedTxn()
+ err := eval.TestTransactionGroup([]transactions.SignedTxn{stxn})
+ if err != nil {
+ if len(problem) == 1 {
+ require.Contains(t, err.Error(), problem[0])
+ } else {
+ require.NoError(t, err) // Will obviously fail
+ }
+ return
+ }
+ err = eval.Transaction(stxn, transactions.ApplyData{})
+ if err != nil {
+ if len(problem) == 1 {
+ require.Contains(t, err.Error(), problem[0])
+ } else {
+ require.NoError(t, err) // Will obviously fail
+ }
+ return
+ }
+ require.Len(t, problem, 0)
+}
+
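+// txns evaluates each transaction in order, requiring every one to succeed.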
+func (eval *testingEvaluator) txns(t testing.TB, txns ...*txntest.Txn) {
+ t.Helper()
+ for _, txn := range txns {
+ eval.txn(t, txn)
+ }
+}
+
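+// txgroup fills defaults for every transaction, then evaluates them as a
+// single group and returns any error from testing or applying the group.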
+func (eval *testingEvaluator) txgroup(t testing.TB, txns ...*txntest.Txn) error {
+ t.Helper()
+ for _, txn := range txns {
+ eval.fillDefaults(txn)
+ }
+ txgroup := txntest.SignedTxns(txns...)
+
+ err := eval.TestTransactionGroup(txgroup)
+ if err != nil {
+ return err
+ }
+
+ err = eval.TransactionGroup(transactions.WrapSignedTxnsWithAD(txgroup))
+ return err
+}
+
// TestPayAction ensures a pay in teal affects balances
func TestPayAction(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -75,12 +227,12 @@ func TestPayAction(t *testing.T) {
Accounts: []basics.Address{addrs[1]}, // pay self
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &create, &fund, &payout1)
vb := l.endBlock(t, eval)
// AD contains expected appIndex
- require.Equal(t, ai, vb.blk.Payset[0].ApplyData.ApplicationID)
+ require.Equal(t, ai, vb.Block().Payset[0].ApplyData.ApplicationID)
ad0 := l.micros(t, addrs[0])
ad1 := l.micros(t, addrs[1])
@@ -95,11 +247,11 @@ func TestPayAction(t *testing.T) {
// Build up Residue in RewardsState so it's ready to pay
for i := 1; i < 10; i++ {
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
l.endBlock(t, eval)
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
payout2 := txntest.Txn{
Type: "appl",
Sender: addrs[1],
@@ -109,15 +261,17 @@ func TestPayAction(t *testing.T) {
eval.txn(t, &payout2)
// confirm that modifiedAccounts can see account in inner txn
found := false
- for _, addr := range eval.state.modifiedAccounts() {
+ vb = l.endBlock(t, eval)
+
+ deltas := vb.Delta()
+ for _, addr := range deltas.Accts.ModifiedAccounts() {
if addr == addrs[2] {
found = true
}
}
require.True(t, found)
- l.endBlock(t, eval)
- payInBlock := eval.block.Payset[0]
+ payInBlock := vb.Block().Payset[0]
rewards := payInBlock.ApplyData.SenderRewards.Raw
require.Greater(t, rewards, uint64(2000)) // some biggish number
inners := payInBlock.ApplyData.EvalDelta.InnerTxns
@@ -150,23 +304,23 @@ func TestPayAction(t *testing.T) {
Receiver: ai.Address(),
Amount: 10 * 1000 * 1000000, // account min balance, plus fees
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &tenkalgos)
l.endBlock(t, eval)
beforepay := l.micros(t, ai.Address())
// Build up Residue in RewardsState so it's ready to pay again
for i := 1; i < 10; i++ {
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
l.endBlock(t, eval)
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, payout2.Noted("2"))
- l.endBlock(t, eval)
+ vb = l.endBlock(t, eval)
afterpay := l.micros(t, ai.Address())
- payInBlock = eval.block.Payset[0]
+ payInBlock = vb.Block().Payset[0]
inners = payInBlock.ApplyData.EvalDelta.InnerTxns
require.Len(t, inners, 1)
@@ -180,7 +334,7 @@ func TestPayAction(t *testing.T) {
func TestAxferAction(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -231,14 +385,14 @@ submit: itxn_submit
`),
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &asa, &app)
vb := l.endBlock(t, eval)
asaIndex := basics.AssetIndex(1)
- require.Equal(t, asaIndex, vb.blk.Payset[0].ApplyData.ConfigAsset)
+ require.Equal(t, asaIndex, vb.Block().Payset[0].ApplyData.ConfigAsset)
appIndex := basics.AppIndex(2)
- require.Equal(t, appIndex, vb.blk.Payset[1].ApplyData.ApplicationID)
+ require.Equal(t, appIndex, vb.Block().Payset[1].ApplyData.ApplicationID)
fund := txntest.Txn{
Type: "pay",
@@ -248,7 +402,7 @@ submit: itxn_submit
// stay under 1M, to avoid rewards complications
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &fund)
l.endBlock(t, eval)
@@ -261,7 +415,7 @@ submit: itxn_submit
}
// Fail, because app account is not opted in.
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &fundgold, fmt.Sprintf("asset %d missing", asaIndex))
l.endBlock(t, eval)
@@ -278,7 +432,7 @@ submit: itxn_submit
}
// Tell the app to opt itself in.
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &optin)
l.endBlock(t, eval)
@@ -287,7 +441,7 @@ submit: itxn_submit
require.Equal(t, amount, uint64(0))
	// Now, succeed, because opted in.
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &fundgold)
l.endBlock(t, eval)
@@ -303,7 +457,7 @@ submit: itxn_submit
ForeignAssets: []basics.AssetIndex{asaIndex},
Accounts: []basics.Address{addrs[0]},
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &withdraw)
l.endBlock(t, eval)
@@ -311,7 +465,7 @@ submit: itxn_submit
require.True(t, in)
require.Equal(t, amount, uint64(10000))
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, withdraw.Noted("2"))
l.endBlock(t, eval)
@@ -319,7 +473,7 @@ submit: itxn_submit
require.True(t, in) // Zero left, but still opted in
require.Equal(t, amount, uint64(0))
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, withdraw.Noted("3"), "underflow on subtracting")
l.endBlock(t, eval)
@@ -336,7 +490,7 @@ submit: itxn_submit
Accounts: []basics.Address{addrs[0]},
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &close)
l.endBlock(t, eval)
@@ -345,13 +499,13 @@ submit: itxn_submit
require.Equal(t, amount, uint64(0))
// Now, fail again, opted out
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, fundgold.Noted("2"), fmt.Sprintf("asset %d missing", asaIndex))
l.endBlock(t, eval)
// Do it all again, so we can test closeTo when we have a non-zero balance
// Tell the app to opt itself in.
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txns(t, optin.Noted("a"), fundgold.Noted("a"))
l.endBlock(t, eval)
@@ -359,7 +513,7 @@ submit: itxn_submit
require.Equal(t, uint64(20000), amount)
left, _ := l.asa(t, addrs[0], asaIndex)
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, close.Noted("a"))
l.endBlock(t, eval)
@@ -373,7 +527,7 @@ submit: itxn_submit
func TestClawbackAction(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -424,12 +578,12 @@ func TestClawbackAction(t *testing.T) {
AssetReceiver: addrs[1],
XferAsset: asaIndex,
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &asa, &app, &optin)
vb := l.endBlock(t, eval)
- require.Equal(t, asaIndex, vb.blk.Payset[0].ApplyData.ConfigAsset)
- require.Equal(t, appIndex, vb.blk.Payset[1].ApplyData.ApplicationID)
+ require.Equal(t, asaIndex, vb.Block().Payset[0].ApplyData.ConfigAsset)
+ require.Equal(t, appIndex, vb.Block().Payset[1].ApplyData.ApplicationID)
bystander := addrs[2] // Has no authority of its own
overpay := txntest.Txn{
@@ -445,7 +599,7 @@ func TestClawbackAction(t *testing.T) {
ForeignAssets: []basics.AssetIndex{asaIndex},
Accounts: []basics.Address{addrs[0], addrs[1]},
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txgroup(t, &overpay, &clawmove)
l.endBlock(t, eval)
@@ -457,7 +611,7 @@ func TestClawbackAction(t *testing.T) {
func TestRekeyAction(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -493,7 +647,7 @@ skipclose:
RekeyTo: appIndex.Address(),
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &ezpayer, &rekey)
l.endBlock(t, eval)
@@ -503,7 +657,7 @@ skipclose:
ApplicationID: appIndex,
Accounts: []basics.Address{addrs[0], addrs[2]}, // pay 2 from 0 (which was rekeyed)
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &useacct)
l.endBlock(t, eval)
@@ -520,7 +674,7 @@ skipclose:
ApplicationID: appIndex,
Accounts: []basics.Address{addrs[2], addrs[0]}, // pay 0 from 2
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &baduse, "unauthorized")
l.endBlock(t, eval)
@@ -533,7 +687,7 @@ skipclose:
ApplicationID: appIndex,
Accounts: []basics.Address{addrs[0], addrs[2], addrs[3]}, // close to 3
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &close)
l.endBlock(t, eval)
@@ -545,13 +699,13 @@ skipclose:
Receiver: addrs[0],
Amount: 10_000_000,
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &payback)
l.endBlock(t, eval)
require.Equal(t, uint64(10_000_000), l.micros(t, addrs[0]))
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, useacct.Noted("2"), "unauthorized")
l.endBlock(t, eval)
}
@@ -561,7 +715,7 @@ skipclose:
func TestRekeyActionCloseAccount(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -617,7 +771,7 @@ func TestRekeyActionCloseAccount(t *testing.T) {
Amount: 1_000_000,
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &create, &rekey, &fund)
l.endBlock(t, eval)
@@ -627,7 +781,7 @@ func TestRekeyActionCloseAccount(t *testing.T) {
ApplicationID: appIndex,
Accounts: []basics.Address{addrs[0], addrs[2]},
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &useacct, "unauthorized")
l.endBlock(t, eval)
}
@@ -636,7 +790,7 @@ func TestRekeyActionCloseAccount(t *testing.T) {
func TestDuplicatePayAction(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -678,14 +832,14 @@ func TestDuplicatePayAction(t *testing.T) {
Accounts: []basics.Address{addrs[1]}, // pay self
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &create, &fund, &paytwice, create.Noted("in same block"))
vb := l.endBlock(t, eval)
- require.Equal(t, appIndex, vb.blk.Payset[0].ApplyData.ApplicationID)
- require.Equal(t, 4, len(vb.blk.Payset))
+ require.Equal(t, appIndex, vb.Block().Payset[0].ApplyData.ApplicationID)
+ require.Equal(t, 4, len(vb.Block().Payset))
// create=1, fund=2, payTwice=3,4,5
- require.Equal(t, basics.AppIndex(6), vb.blk.Payset[3].ApplyData.ApplicationID)
+ require.Equal(t, basics.AppIndex(6), vb.Block().Payset[3].ApplyData.ApplicationID)
ad0 := l.micros(t, addrs[0])
ad1 := l.micros(t, addrs[1])
@@ -699,19 +853,19 @@ func TestDuplicatePayAction(t *testing.T) {
require.Equal(t, 188000, int(app))
// Now create another app, and see if it gets the index we expect.
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txns(t, create.Noted("again"))
vb = l.endBlock(t, eval)
// create=1, fund=2, payTwice=3,4,5, insameblock=6
- require.Equal(t, basics.AppIndex(7), vb.blk.Payset[0].ApplyData.ApplicationID)
+ require.Equal(t, basics.AppIndex(7), vb.Block().Payset[0].ApplyData.ApplicationID)
}
// TestInnerTxnCount ensures that inner transactions increment the TxnCounter
func TestInnerTxnCount(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -744,22 +898,22 @@ func TestInnerTxnCount(t *testing.T) {
Accounts: []basics.Address{addrs[1]}, // pay self
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &create, &fund)
vb := l.endBlock(t, eval)
- require.Equal(t, 2, int(vb.blk.TxnCounter))
+ require.Equal(t, 2, int(vb.Block().TxnCounter))
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &payout1)
vb = l.endBlock(t, eval)
- require.Equal(t, 4, int(vb.blk.TxnCounter))
+ require.Equal(t, 4, int(vb.Block().TxnCounter))
}
// TestAcfgAction ensures assets can be created and configured in teal
func TestAcfgAction(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -786,33 +940,78 @@ func TestAcfgAction(t *testing.T) {
itxn_field ConfigAssetName
byte "https://gold.rush/"
itxn_field ConfigAssetURL
- global CurrentApplicationAddress
+
+ global CurrentApplicationAddress
dup
dup2
+ itxn_field ConfigAssetManager
+ itxn_field ConfigAssetReserve
+ itxn_field ConfigAssetFreeze
+ itxn_field ConfigAssetClawback
+ b submit
+manager:
+ // Put the current values in the itxn
+ txn Assets 0
+ asset_params_get AssetManager
+ assert // exists
itxn_field ConfigAssetManager
+
+ txn Assets 0
+ asset_params_get AssetReserve
+ assert // exists
itxn_field ConfigAssetReserve
+
+ txn Assets 0
+ asset_params_get AssetFreeze
+ assert // exists
itxn_field ConfigAssetFreeze
+
+ txn Assets 0
+ asset_params_get AssetClawback
+ assert // exists
itxn_field ConfigAssetClawback
- b submit
-manager:
+
+
txn ApplicationArgs 0
byte "manager"
==
bz reserve
+ txn Assets 0
+ itxn_field ConfigAsset
+ txn ApplicationArgs 1
+ itxn_field ConfigAssetManager
+ b submit
reserve:
txn ApplicationArgs 0
byte "reserve"
==
bz freeze
+ txn Assets 0
+ itxn_field ConfigAsset
+ txn ApplicationArgs 1
+ itxn_field ConfigAssetReserve
+ b submit
freeze:
txn ApplicationArgs 0
byte "freeze"
==
bz clawback
+ txn Assets 0
+ itxn_field ConfigAsset
+ txn ApplicationArgs 1
+ itxn_field ConfigAssetFreeze
+ b submit
clawback:
txn ApplicationArgs 0
- byte "manager"
+ byte "clawback"
==
+ bz error
+ txn Assets 0
+ itxn_field ConfigAsset
+ txn ApplicationArgs 1
+ itxn_field ConfigAssetClawback
+ b submit
+error: err
submit: itxn_submit
`),
}
@@ -824,7 +1023,7 @@ submit: itxn_submit
Amount: 200_000, // exactly account min balance + one asset
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &app, &fund)
l.endBlock(t, eval)
@@ -835,14 +1034,14 @@ submit: itxn_submit
ApplicationArgs: [][]byte{[]byte("create")},
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
// Can't create an asset if you have exactly 200,000 and need to pay fee
eval.txn(t, &createAsa, "balance 199000 below min 200000")
// fund it some more and try again
eval.txns(t, fund.Noted("more!"), &createAsa)
vb := l.endBlock(t, eval)
- asaIndex := vb.blk.Payset[1].EvalDelta.InnerTxns[0].ConfigAsset
+ asaIndex := vb.Block().Payset[1].EvalDelta.InnerTxns[0].ConfigAsset
require.Equal(t, basics.AssetIndex(5), asaIndex)
asaParams, err := l.asaParams(t, basics.AssetIndex(5))
@@ -854,6 +1053,33 @@ submit: itxn_submit
require.Equal(t, "Gold", asaParams.AssetName)
require.Equal(t, "https://gold.rush/", asaParams.URL)
+ require.Equal(t, appIndex.Address(), asaParams.Manager)
+
+ for _, a := range []string{"reserve", "freeze", "clawback", "manager"} {
+ check := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appIndex,
+ ApplicationArgs: [][]byte{[]byte(a), []byte("junkjunkjunkjunkjunkjunkjunkjunk")},
+ ForeignAssets: []basics.AssetIndex{asaIndex},
+ }
+ eval = testingEvaluator{l.nextBlock(t), l}
+ t.Log(a)
+ eval.txn(t, &check)
+ l.endBlock(t, eval)
+ }
+ // Not the manager anymore so this won't work
+ nodice := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appIndex,
+ ApplicationArgs: [][]byte{[]byte("freeze"), []byte("junkjunkjunkjunkjunkjunkjunkjunk")},
+ ForeignAssets: []basics.AssetIndex{asaIndex},
+ }
+ eval = testingEvaluator{l.nextBlock(t), l}
+ eval.txn(t, &nodice, "this transaction should be issued by the manager")
+ l.endBlock(t, eval)
+
}
// TestAsaDuringInit ensures an ASA can be made while initializing an
@@ -863,7 +1089,7 @@ submit: itxn_submit
func TestAsaDuringInit(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -903,12 +1129,211 @@ func TestAsaDuringInit(t *testing.T) {
`,
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &prefund, &app)
vb := l.endBlock(t, eval)
- require.Equal(t, appIndex, vb.blk.Payset[1].ApplicationID)
+ require.Equal(t, appIndex, vb.Block().Payset[1].ApplicationID)
- asaIndex := vb.blk.Payset[1].EvalDelta.InnerTxns[0].ConfigAsset
+ asaIndex := vb.Block().Payset[1].EvalDelta.InnerTxns[0].ConfigAsset
require.Equal(t, basics.AssetIndex(3), asaIndex)
}
+
+func TestRekey(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ l := newTestLedger(t, genBalances)
+ defer l.Close()
+
+ app := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
+ itxn_begin
+ int pay
+ itxn_field TypeEnum
+ int 1
+ itxn_field Amount
+ global CurrentApplicationAddress
+ itxn_field Receiver
+ int 31
+ bzero
+ byte 0x01
+ concat
+ itxn_field RekeyTo
+ itxn_submit
+`),
+ }
+
+ eval := testingEvaluator{l.nextBlock(t), l}
+ eval.txns(t, &app)
+ vb := l.endBlock(t, eval)
+ appIndex := vb.Block().Payset[0].ApplicationID
+ require.Equal(t, basics.AppIndex(1), appIndex)
+
+ fund := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: appIndex.Address(),
+ Amount: 1_000_000,
+ }
+ rekey := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appIndex,
+ }
+ eval = testingEvaluator{l.nextBlock(t), l}
+ eval.txns(t, &fund, &rekey)
+ eval.txn(t, rekey.Noted("2"), "unauthorized")
+ l.endBlock(t, eval)
+
+}
+
+func TestNote(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ l := newTestLedger(t, genBalances)
+ defer l.Close()
+
+ app := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
+ itxn_begin
+ int pay
+ itxn_field TypeEnum
+ int 0
+ itxn_field Amount
+ global CurrentApplicationAddress
+ itxn_field Receiver
+ byte "abcdefghijklmnopqrstuvwxyz01234567890"
+ itxn_field Note
+ itxn_submit
+`),
+ }
+
+ eval := testingEvaluator{l.nextBlock(t), l}
+ eval.txns(t, &app)
+ vb := l.endBlock(t, eval)
+ appIndex := vb.Block().Payset[0].ApplicationID
+ require.Equal(t, basics.AppIndex(1), appIndex)
+
+ fund := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: appIndex.Address(),
+ Amount: 1_000_000,
+ }
+ note := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appIndex,
+ }
+ eval = testingEvaluator{l.nextBlock(t), l}
+ eval.txns(t, &fund, &note)
+ vb = l.endBlock(t, eval)
+ alphabet := vb.Block().Payset[1].EvalDelta.InnerTxns[0].Txn.Note
+ require.Equal(t, "abcdefghijklmnopqrstuvwxyz01234567890", string(alphabet))
+}
+
+func TestKeyreg(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ l := newTestLedger(t, genBalances)
+ defer l.Close()
+
+ app := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
+ txn ApplicationArgs 0
+ byte "pay"
+ ==
+ bz nonpart
+ itxn_begin
+ int pay
+ itxn_field TypeEnum
+ int 1
+ itxn_field Amount
+ txn Sender
+ itxn_field Receiver
+ itxn_submit
+ int 1
+ return
+nonpart:
+ itxn_begin
+ int keyreg
+ itxn_field TypeEnum
+ int 1
+ itxn_field Nonparticipation
+ itxn_submit
+`),
+ }
+
+ // Create the app
+ eval := testingEvaluator{l.nextBlock(t), l}
+ eval.txns(t, &app)
+ vb := l.endBlock(t, eval)
+ appIndex := vb.Block().Payset[0].ApplicationID
+ require.Equal(t, basics.AppIndex(1), appIndex)
+
+ // Give the app a lot of money
+ fund := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: appIndex.Address(),
+ Amount: 1_000_000_000,
+ }
+ eval = testingEvaluator{l.nextBlock(t), l}
+ eval.txn(t, &fund)
+ l.endBlock(t, eval)
+
+ require.Equal(t, 1_000_000_000, int(l.micros(t, appIndex.Address())))
+
+ // Build up Residue in RewardsState so it's ready to pay
+ for i := 1; i < 10; i++ {
+ eval := testingEvaluator{l.nextBlock(t), l}
+ l.endBlock(t, eval)
+ }
+
+ // pay a little
+ pay := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: appIndex,
+ ApplicationArgs: [][]byte{[]byte("pay")},
+ }
+ eval = testingEvaluator{l.nextBlock(t), l}
+ eval.txn(t, &pay)
+ l.endBlock(t, eval)
+ // 2000 was earned in rewards (- 1000 fee, -1 pay)
+ require.Equal(t, 1_000_000_999, int(l.micros(t, appIndex.Address())))
+
+ // Go nonpart
+ nonpart := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: appIndex,
+ ApplicationArgs: [][]byte{[]byte("nonpart")},
+ }
+ eval = testingEvaluator{l.nextBlock(t), l}
+ eval.txn(t, &nonpart)
+ l.endBlock(t, eval)
+ require.Equal(t, 999_999_999, int(l.micros(t, appIndex.Address())))
+
+ // Build up Residue in RewardsState so it's ready to pay AGAIN
+ // But expect no rewards
+ for i := 1; i < 100; i++ {
+ eval := testingEvaluator{l.nextBlock(t), l}
+ l.endBlock(t, eval)
+ }
+ eval = testingEvaluator{l.nextBlock(t), l}
+ eval.txn(t, pay.Noted("again"))
+ eval.txn(t, nonpart.Noted("again"), "cannot change online/offline")
+ l.endBlock(t, eval)
+ // Paid fee and 1. Did not get rewards
+ require.Equal(t, 999_998_998, int(l.micros(t, appIndex.Address())))
+}
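A worked tally of the app account balances asserted above, assuming the 1000 microAlgo minimum fee is paid by each inner transaction out of the app account: the "pay" call nets 1_000_000_000 + 2_000 rewards - 1_000 inner fee - 1 payment = 1_000_000_999; the "nonpart" keyreg costs another 1_000 inner fee, leaving 999_999_999; once nonparticipating, the account accrues no further rewards, so the final "pay" leaves 999_999_999 - 1_000 - 1 = 999_998_998.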
diff --git a/ledger/archival_test.go b/ledger/archival_test.go
index 8ec3c6127..9d4f3b15d 100644
--- a/ledger/archival_test.go
+++ b/ledger/archival_test.go
@@ -40,6 +40,7 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/ledger/internal"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
@@ -68,7 +69,7 @@ func (wl *wrappedLedger) BlockHdr(rnd basics.Round) (bookkeeping.BlockHeader, er
return wl.l.BlockHdr(rnd)
}
-func (wl *wrappedLedger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger ledgerForEvaluator) (ledgercore.StateDelta, error) {
+func (wl *wrappedLedger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger internal.LedgerForEvaluator) (ledgercore.StateDelta, error) {
return wl.l.trackerEvalVerified(blk, accUpdatesLedger)
}
@@ -96,7 +97,11 @@ func (wl *wrappedLedger) GenesisProto() config.ConsensusParams {
return wl.l.GenesisProto()
}
-func getInitState() (genesisInitState InitState) {
+func (wl *wrappedLedger) GenesisAccounts() map[basics.Address]basics.AccountData {
+ return wl.l.GenesisAccounts()
+}
+
+func getInitState() (genesisInitState ledgercore.InitState) {
blk := bookkeeping.Block{}
blk.CurrentProtocol = protocol.ConsensusCurrentVersion
blk.RewardsPool = testPoolAddr
@@ -790,9 +795,10 @@ func checkTrackers(t *testing.T, wl *wrappedLedger, rnd basics.Round) (basics.Ro
defer wl.l.trackerMu.RUnlock()
for _, trk := range wl.l.trackers.trackers {
if au, ok := trk.(*accountUpdates); ok {
- au.waitAccountsWriting()
- minSave = trk.committedUpTo(rnd)
- au.waitAccountsWriting()
+ wl.l.trackers.waitAccountsWriting()
+ minSave, _ = trk.committedUpTo(rnd)
+ wl.l.trackers.committedUpTo(rnd)
+ wl.l.trackers.waitAccountsWriting()
if minSave < minMinSave {
minMinSave = minSave
}
@@ -804,9 +810,9 @@ func checkTrackers(t *testing.T, wl *wrappedLedger, rnd basics.Round) (basics.Ro
au = cleanTracker.(*accountUpdates)
cfg := config.GetDefaultLocal()
cfg.Archival = true
- au.initialize(cfg, "", au.initProto, wl.l.accts.initAccounts)
+ au.initialize(cfg)
} else {
- minSave = trk.committedUpTo(rnd)
+ minSave, _ = trk.committedUpTo(rnd)
if minSave < minMinSave {
minMinSave = minSave
}
@@ -817,7 +823,7 @@ func checkTrackers(t *testing.T, wl *wrappedLedger, rnd basics.Round) (basics.Ro
}
cleanTracker.close()
- err := cleanTracker.loadFromDisk(wl)
+ err := cleanTracker.loadFromDisk(wl, wl.l.trackers.dbRound)
require.NoError(t, err)
cleanTracker.close()
diff --git a/ledger/blockqueue_test.go b/ledger/blockqueue_test.go
index 9b69d277d..55f2a39ef 100644
--- a/ledger/blockqueue_test.go
+++ b/ledger/blockqueue_test.go
@@ -28,6 +28,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -36,7 +37,7 @@ import (
func TestPutBlockTooOld(t *testing.T) {
partitiontest.PartitionTest(t)
- genesisInitState, _, _ := genesis(10)
+ genesisInitState, _, _ := ledgertesting.Genesis(10)
dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
const inMem = true
@@ -67,7 +68,7 @@ func TestPutBlockTooOld(t *testing.T) {
func TestGetEncodedBlockCert(t *testing.T) {
partitiontest.PartitionTest(t)
- genesisInitState, _, _ := genesis(10)
+ genesisInitState, _, _ := ledgertesting.Genesis(10)
const inMem = true
cfg := config.GetDefaultLocal()
diff --git a/ledger/bulletin.go b/ledger/bulletin.go
index b7bb53ce7..1e95ee2ab 100644
--- a/ledger/bulletin.go
+++ b/ledger/bulletin.go
@@ -17,6 +17,8 @@
package ledger
import (
+ "context"
+ "database/sql"
"sync/atomic"
"github.com/algorand/go-deadlock"
@@ -78,7 +80,7 @@ func (b *bulletin) Wait(round basics.Round) chan struct{} {
return signal.signal
}
-func (b *bulletin) loadFromDisk(l ledgerForTracker) error {
+func (b *bulletin) loadFromDisk(l ledgerForTracker, _ basics.Round) error {
b.pendingNotificationRequests = make(map[basics.Round]notifier)
b.latestRound = l.Latest()
return nil
@@ -90,7 +92,7 @@ func (b *bulletin) close() {
func (b *bulletin) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
}
-func (b *bulletin) committedUpTo(rnd basics.Round) basics.Round {
+func (b *bulletin) committedUpTo(rnd basics.Round) (retRound, lookback basics.Round) {
b.mu.Lock()
defer b.mu.Unlock()
@@ -104,5 +106,22 @@ func (b *bulletin) committedUpTo(rnd basics.Round) basics.Round {
}
b.latestRound = rnd
- return rnd
+ return rnd, basics.Round(0)
+}
+
+func (b *bulletin) prepareCommit(dcc *deferredCommitContext) error {
+ return nil
+}
+
+func (b *bulletin) commitRound(context.Context, *sql.Tx, *deferredCommitContext) error {
+ return nil
+}
+
+func (b *bulletin) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+}
+
+func (b *bulletin) handleUnorderedCommit(uint64, basics.Round, basics.Round) {
+}
+func (b *bulletin) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
+ return dcr
}
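Taken together, the bulletin's no-op additions above trace the widened tracker contract. A sketch of the shape these hooks appear to satisfy, inferred purely from the signatures in this diff (the interface's actual name and definition in the ledger package are assumptions):

    type ledgerTrackerSketch interface {
        loadFromDisk(l ledgerForTracker, dbRound basics.Round) error
        newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta)
        committedUpTo(rnd basics.Round) (retRound, lookback basics.Round)
        produceCommittingTask(committedRound, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange
        prepareCommit(dcc *deferredCommitContext) error
        commitRound(ctx context.Context, tx *sql.Tx, dcc *deferredCommitContext) error
        postCommit(ctx context.Context, dcc *deferredCommitContext)
        handleUnorderedCommit(offset uint64, dbRound basics.Round, lookback basics.Round)
        close()
    }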
diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go
new file mode 100644
index 000000000..e70b526da
--- /dev/null
+++ b/ledger/catchpointtracker.go
@@ -0,0 +1,901 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "context"
+ "database/sql"
+ "encoding/hex"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "sync/atomic"
+ "time"
+
+ "github.com/algorand/go-deadlock"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merkletrie"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/logging/telemetryspec"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/db"
+)
+
+// trieCachedNodesCount defines how many balances trie nodes we would like to keep around in memory.
+// value was calibrated using BenchmarkCalibrateCacheNodeSize
+var trieCachedNodesCount = 9000
+
+// merkleCommitterNodesPerPage controls how many nodes will be stored in a single page
+// value was calibrated using BenchmarkCalibrateNodesPerPage
+var merkleCommitterNodesPerPage = int64(116)
+
+const (
+ // trieRebuildAccountChunkSize defines the number of accounts that would get read in a single chunk
+ // before being added to the trie during trie construction
+ trieRebuildAccountChunkSize = 16384
+ // trieRebuildCommitFrequency defines the number of accounts that would get added before we call evict to commit the changes and adjust the memory cache.
+ trieRebuildCommitFrequency = 65536
+ // trieAccumulatedChangesFlush defines the number of pending changes that would be applied to the merkle trie before
+ // we attempt to commit them to disk while writing a batch of rounds' balances to disk.
+ trieAccumulatedChangesFlush = 256
+)
+
+// TrieMemoryConfig is the memory configuration setup used for the merkle trie.
+var TrieMemoryConfig = merkletrie.MemoryConfig{
+ NodesCountPerPage: merkleCommitterNodesPerPage,
+ CachedNodesCount: trieCachedNodesCount,
+ PageFillFactor: 0.95,
+ MaxChildrenPagesThreshold: 64,
+}
+
+type catchpointTracker struct {
+ // dbDirectory is the directory where the ledger and block sql file resides as well as the parent directory for the catchup files to be generated
+ dbDirectory string
+
+ // catchpointInterval is the configured interval at which the accountUpdates would generate catchpoint labels and catchpoint files.
+ catchpointInterval uint64
+
+ // catchpointFileHistoryLength defines how many catchpoint files we want to store back.
+ // 0 means don't store any, -1 means unlimited, and a positive number specifies how many of the most recent catchpoint files to keep.
+ catchpointFileHistoryLength int
+
+ // archivalLedger determines whether the associated ledger was configured as archival ledger or not.
+ archivalLedger bool
+
+ // Prepared SQL statements for fast accounts DB lookups.
+ accountsq *accountsDbQueries
+
+ // log copied from ledger
+ log logging.Logger
+
+ // Connection to the database.
+ dbs db.Pair
+
+ // The last catchpoint label that was written to the database. Should always align with what's in the database.
+ // note that this is the last catchpoint *label* and not the catchpoint file.
+ lastCatchpointLabel string
+
+ // catchpointSlowWriting suggests to the accounts writer that it should finish writing the catchpoint file ASAP.
+ // when this channel is closed, the accounts writer tries to complete the writing as soon as possible.
+ // otherwise, it takes its time and performs periodic sleeps between processing chunks.
+ catchpointSlowWriting chan struct{}
+
+ // catchpointWriting helps to synchronize the catchpoint file writing. When this atomic variable is 0, no writing is going on.
+ // Any non-zero value indicates a catchpoint being written, or scheduled to be written.
+ catchpointWriting int32
+
+ // The Trie tracking the current account balances. Always matches the balances that were
+ // written to the database.
+ balancesTrie *merkletrie.Trie
+
+ // catchpointsMu is the synchronization mutex for accessing the various non-static variables.
+ catchpointsMu deadlock.RWMutex
+
+ // roundDigest stores the digest of the block for every round starting with dbRound and every round after it.
+ roundDigest []crypto.Digest
+}
+
+// initialize initializes the catchpointTracker structure
+func (ct *catchpointTracker) initialize(cfg config.Local, dbPathPrefix string) {
+ ct.dbDirectory = filepath.Dir(dbPathPrefix)
+ ct.archivalLedger = cfg.Archival
+ switch cfg.CatchpointTracking {
+ case -1:
+ ct.catchpointInterval = 0
+ default:
+ // give a warning, then fall through
+ logging.Base().Warnf("catchpointTracker: the CatchpointTracking field in the config.json file contains an invalid value (%d). The default value of 0 would be used instead.", cfg.CatchpointTracking)
+ fallthrough
+ case 0:
+ if ct.archivalLedger {
+ ct.catchpointInterval = cfg.CatchpointInterval
+ } else {
+ ct.catchpointInterval = 0
+ }
+ case 1:
+ ct.catchpointInterval = cfg.CatchpointInterval
+ }
+
+ ct.catchpointFileHistoryLength = cfg.CatchpointFileHistoryLength
+ if cfg.CatchpointFileHistoryLength < -1 {
+ ct.catchpointFileHistoryLength = -1
+ }
+}
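The switch above (with its deliberate fallthrough) resolves cfg.CatchpointTracking into an effective interval. A summary of the mapping as implemented:

    // CatchpointTracking == -1  -> interval 0 (catchpoints disabled)
    // CatchpointTracking ==  0  -> cfg.CatchpointInterval if the ledger is archival, else 0
    // CatchpointTracking ==  1  -> cfg.CatchpointInterval
    // any other value           -> warn, then behave as 0 (the fallthrough case)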
+
+// GetLastCatchpointLabel retrieves the last catchpoint label that was stored to the database.
+func (ct *catchpointTracker) GetLastCatchpointLabel() string {
+ ct.catchpointsMu.RLock()
+ defer ct.catchpointsMu.RUnlock()
+ return ct.lastCatchpointLabel
+}
+
+// loadFromDisk loads the state of a tracker from persistent
+// storage. The ledger argument allows loadFromDisk to load
+// blocks from the database, or access its own state. The
+// ledgerForTracker interface abstracts away the details of
+// ledger internals so that individual trackers can be tested
+// in isolation.
+func (ct *catchpointTracker) loadFromDisk(l ledgerForTracker, lastBalancesRound basics.Round) (err error) {
+ ct.log = l.trackerLog()
+ ct.dbs = l.trackerDB()
+
+ ct.roundDigest = nil
+ ct.catchpointWriting = 0
+ // keep this channel closed if we're not generating a catchpoint
+ ct.catchpointSlowWriting = make(chan struct{}, 1)
+ close(ct.catchpointSlowWriting)
+
+ err = ct.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ err0 := ct.accountsInitializeHashes(ctx, tx, lastBalancesRound)
+ if err0 != nil {
+ return err0
+ }
+ return nil
+ })
+
+ if err != nil {
+ return err
+ }
+
+ ct.accountsq, err = accountsInitDbQueries(ct.dbs.Rdb.Handle, ct.dbs.Wdb.Handle)
+ if err != nil {
+ return
+ }
+
+ ct.lastCatchpointLabel, _, err = ct.accountsq.readCatchpointStateString(context.Background(), catchpointStateLastCatchpoint)
+ if err != nil {
+ return
+ }
+
+ writingCatchpointRound, _, err := ct.accountsq.readCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint)
+ if err != nil {
+ return err
+ }
+ if writingCatchpointRound == 0 || !ct.catchpointEnabled() {
+ return nil
+ }
+ var dbRound basics.Round
+ // make sure that the database is at the desired round.
+ err = ct.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ dbRound, err = accountsRound(tx)
+ return
+ })
+ if err != nil {
+ return err
+ }
+ if dbRound != basics.Round(writingCatchpointRound) {
+ return nil
+ }
+
+ blk, err := l.Block(dbRound)
+ if err != nil {
+ return err
+ }
+ blockHeaderDigest := blk.Digest()
+
+ ct.generateCatchpoint(context.Background(), basics.Round(writingCatchpointRound), ct.lastCatchpointLabel, blockHeaderDigest, time.Duration(0))
+ return nil
+}
+
+// newBlock informs the tracker of a new block from round
+// rnd and a given ledgercore.StateDelta as produced by BlockEvaluator.
+func (ct *catchpointTracker) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
+ ct.catchpointsMu.Lock()
+ defer ct.catchpointsMu.Unlock()
+ ct.roundDigest = append(ct.roundDigest, blk.Digest())
+}
+
+// committedUpTo implements the ledgerTracker interface for catchpointTracker.
+// The method informs the tracker that committedRound and all its previous rounds have
+// been committed to the block database. The method returns the oldest round
+// number that can be removed from the blocks database, as well as the lookback that this
+// tracker maintains.
+func (ct *catchpointTracker) committedUpTo(rnd basics.Round) (retRound, lookback basics.Round) {
+ return rnd, basics.Round(0)
+}
+
+func (ct *catchpointTracker) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
+ var hasMultipleIntermediateCatchpoint, hasIntermediateCatchpoint bool
+
+ newBase := dcr.oldBase + basics.Round(dcr.offset)
+
+ // check if there was a catchpoint between dcr.oldBase+lookback and dcr.oldBase+offset+lookback
+ if ct.catchpointInterval > 0 {
+ nextCatchpointRound := ((uint64(dcr.oldBase+dcr.lookback) + ct.catchpointInterval) / ct.catchpointInterval) * ct.catchpointInterval
+
+ if nextCatchpointRound < uint64(dcr.oldBase+dcr.lookback)+dcr.offset {
+ mostRecentCatchpointRound := (uint64(committedRound) / ct.catchpointInterval) * ct.catchpointInterval
+ newBase = basics.Round(nextCatchpointRound) - dcr.lookback
+ if mostRecentCatchpointRound > nextCatchpointRound {
+ hasMultipleIntermediateCatchpoint = true
+ // skip if there is more than one catchpoint in queue
+ newBase = basics.Round(mostRecentCatchpointRound) - dcr.lookback
+ }
+ hasIntermediateCatchpoint = true
+ }
+ }
+
+ // if we're still writing the previous balances, we can't move forward yet.
+ if ct.IsWritingCatchpointFile() {
+ // if we hit this path, it means that we're still writing a catchpoint.
+ // see if the new delta range contains another catchpoint.
+ if hasIntermediateCatchpoint {
+ // check if we're already attempting to perform fast-writing.
+ select {
+ case <-ct.catchpointSlowWriting:
+ // yes, we're already doing fast-writing.
+ default:
+ // no, we're not yet doing fast writing, make it so.
+ close(ct.catchpointSlowWriting)
+ }
+ }
+ return nil
+ }
+
+ dcr.offset = uint64(newBase - dcr.oldBase)
+
+ // check to see if this is a catchpoint round
+ dcr.isCatchpointRound = ct.isCatchpointRound(dcr.offset, dcr.oldBase, dcr.lookback)
+
+ if dcr.isCatchpointRound && ct.archivalLedger {
+ // store a non-zero value (all ones) into the catchpointWriting atomic variable to indicate that a catchpoint is being written (or queued to be written)
+ atomic.StoreInt32(&ct.catchpointWriting, int32(-1))
+ ct.catchpointSlowWriting = make(chan struct{}, 1)
+ if hasMultipleIntermediateCatchpoint {
+ close(ct.catchpointSlowWriting)
+ }
+ }
+
+ dcr.catchpointWriting = &ct.catchpointWriting
+
+ return dcr
+}
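A worked example of the boundary arithmetic above, with hypothetical numbers: let catchpointInterval = 10, dcr.oldBase = 95, dcr.lookback = 0, dcr.offset = 20, committedRound = 115. Then nextCatchpointRound = ((95+0+10)/10)*10 = 100, which is less than (95+0)+20 = 115, so a catchpoint falls inside the delta range; mostRecentCatchpointRound = (115/10)*10 = 110 exceeds 100, so more than one catchpoint is queued, the intermediate catchpoint at 100 is skipped, newBase advances to 110-0 = 110, and dcr.offset shrinks to 110-95 = 15 so the commit stops at the most recent catchpoint boundary.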
+
+// prepareCommit, commitRound and postCommit are called when it is time to commit tracker's data.
+// If an error returned the process is aborted.
+func (ct *catchpointTracker) prepareCommit(dcc *deferredCommitContext) error {
+ ct.catchpointsMu.RLock()
+ defer ct.catchpointsMu.RUnlock()
+ if dcc.isCatchpointRound {
+ dcc.committedRoundDigest = ct.roundDigest[dcc.offset+uint64(dcc.lookback)-1]
+ }
+ return nil
+}
+
+func (ct *catchpointTracker) commitRound(ctx context.Context, tx *sql.Tx, dcc *deferredCommitContext) (err error) {
+ treeTargetRound := basics.Round(0)
+ offset := dcc.offset
+ dbRound := dcc.oldBase
+
+ defer func() {
+ if err != nil {
+ if dcc.isCatchpointRound && ct.archivalLedger {
+ atomic.StoreInt32(&ct.catchpointWriting, 0)
+ }
+ }
+ }()
+
+ if ct.catchpointEnabled() {
+ var mc *MerkleCommitter
+ mc, err = MakeMerkleCommitter(tx, false)
+ if err != nil {
+ return
+ }
+
+ var trie *merkletrie.Trie
+ if ct.balancesTrie == nil {
+ trie, err = merkletrie.MakeTrie(mc, TrieMemoryConfig)
+ if err != nil {
+ ct.log.Warnf("unable to create merkle trie during committedUpTo: %v", err)
+ return err
+ }
+ ct.balancesTrie = trie
+ } else {
+ ct.balancesTrie.SetCommitter(mc)
+ }
+ treeTargetRound = dbRound + basics.Round(offset)
+ }
+
+ if dcc.updateStats {
+ dcc.stats.MerkleTrieUpdateDuration = time.Duration(time.Now().UnixNano())
+ }
+
+ err = ct.accountsUpdateBalances(dcc.compactAccountDeltas)
+ if err != nil {
+ return err
+ }
+
+ if dcc.updateStats {
+ now := time.Duration(time.Now().UnixNano())
+ dcc.stats.MerkleTrieUpdateDuration = now - dcc.stats.MerkleTrieUpdateDuration
+ }
+
+ err = updateAccountsHashRound(tx, treeTargetRound)
+ if err != nil {
+ return err
+ }
+
+ if dcc.isCatchpointRound {
+ dcc.trieBalancesHash, err = ct.balancesTrie.RootHash()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (ct *catchpointTracker) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+ var err error
+ if dcc.isCatchpointRound {
+ dcc.catchpointLabel, err = ct.accountsCreateCatchpointLabel(dcc.newBase+dcc.lookback, dcc.roundTotals, dcc.committedRoundDigest, dcc.trieBalancesHash)
+ if err != nil {
+ ct.log.Warnf("commitRound : unable to create a catchpoint label: %v", err)
+ }
+ }
+ if ct.balancesTrie != nil {
+ _, err = ct.balancesTrie.Evict(false)
+ if err != nil {
+ ct.log.Warnf("merkle trie failed to evict: %v", err)
+ }
+ }
+
+ if dcc.isCatchpointRound && dcc.catchpointLabel != "" {
+ ct.lastCatchpointLabel = dcc.catchpointLabel
+ }
+ dcc.updatingBalancesDuration = time.Since(dcc.flushTime)
+
+ if dcc.updateStats {
+ dcc.stats.MemoryUpdatesDuration = time.Duration(time.Now().UnixNano())
+ }
+
+ ct.catchpointsMu.Lock()
+
+ ct.roundDigest = ct.roundDigest[dcc.offset:]
+
+ ct.catchpointsMu.Unlock()
+
+ if dcc.isCatchpointRound && ct.archivalLedger && dcc.catchpointLabel != "" {
+ // generate the catchpoint file. This needs to be done inline so that it blocks any new accounts from being written.
+ // generateCatchpoint expects that the accounts data would not be modified in the background during its execution.
+ ct.generateCatchpoint(ctx, basics.Round(dcc.offset)+dcc.oldBase+dcc.lookback, dcc.catchpointLabel, dcc.committedRoundDigest, dcc.updatingBalancesDuration)
+ }
+ // in scheduleCommit, we expect this function to update catchpointWriting when
+ // it's a catchpoint round on an archival ledger. Doing this in a deferred function
+ // here would prevent us from "forgetting" to update this variable later on.
+ if dcc.isCatchpointRound && ct.archivalLedger {
+ atomic.StoreInt32(dcc.catchpointWriting, 0)
+ }
+}
+
+// handleUnorderedCommit is a special method for handling deferred commits that are out of order.
+// A tracker might update its own state in this case. For example, the catchpoint tracker cancels
+// the catchpoint writing that was scheduled for that deferred commit.
+func (ct *catchpointTracker) handleUnorderedCommit(offset uint64, dbRound basics.Round, lookback basics.Round) {
+ // if this is an archival ledger, we might need to update the catchpointWriting variable.
+ if ct.archivalLedger {
+ // determine if this was a catchpoint round
+ if ct.isCatchpointRound(offset, dbRound, lookback) {
+ // it was a catchpoint round, so update the catchpointWriting to indicate that we're done.
+ atomic.StoreInt32(&ct.catchpointWriting, 0)
+ }
+ }
+}
+
+// close terminates the tracker, reclaiming any resources
+// like open database connections or goroutines. close may
+// be called even if loadFromDisk() is not called or does
+// not succeed.
+func (ct *catchpointTracker) close() {
+
+}
+
+// accountsUpdateBalances applies the given compactAccountDeltas to the merkle trie
+func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccountDeltas) (err error) {
+ if !ct.catchpointEnabled() {
+ return nil
+ }
+ var added, deleted bool
+ accumulatedChanges := 0
+
+ for i := 0; i < accountsDeltas.len(); i++ {
+ addr, delta := accountsDeltas.getByIdx(i)
+ if !delta.old.accountData.IsZero() {
+ deleteHash := accountHashBuilder(addr, delta.old.accountData, protocol.Encode(&delta.old.accountData))
+ deleted, err = ct.balancesTrie.Delete(deleteHash)
+ if err != nil {
+ return fmt.Errorf("failed to delete hash '%s' from merkle trie for account %v: %w", hex.EncodeToString(deleteHash), addr, err)
+ }
+ if !deleted {
+ ct.log.Warnf("failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(deleteHash), addr)
+ } else {
+ accumulatedChanges++
+ }
+ }
+
+ if !delta.new.IsZero() {
+ addHash := accountHashBuilder(addr, delta.new, protocol.Encode(&delta.new))
+ added, err = ct.balancesTrie.Add(addHash)
+ if err != nil {
+ return fmt.Errorf("attempted to add duplicate hash '%s' to merkle trie for account %v: %w", hex.EncodeToString(addHash), addr, err)
+ }
+ if !added {
+ ct.log.Warnf("attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(addHash), addr)
+ } else {
+ accumulatedChanges++
+ }
+ }
+ }
+ if accumulatedChanges >= trieAccumulatedChangesFlush {
+ accumulatedChanges = 0
+ _, err = ct.balancesTrie.Commit()
+ if err != nil {
+ return
+ }
+ }
+
+ // write it all to disk.
+ if accumulatedChanges > 0 {
+ _, err = ct.balancesTrie.Commit()
+ }
+
+ return
+}
+
+// IsWritingCatchpointFile returns true when a catchpoint file is being generated. The function is used by the catchup service
+// to avoid memory pressure until the catchpoint file writing is complete.
+func (ct *catchpointTracker) IsWritingCatchpointFile() bool {
+ return atomic.LoadInt32(&ct.catchpointWriting) != 0
+}
+
+// isCatchpointRound returns true if the round at the given offset from dbRound, with the provided lookback, should be a catchpoint round.
+func (ct *catchpointTracker) isCatchpointRound(offset uint64, dbRound basics.Round, lookback basics.Round) bool {
+ return ((offset + uint64(lookback+dbRound)) > 0) && (ct.catchpointInterval != 0) && ((uint64((offset + uint64(lookback+dbRound))) % ct.catchpointInterval) == 0)
+}
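For example, with offset = 36, dbRound = 9900, lookback = 64 and catchpointInterval = 10000: 36 + 64 + 9900 = 10000 and 10000 % 10000 == 0, so the round being committed lands exactly on a catchpoint boundary and the method returns true.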
+
+// accountsCreateCatchpointLabel creates a catchpoint label and writes it.
+func (ct *catchpointTracker) accountsCreateCatchpointLabel(committedRound basics.Round, totals ledgercore.AccountTotals, ledgerBlockDigest crypto.Digest, trieBalancesHash crypto.Digest) (label string, err error) {
+ cpLabel := ledgercore.MakeCatchpointLabel(committedRound, ledgerBlockDigest, trieBalancesHash, totals)
+ label = cpLabel.String()
+ _, err = ct.accountsq.writeCatchpointStateString(context.Background(), catchpointStateLastCatchpoint, label)
+ return
+}
+
+// generateCatchpoint generates a single catchpoint file
+func (ct *catchpointTracker) generateCatchpoint(ctx context.Context, committedRound basics.Round, label string, committedRoundDigest crypto.Digest, updatingBalancesDuration time.Duration) {
+ beforeGeneratingCatchpointTime := time.Now()
+ catchpointGenerationStats := telemetryspec.CatchpointGenerationEventDetails{
+ BalancesWriteTime: uint64(updatingBalancesDuration.Nanoseconds()),
+ }
+
+ // retryCatchpointCreation allows the catchpoint file generation to be repeated on the next startup in case
+ // the node crashed or was aborted before the catchpoint file generation could be completed.
+ retryCatchpointCreation := false
+ ct.log.Debugf("accountUpdates: generateCatchpoint: generating catchpoint for round %d", committedRound)
+ defer func() {
+ if !retryCatchpointCreation {
+ // clear the writingCatchpoint flag
+ _, err := ct.accountsq.writeCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint, uint64(0))
+ if err != nil {
+ ct.log.Warnf("accountUpdates: generateCatchpoint unable to clear catchpoint state '%s' for round %d: %v", catchpointStateWritingCatchpoint, committedRound, err)
+ }
+ }
+ }()
+
+ _, err := ct.accountsq.writeCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint, uint64(committedRound))
+ if err != nil {
+ ct.log.Warnf("accountUpdates: generateCatchpoint unable to write catchpoint state '%s' for round %d: %v", catchpointStateWritingCatchpoint, committedRound, err)
+ return
+ }
+
+ relCatchpointFileName := filepath.Join("catchpoints", catchpointRoundToPath(committedRound))
+ absCatchpointFileName := filepath.Join(ct.dbDirectory, relCatchpointFileName)
+
+ more := true
+ const shortChunkExecutionDuration = 50 * time.Millisecond
+ const longChunkExecutionDuration = 1 * time.Second
+ var chunkExecutionDuration time.Duration
+ select {
+ case <-ct.catchpointSlowWriting:
+ chunkExecutionDuration = longChunkExecutionDuration
+ default:
+ chunkExecutionDuration = shortChunkExecutionDuration
+ }
+
+ var catchpointWriter *catchpointWriter
+ start := time.Now()
+ ledgerGeneratecatchpointCount.Inc(nil)
+ err = ct.dbs.Rdb.Atomic(func(dbCtx context.Context, tx *sql.Tx) (err error) {
+ catchpointWriter = makeCatchpointWriter(ctx, absCatchpointFileName, tx, committedRound, committedRoundDigest, label)
+ for more {
+ stepCtx, stepCancelFunction := context.WithTimeout(ctx, chunkExecutionDuration)
+ writeStepStartTime := time.Now()
+ more, err = catchpointWriter.WriteStep(stepCtx)
+ // accumulate the actual time we've spent writing in this step.
+ catchpointGenerationStats.CPUTime += uint64(time.Since(writeStepStartTime).Nanoseconds())
+ stepCancelFunction()
+ if more && err == nil {
+ // we just wrote some data, but there is more to be written.
+ // go to sleep for a while.
+ // before going to sleep, extend the transaction timeout so that we won't get warnings:
+ _, err0 := db.ResetTransactionWarnDeadline(dbCtx, tx, time.Now().Add(1*time.Second))
+ if err0 != nil {
+ ct.log.Warnf("catchpointTracker: generateCatchpoint: failed to reset transaction warn deadline : %v", err0)
+ }
+ select {
+ case <-time.After(100 * time.Millisecond):
+ // increase the time slot allocated for writing the catchpoint, but stop when we get to the longChunkExecutionDuration limit.
+ // this would allow the catchpoint writing speed to ramp up while still leaving some cpu available.
+ chunkExecutionDuration *= 2
+ if chunkExecutionDuration > longChunkExecutionDuration {
+ chunkExecutionDuration = longChunkExecutionDuration
+ }
+ case <-ctx.Done():
+ retryCatchpointCreation = true
+ err2 := catchpointWriter.Abort()
+ if err2 != nil {
+ return fmt.Errorf("error removing catchpoint file : %v", err2)
+ }
+ return nil
+ case <-ct.catchpointSlowWriting:
+ chunkExecutionDuration = longChunkExecutionDuration
+ }
+ }
+ if err != nil {
+ err = fmt.Errorf("unable to create catchpoint : %v", err)
+ err2 := catchpointWriter.Abort()
+ if err2 != nil {
+ ct.log.Warnf("accountUpdates: generateCatchpoint: error removing catchpoint file : %v", err2)
+ }
+ return
+ }
+ }
+ return
+ })
+ ledgerGeneratecatchpointMicros.AddMicrosecondsSince(start, nil)
+
+ if err != nil {
+ ct.log.Warnf("accountUpdates: generateCatchpoint: %v", err)
+ return
+ }
+ if catchpointWriter == nil {
+ ct.log.Warnf("accountUpdates: generateCatchpoint: nil catchpointWriter")
+ return
+ }
+
+ err = ct.saveCatchpointFile(committedRound, relCatchpointFileName, catchpointWriter.GetSize(), catchpointWriter.GetCatchpoint())
+ if err != nil {
+ ct.log.Warnf("accountUpdates: generateCatchpoint: unable to save catchpoint: %v", err)
+ return
+ }
+ catchpointGenerationStats.FileSize = uint64(catchpointWriter.GetSize())
+ catchpointGenerationStats.WritingDuration = uint64(time.Since(beforeGeneratingCatchpointTime).Nanoseconds())
+ catchpointGenerationStats.AccountsCount = catchpointWriter.GetTotalAccounts()
+ catchpointGenerationStats.CatchpointLabel = catchpointWriter.GetCatchpoint()
+ ct.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.CatchpointGenerationEvent, catchpointGenerationStats)
+ ct.log.With("writingDuration", catchpointGenerationStats.WritingDuration).
+ With("CPUTime", catchpointGenerationStats.CPUTime).
+ With("balancesWriteTime", catchpointGenerationStats.BalancesWriteTime).
+ With("accountsCount", catchpointGenerationStats.AccountsCount).
+ With("fileSize", catchpointGenerationStats.FileSize).
+ With("catchpointLabel", catchpointGenerationStats.CatchpointLabel).
+ Infof("Catchpoint file was generated")
+}
+
+// catchpointRoundToPath calculates the catchpoint file path for a given round
+func catchpointRoundToPath(rnd basics.Round) string {
+ irnd := int64(rnd) / 256
+ outStr := ""
+ for irnd > 0 {
+ outStr = filepath.Join(outStr, fmt.Sprintf("%02x", irnd%256))
+ irnd = irnd / 256
+ }
+ outStr = filepath.Join(outStr, strconv.FormatInt(int64(rnd), 10)+".catchpoint")
+ return outStr
+}
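Worked examples of the path fan-out above: round 255 gives irnd = 0, so the loop never runs and the file lands at "255.catchpoint"; round 256 gives irnd = 1, producing "01/256.catchpoint"; round 10000000 gives irnd = 39062, whose base-256 digits (low byte first) are 150 (0x96) and 152 (0x98), producing "96/98/10000000.catchpoint". Each directory level therefore holds at most 256 entries.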
+
+// saveCatchpointFile stores the provided fileName as the stored catchpoint for the given round.
+// After a successful insert operation to the database, it deletes up to 2 old entries, as needed.
+// Deleting 2 entries while inserting a single entry shrinks the backing storage by a net one entry
+// per catchpoint, letting an oversized history realign with the configured length over time.
+func (ct *catchpointTracker) saveCatchpointFile(round basics.Round, fileName string, fileSize int64, catchpoint string) (err error) {
+ if ct.catchpointFileHistoryLength != 0 {
+ err = ct.accountsq.storeCatchpoint(context.Background(), round, fileName, catchpoint, fileSize)
+ if err != nil {
+ ct.log.Warnf("accountUpdates: saveCatchpoint: unable to save catchpoint: %v", err)
+ return
+ }
+ } else {
+ err = os.Remove(fileName)
+ if err != nil {
+ ct.log.Warnf("accountUpdates: saveCatchpoint: unable to remove file (%s): %v", fileName, err)
+ return
+ }
+ }
+ if ct.catchpointFileHistoryLength == -1 {
+ return
+ }
+ var filesToDelete map[basics.Round]string
+ filesToDelete, err = ct.accountsq.getOldestCatchpointFiles(context.Background(), 2, ct.catchpointFileHistoryLength)
+ if err != nil {
+ return fmt.Errorf("unable to delete catchpoint file, getOldestCatchpointFiles failed : %v", err)
+ }
+ for round, fileToDelete := range filesToDelete {
+ absCatchpointFileName := filepath.Join(ct.dbDirectory, fileToDelete)
+ err = os.Remove(absCatchpointFileName)
+ if err == nil || os.IsNotExist(err) {
+ // it's ok if the file doesn't exist. just remove it from the database and we'll be good to go.
+ err = nil
+ } else {
+ // we can't delete the file, abort -
+ return fmt.Errorf("unable to delete old catchpoint file '%s' : %v", absCatchpointFileName, err)
+ }
+ err = ct.accountsq.storeCatchpoint(context.Background(), round, "", "", 0)
+ if err != nil {
+ return fmt.Errorf("unable to delete old catchpoint entry '%s' : %v", fileToDelete, err)
+ }
+ }
+ return
+}
+
+// GetCatchpointStream returns a ReadCloseSizer to the catchpoint file associated with the provided round
+func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseSizer, error) {
+ dbFileName := ""
+ fileSize := int64(0)
+ start := time.Now()
+ ledgerGetcatchpointCount.Inc(nil)
+ err := ct.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ dbFileName, _, fileSize, err = getCatchpoint(tx, round)
+ return
+ })
+ ledgerGetcatchpointMicros.AddMicrosecondsSince(start, nil)
+ if err != nil && err != sql.ErrNoRows {
+ // we had some sql error.
+ return nil, fmt.Errorf("accountUpdates: getCatchpointStream: unable to lookup catchpoint %d: %v", round, err)
+ }
+ if dbFileName != "" {
+ catchpointPath := filepath.Join(ct.dbDirectory, dbFileName)
+ file, err := os.OpenFile(catchpointPath, os.O_RDONLY, 0666)
+ if err == nil && file != nil {
+ return &readCloseSizer{ReadCloser: file, size: fileSize}, nil
+ }
+ // else, see if this is a file-not-found error
+ if os.IsNotExist(err) {
+ // the database told us that we have this file, but we couldn't find it.
+ // delete it from the database.
+ err := ct.saveCatchpointFile(round, "", 0, "")
+ if err != nil {
+ ct.log.Warnf("accountUpdates: getCatchpointStream: unable to delete missing catchpoint entry: %v", err)
+ return nil, err
+ }
+
+ return nil, ledgercore.ErrNoEntry{}
+ }
+ // it's some other error.
+ return nil, fmt.Errorf("accountUpdates: getCatchpointStream: unable to open catchpoint file '%s' %v", catchpointPath, err)
+ }
+
+ // if the database doesn't know about that round, see if we have that file anyway:
+ fileName := filepath.Join("catchpoints", catchpointRoundToPath(round))
+ catchpointPath := filepath.Join(ct.dbDirectory, fileName)
+ file, err := os.OpenFile(catchpointPath, os.O_RDONLY, 0666)
+ if err == nil && file != nil {
+ // great, we found a file that should have been recorded in the database; add it now:
+ fileInfo, err := file.Stat()
+ if err != nil {
+ // we couldn't get the stat, so just return with the file.
+ return &readCloseSizer{ReadCloser: file, size: -1}, nil
+ }
+
+ err = ct.saveCatchpointFile(round, fileName, fileInfo.Size(), "")
+ if err != nil {
+ ct.log.Warnf("accountUpdates: getCatchpointStream: unable to save missing catchpoint entry: %v", err)
+ }
+ return &readCloseSizer{ReadCloser: file, size: fileInfo.Size()}, nil
+ }
+ return nil, ledgercore.ErrNoEntry{}
+}
+
+// deleteStoredCatchpoints iterates over the storedcatchpoints table and deletes all the files stored on disk.
+// Once all the files have been deleted, it goes ahead and removes the entries from the table.
+func deleteStoredCatchpoints(ctx context.Context, dbQueries *accountsDbQueries, dbDirectory string) (err error) {
+ catchpointsFilesChunkSize := 50
+ for {
+ fileNames, err := dbQueries.getOldestCatchpointFiles(ctx, catchpointsFilesChunkSize, 0)
+ if err != nil {
+ return err
+ }
+ if len(fileNames) == 0 {
+ break
+ }
+
+ for round, fileName := range fileNames {
+ absCatchpointFileName := filepath.Join(dbDirectory, fileName)
+ err = os.Remove(absCatchpointFileName)
+ if err == nil || os.IsNotExist(err) {
+ // it's ok if the file doesn't exist. just remove it from the database and we'll be good to go.
+ } else {
+ // we can't delete the file, abort -
+ return fmt.Errorf("unable to delete old catchpoint file '%s' : %v", absCatchpointFileName, err)
+ }
+ // clear the entry from the database
+ err = dbQueries.storeCatchpoint(ctx, round, "", "", 0)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// accountHashBuilder calculates the hash key used for the trie by combining the account address and the account data
+func accountHashBuilder(addr basics.Address, accountData basics.AccountData, encodedAccountData []byte) []byte {
+ hash := make([]byte, 4+crypto.DigestSize)
+ // write out the lowest 32 bits of the reward base. This should improve the caching of the trie by allowing
+ // recently updated accounts to stay in-cache, while "older" nodes are left alone.
+ for i, rewards := 3, accountData.RewardsBase; i >= 0; i, rewards = i-1, rewards>>8 {
+ // the following takes the rewards & 255 -> hash[i]
+ hash[i] = byte(rewards)
+ }
+ entryHash := crypto.Hash(append(addr[:], encodedAccountData[:]...))
+ copy(hash[4:], entryHash[:])
+ return hash[:]
+}
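The resulting key is 4 + 32 bytes. A small worked example, with an assumed RewardsBase of 0x0102030405 (only the low 32 bits are kept): the loop writes hash[0..3] = 02 03 04 05, the big-endian low word of the rewards base, while hash[4..35] holds crypto.Hash(addr || encodedAccountData). Accounts touched at similar reward levels therefore share a key prefix, which is what keeps recently updated trie nodes clustered in the cache.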
+
+func (ct *catchpointTracker) catchpointEnabled() bool {
+ return ct.catchpointInterval != 0
+}
+
+// accountsInitializeHashes initializes account hashes.
+// as part of the initialization, it tests whether the hash table matches the account base and updates the former if needed.
+func (ct *catchpointTracker) accountsInitializeHashes(ctx context.Context, tx *sql.Tx, rnd basics.Round) error {
+ hashRound, err := accountsHashRound(tx)
+ if err != nil {
+ return err
+ }
+
+ if hashRound != rnd {
+ // if the hashed round is different from the base round, something was modified, and the accounts aren't in sync
+ // with the hashes.
+ err = resetAccountHashes(tx)
+ if err != nil {
+ return err
+ }
+ // if catchpoints are disabled on this node, we can complete the initialization right here.
+ if !ct.catchpointEnabled() {
+ return nil
+ }
+ }
+
+ // create the merkle trie for the balances
+ committer, err := MakeMerkleCommitter(tx, false)
+ if err != nil {
+ return fmt.Errorf("accountsInitialize was unable to makeMerkleCommitter: %v", err)
+ }
+
+ trie, err := merkletrie.MakeTrie(committer, TrieMemoryConfig)
+ if err != nil {
+ return fmt.Errorf("accountsInitialize was unable to MakeTrie: %v", err)
+ }
+
+ // we might have a database that was previously initialized, and now we're adding the balances trie. In that case, we need to add all the existing balances to this trie.
+ // we can figure this out by examining the hash of the root:
+ rootHash, err := trie.RootHash()
+ if err != nil {
+ return fmt.Errorf("accountsInitialize was unable to retrieve trie root hash: %v", err)
+ }
+
+ if rootHash.IsZero() {
+ ct.log.Infof("accountsInitialize rebuilding merkle trie for round %d", rnd)
+ accountBuilderIt := makeOrderedAccountsIter(tx, trieRebuildAccountChunkSize)
+ defer accountBuilderIt.Close(ctx)
+ startTrieBuildTime := time.Now()
+ accountsCount := 0
+ lastRebuildTime := startTrieBuildTime
+ pendingAccounts := 0
+ totalOrderedAccounts := 0
+ for {
+ accts, processedRows, err := accountBuilderIt.Next(ctx)
+ if err == sql.ErrNoRows {
+ // the account builder would return sql.ErrNoRows when no more data is available.
+ break
+ } else if err != nil {
+ return err
+ }
+
+ if len(accts) > 0 {
+ accountsCount += len(accts)
+ pendingAccounts += len(accts)
+ for _, acct := range accts {
+ added, err := trie.Add(acct.digest)
+ if err != nil {
+ return fmt.Errorf("accountsInitialize was unable to add changes to trie: %v", err)
+ }
+ if !added {
+ ct.log.Warnf("accountsInitialize attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(acct.digest), acct.address)
+ }
+ }
+
+ if pendingAccounts >= trieRebuildCommitFrequency {
+ // this trie Evict will commit using the current transaction.
+ // if anything goes wrong, it will still get rolled back.
+ _, err = trie.Evict(true)
+ if err != nil {
+ return fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err)
+ }
+ pendingAccounts = 0
+ }
+
+ if time.Since(lastRebuildTime) > 5*time.Second {
+ // let the user know that the trie is still being rebuilt.
+ ct.log.Infof("accountsInitialize still building the trie, and processed so far %d accounts", accountsCount)
+ lastRebuildTime = time.Now()
+ }
+ } else if processedRows > 0 {
+ totalOrderedAccounts += processedRows
+ // if it's not ordered, we can ignore it for now; we'll just increase the counters and emit logs periodically.
+ if time.Since(lastRebuildTime) > 5*time.Second {
+ // let the user know that the trie is still being rebuilt.
+ ct.log.Infof("accountsInitialize still building the trie, and hashed so far %d accounts", totalOrderedAccounts)
+ lastRebuildTime = time.Now()
+ }
+ }
+ }
+
+ // this trie Evict will commit using the current transaction.
+ // if anything goes wrong, it will still get rolled back.
+ _, err = trie.Evict(true)
+ if err != nil {
+ return fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err)
+ }
+
+ // we've just updated the merkle trie, update the hashRound to reflect that.
+ err = updateAccountsHashRound(tx, rnd)
+ if err != nil {
+ return fmt.Errorf("accountsInitialize was unable to update the account hash round to %d: %v", rnd, err)
+ }
+
+ ct.log.Infof("accountsInitialize rebuilt the merkle trie with %d entries in %v", accountsCount, time.Since(startTrieBuildTime))
+ }
+ ct.balancesTrie = trie
+ return nil
+}
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go
new file mode 100644
index 000000000..64db5f275
--- /dev/null
+++ b/ledger/catchpointtracker_test.go
@@ -0,0 +1,415 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestIsWritingCatchpointFile(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ct := &catchpointTracker{}
+
+ ct.catchpointWriting = -1
+ ans := ct.IsWritingCatchpointFile()
+ require.True(t, ans)
+
+ ct.catchpointWriting = 0
+ ans = ct.IsWritingCatchpointFile()
+ require.False(t, ans)
+}
+
+func newCatchpointTracker(tb testing.TB, l *mockLedgerForTracker, conf config.Local, dbPathPrefix string) *catchpointTracker {
+ au := &accountUpdates{}
+ ct := &catchpointTracker{}
+ au.initialize(conf)
+ ct.initialize(conf, dbPathPrefix)
+ _, err := trackerDBInitialize(l, ct.catchpointEnabled(), dbPathPrefix)
+ require.NoError(tb, err)
+
+ err = l.trackers.initialize(l, []ledgerTracker{au, ct}, conf)
+ require.NoError(tb, err)
+ err = l.trackers.loadFromDisk(l)
+ require.NoError(tb, err)
+ return ct
+}
+
+func TestGetCatchpointStream(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+
+ ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ conf.CatchpointInterval = 1
+ ct := newCatchpointTracker(t, ml, conf, ".")
+ defer ct.close()
+
+ filesToCreate := 4
+
+ temporaryDirectory, err := ioutil.TempDir(os.TempDir(), "catchpoints")
+ require.NoError(t, err)
+ defer func() {
+ os.RemoveAll(temporaryDirectory)
+ }()
+ catchpointsDirectory := filepath.Join(temporaryDirectory, "catchpoints")
+ err = os.Mkdir(catchpointsDirectory, 0777)
+ require.NoError(t, err)
+
+ ct.dbDirectory = temporaryDirectory
+
+ // Create the catchpoint files with dummy data
+ for i := 0; i < filesToCreate; i++ {
+ fileName := filepath.Join("catchpoints", fmt.Sprintf("%d.catchpoint", i))
+ data := []byte{byte(i), byte(i + 1), byte(i + 2)}
+ err = ioutil.WriteFile(filepath.Join(temporaryDirectory, fileName), data, 0666)
+ require.NoError(t, err)
+
+ // Store the catchpoint into the database
+ err := ct.accountsq.storeCatchpoint(context.Background(), basics.Round(i), fileName, "", int64(len(data)))
+ require.NoError(t, err)
+ }
+
+ dataRead := make([]byte, 3)
+ var n int
+
+ // File on disk, and database has the record
+ reader, err := ct.GetCatchpointStream(basics.Round(1))
+ require.NoError(t, err)
+ n, err = reader.Read(dataRead)
+ require.NoError(t, err)
+ require.Equal(t, 3, n)
+ outData := []byte{1, 2, 3}
+ require.Equal(t, outData, dataRead)
+ size, err := reader.Size()
+ require.NoError(t, err)
+ require.Equal(t, int64(3), size)
+
+ // File deleted, but record in the database
+ err = os.Remove(filepath.Join(temporaryDirectory, "catchpoints", "2.catchpoint"))
+ require.NoError(t, err)
+ reader, err = ct.GetCatchpointStream(basics.Round(2))
+ require.Equal(t, ledgercore.ErrNoEntry{}, err)
+ require.Nil(t, reader)
+
+ // File on disk, but database lost the record
+ err = ct.accountsq.storeCatchpoint(context.Background(), basics.Round(3), "", "", 0)
+ require.NoError(t, err)
+ reader, err = ct.GetCatchpointStream(basics.Round(3))
+ require.NoError(t, err)
+ n, err = reader.Read(dataRead)
+ require.NoError(t, err)
+ require.Equal(t, 3, n)
+ outData = []byte{3, 4, 5}
+ require.Equal(t, outData, dataRead)
+
+ err = deleteStoredCatchpoints(context.Background(), ct.accountsq, ct.dbDirectory)
+ require.NoError(t, err)
+}
+
+// TestAcctUpdatesDeleteStoredCatchpoints - The goal of this test is to verify that the deleteStoredCatchpoints function works correctly.
+// It does so by filling up the storedcatchpoints table with dummy catchpoint file entries, as well as creating these dummy files on disk
+// (the term dummy is only because these aren't real catchpoint files, but rather zero-length files). Then, the test calls the function
+// and ensures that it did not error, that the catchpoint files were correctly deleted, and that the table contains no more
+// entries.
+func TestAcctUpdatesDeleteStoredCatchpoints(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+
+ ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ conf.CatchpointInterval = 1
+ ct := newCatchpointTracker(t, ml, conf, ".")
+ defer ct.close()
+
+ dummyCatchpointFilesToCreate := 42
+
+ for i := 0; i < dummyCatchpointFilesToCreate; i++ {
+ f, err := os.Create(fmt.Sprintf("./dummy_catchpoint_file-%d", i))
+ require.NoError(t, err)
+ err = f.Close()
+ require.NoError(t, err)
+ }
+
+ for i := 0; i < dummyCatchpointFilesToCreate; i++ {
+ err := ct.accountsq.storeCatchpoint(context.Background(), basics.Round(i), fmt.Sprintf("./dummy_catchpoint_file-%d", i), "", 0)
+ require.NoError(t, err)
+ }
+ err := deleteStoredCatchpoints(context.Background(), ct.accountsq, ct.dbDirectory)
+ require.NoError(t, err)
+
+ for i := 0; i < dummyCatchpointFilesToCreate; i++ {
+ // ensure that all the files were deleted.
+ _, err := os.Open(fmt.Sprintf("./dummy_catchpoint_file-%d", i))
+ require.True(t, os.IsNotExist(err))
+ }
+ fileNames, err := ct.accountsq.getOldestCatchpointFiles(context.Background(), dummyCatchpointFilesToCreate, 0)
+ require.NoError(t, err)
+ require.Equal(t, 0, len(fileNames))
+}
+
+func BenchmarkLargeCatchpointWriting(b *testing.B) {
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(5, true)}
+
+ pooldata := basics.AccountData{}
+ pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ pooldata.Status = basics.NotParticipating
+ accts[0][testPoolAddr] = pooldata
+
+ sinkdata := basics.AccountData{}
+ sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ sinkdata.Status = basics.NotParticipating
+ accts[0][testSinkAddr] = sinkdata
+
+ ml := makeMockLedgerForTracker(b, true, 10, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
+
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ ct := catchpointTracker{}
+ ct.initialize(cfg, ".")
+
+ temporaryDirectory, err := ioutil.TempDir(os.TempDir(), "catchpoints")
+ require.NoError(b, err)
+ defer func() {
+ os.RemoveAll(temporaryDirectory)
+ }()
+ catchpointsDirectory := filepath.Join(temporaryDirectory, "catchpoints")
+ err = os.Mkdir(catchpointsDirectory, 0777)
+ require.NoError(b, err)
+
+ ct.dbDirectory = temporaryDirectory
+
+ err = ct.loadFromDisk(ml, 0)
+ require.NoError(b, err)
+ defer ct.close()
+
+ // at this point, the database was created. We want to fill the accounts data
+ accountsNumber := 6000000 * b.N
+ err = ml.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ for i := 0; i < accountsNumber-5-2; { // subtract the 5 accounts we've already created above, plus the sink/reward pair
+ var updates compactAccountDeltas
+ for k := 0; i < accountsNumber-5-2 && k < 1024; k++ {
+ addr := ledgertesting.RandomAddress()
+ acctData := basics.AccountData{}
+ acctData.MicroAlgos.Raw = 1
+ updates.upsert(addr, accountDelta{new: acctData})
+ i++
+ }
+
+ _, err = accountsNewRound(tx, updates, nil, proto, basics.Round(1))
+ if err != nil {
+ return
+ }
+ }
+
+ return updateAccountsHashRound(tx, 1)
+ })
+ require.NoError(b, err)
+
+ b.ResetTimer()
+ ct.generateCatchpoint(context.Background(), basics.Round(0), "0#ABCD", crypto.Digest{}, time.Second)
+ b.StopTimer()
+ b.ReportMetric(float64(accountsNumber), "accounts")
+}
+
+func TestReproducibleCatchpointLabels(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
+ t.Skip("This test is too slow on ARM and causes travis builds to time out")
+ }
+ // create new protocol version, which has lower lookback
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestReproducibleCatchpointLabels")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.MaxBalLookback = 32
+ protoParams.SeedLookback = 2
+ protoParams.SeedRefreshInterval = 8
+ config.Consensus[testProtocolVersion] = protoParams
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+ rewardsLevels := []uint64{0}
+
+ pooldata := basics.AccountData{}
+ pooldata.MicroAlgos.Raw = 100 * 1000 * 1000 * 1000 * 1000
+ pooldata.Status = basics.NotParticipating
+ accts[0][testPoolAddr] = pooldata
+
+ sinkdata := basics.AccountData{}
+ sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ sinkdata.Status = basics.NotParticipating
+ accts[0][testSinkAddr] = sinkdata
+
+ ml := makeMockLedgerForTracker(t, false, 1, testProtocolVersion, accts)
+ defer ml.Close()
+
+ cfg := config.GetDefaultLocal()
+ cfg.CatchpointInterval = 50
+ cfg.CatchpointTracking = 1
+ ct := newCatchpointTracker(t, ml, cfg, ".")
+ au := ml.trackers.accts
+ defer ct.close()
+
+ rewardLevel := uint64(0)
+
+ const testCatchpointLabelsCount = 5
+
+ // lastCreatableID stores the max used asset or app index, to avoid conflicts
+ lastCreatableID := crypto.RandUint64() % 512
+ knownCreatables := make(map[basics.CreatableIndex]bool)
+ catchpointLabels := make(map[basics.Round]string)
+ ledgerHistory := make(map[basics.Round]*mockLedgerForTracker)
+ roundDeltas := make(map[basics.Round]ledgercore.StateDelta)
+ for i := basics.Round(1); i <= basics.Round(testCatchpointLabelsCount*cfg.CatchpointInterval); i++ {
+ rewardLevelDelta := crypto.RandUint64() % 5
+ rewardLevel += rewardLevelDelta
+ var updates ledgercore.AccountDeltas
+ var totals map[basics.Address]basics.AccountData
+ base := accts[i-1]
+ updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
+ prevTotals, err := au.Totals(basics.Round(i - 1))
+ require.NoError(t, err)
+
+ newPool := totals[testPoolAddr]
+ newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta
+ updates.Upsert(testPoolAddr, newPool)
+ totals[testPoolAddr] = newPool
+
+ newTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, rewardLevel, protoParams, base, prevTotals)
+
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(i),
+ },
+ }
+ blk.RewardsLevel = rewardLevel
+ blk.CurrentProtocol = testProtocolVersion
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
+ delta.Accts.MergeAccounts(updates)
+ delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables)
+ delta.Totals = newTotals
+
+ ml.trackers.newBlock(blk, delta)
+ ml.trackers.committedUpTo(i)
+ ml.addMockBlock(blockEntry{block: blk}, delta)
+ accts = append(accts, totals)
+ rewardsLevels = append(rewardsLevels, rewardLevel)
+ roundDeltas[i] = delta
+
+ // if this is a catchpoint round, save the label.
+ if uint64(i)%cfg.CatchpointInterval == 0 {
+ ml.trackers.waitAccountsWriting()
+ catchpointLabels[i] = ct.GetLastCatchpointLabel()
+ ledgerHistory[i] = ml.fork(t)
+ defer ledgerHistory[i].Close()
+ }
+ }
+
+ // test in reverse what happens when we try to repeat the exact same blocks.
+ // start off with the catchpoint before the last one
+ startingRound := basics.Round((testCatchpointLabelsCount - 1) * cfg.CatchpointInterval)
+ for ; startingRound > basics.Round(cfg.CatchpointInterval); startingRound -= basics.Round(cfg.CatchpointInterval) {
+ au.close()
+ ml2 := ledgerHistory[startingRound]
+
+ ct := newCatchpointTracker(t, ml2, cfg, ".")
+ for i := startingRound + 1; i <= basics.Round(testCatchpointLabelsCount*cfg.CatchpointInterval); i++ {
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(i),
+ },
+ }
+ blk.RewardsLevel = rewardsLevels[i]
+ blk.CurrentProtocol = testProtocolVersion
+ delta := roundDeltas[i]
+ ml2.trackers.newBlock(blk, delta)
+ ml2.trackers.committedUpTo(i)
+
+ // if this is a catchpoint round, check the label.
+ if uint64(i)%cfg.CatchpointInterval == 0 {
+ ml2.trackers.waitAccountsWriting()
+ require.Equal(t, catchpointLabels[i], ct.GetLastCatchpointLabel())
+ }
+ }
+ }
+
+ // verify that after loadFromDisk, all the tracker content is reset (as expected)
+ require.NotZero(t, len(ct.roundDigest))
+ require.NoError(t, ct.loadFromDisk(ml, ml.Latest()))
+ require.Zero(t, len(ct.roundDigest))
+ require.Zero(t, ct.catchpointWriting)
+ select {
+ case _, closed := <-ct.catchpointSlowWriting:
+ require.False(t, closed)
+ default:
+ require.FailNow(t, "catchpointSlowWriting should have been a closed channel; it appears to be nil")
+ }
+}
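
The property this test pins down is determinism: forking the ledger at an earlier catchpoint and replaying byte-identical blocks and deltas must reproduce the same labels. A minimal, self-contained sketch of that invariant, assuming a label is purely a function of the round and the committed state digest (the makeLabel helper below is hypothetical, not the ledgercore implementation):

    package main

    import (
    	"crypto/sha512"
    	"encoding/base32"
    	"fmt"
    )

    // makeLabel is a hypothetical stand-in for label derivation: it hashes
    // only the round and a state digest, so replaying identical state must
    // reproduce the label byte for byte.
    func makeLabel(round uint64, stateDigest [32]byte) string {
    	h := sha512.New512_256()
    	fmt.Fprintf(h, "%d", round)
    	h.Write(stateDigest[:])
    	return base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(h.Sum(nil))
    }

    func main() {
    	digest := [32]byte{1, 2, 3}
    	first := makeLabel(50, digest)
    	replayed := makeLabel(50, digest) // second pass over the same state
    	fmt.Println(first == replayed)    // true: labels are reproducible
    }
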
+
+func TestCatchpointTrackerPrepareCommit(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ct := &catchpointTracker{}
+ const maxOffset = 40
+ const maxLookback = 320
+ ct.roundDigest = make([]crypto.Digest, maxOffset+maxLookback)
+ for i := 0; i < len(ct.roundDigest); i++ {
+ ct.roundDigest[i] = crypto.Hash([]byte{byte(i), byte(i / 256)})
+ }
+ dcc := &deferredCommitContext{}
+ for offset := uint64(1); offset < maxOffset; offset++ {
+ dcc.offset = offset
+ for lookback := basics.Round(0); lookback < maxLookback; lookback += 20 {
+ dcc.lookback = lookback
+ for _, isCatchpointRound := range []bool{false, true} {
+ dcc.isCatchpointRound = isCatchpointRound
+ require.NoError(t, ct.prepareCommit(dcc))
+ if isCatchpointRound {
+ expectedRound := offset + uint64(lookback) - 1
+ expectedHash := crypto.Hash([]byte{byte(expectedRound), byte(expectedRound / 256)})
+ require.Equal(t, expectedHash[:], dcc.committedRoundDigest[:])
+ }
+ }
+ }
+ }
+}
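
The test seeds roundDigest with a digest derived from each index, then asserts that on a catchpoint round prepareCommit selects roundDigest[offset+lookback-1]. A compact sketch of the indexing being asserted (helper name hypothetical):

    package main

    import "fmt"

    // expectedDigestIndex mirrors the test's expectation: with `offset`
    // rounds being flushed and `lookback` rounds of retained history, the
    // committed round's digest sits at offset+lookback-1.
    func expectedDigestIndex(offset, lookback uint64) uint64 {
    	return offset + lookback - 1
    }

    func main() {
    	fmt.Println(expectedDigestIndex(40, 320)) // 359, the last seeded slot
    }
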
diff --git a/ledger/catchpointwriter.go b/ledger/catchpointwriter.go
index 26fa65f71..349748176 100644
--- a/ledger/catchpointwriter.go
+++ b/ledger/catchpointwriter.go
@@ -294,7 +294,7 @@ func (cw *catchpointWriter) readDatabaseStep(ctx context.Context, tx *sql.Tx) (e
func (cw *catchpointWriter) readHeaderFromDatabase(ctx context.Context, tx *sql.Tx) (err error) {
var header CatchpointFileHeader
- header.BalancesRound, _, err = accountsRound(tx)
+ header.BalancesRound, err = accountsRound(tx)
if err != nil {
return
}
diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go
index 932ab37ad..0b14f5524 100644
--- a/ledger/catchpointwriter_test.go
+++ b/ledger/catchpointwriter_test.go
@@ -34,6 +34,8 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -182,18 +184,16 @@ func TestBasicCatchpointWriter(t *testing.T) {
delete(config.Consensus, testProtocolVersion)
os.RemoveAll(temporaryDirectroy)
}()
+ accts := ledgertesting.RandomAccounts(300, false)
- ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion)
+ ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, []map[basics.Address]basics.AccountData{accts})
defer ml.Close()
- accts := randomAccounts(300, false)
- au := &accountUpdates{}
conf := config.GetDefaultLocal()
conf.CatchpointInterval = 1
conf.Archival = true
- au.initialize(conf, ".", protoParams, accts)
- defer au.close()
- err := au.loadFromDisk(ml)
+ au := newAcctUpdates(t, ml, conf, ".")
+ err := au.loadFromDisk(ml, 0)
require.NoError(t, err)
au.close()
fileName := filepath.Join(temporaryDirectroy, "15.catchpoint")
@@ -283,17 +283,15 @@ func TestFullCatchpointWriter(t *testing.T) {
os.RemoveAll(temporaryDirectroy)
}()
- ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion)
+ accts := ledgertesting.RandomAccounts(BalancesPerCatchpointFileChunk*3, false)
+ ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, []map[basics.Address]basics.AccountData{accts})
defer ml.Close()
- accts := randomAccounts(BalancesPerCatchpointFileChunk*3, false)
- au := &accountUpdates{}
conf := config.GetDefaultLocal()
conf.CatchpointInterval = 1
conf.Archival = true
- au.initialize(conf, ".", protoParams, accts)
- defer au.close()
- err := au.loadFromDisk(ml)
+ au := newAcctUpdates(t, ml, conf, ".")
+ err := au.loadFromDisk(ml, 0)
require.NoError(t, err)
au.close()
fileName := filepath.Join(temporaryDirectroy, "15.catchpoint")
@@ -315,7 +313,7 @@ func TestFullCatchpointWriter(t *testing.T) {
require.NoError(t, err)
// create a ledger.
- var initState InitState
+ var initState ledgercore.InitState
initState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
l, err := OpenLedger(ml.log, "TestFullCatchpointWriter", true, initState, conf)
require.NoError(t, err)
diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go
index 38b396b19..784a258c7 100644
--- a/ledger/catchupaccessor.go
+++ b/ledger/catchupaccessor.go
@@ -121,7 +121,7 @@ const (
func MakeCatchpointCatchupAccessor(ledger *Ledger, log logging.Logger) CatchpointCatchupAccessor {
rdb := ledger.trackerDB().Rdb
wdb := ledger.trackerDB().Wdb
- accountsq, err := accountsDbInit(rdb.Handle, wdb.Handle)
+ accountsq, err := accountsInitDbQueries(rdb.Handle, wdb.Handle)
if err != nil {
log.Warnf("unable to initialize account db in MakeCatchpointCatchupAccessor : %v", err)
return nil
@@ -193,7 +193,7 @@ func (c *CatchpointCatchupAccessorImpl) ResetStagingBalances(ctx context.Context
return fmt.Errorf("unable to reset catchpoint catchup balances : %v", err)
}
if !newCatchup {
- sq, err := accountsDbInit(tx, tx)
+ sq, err := accountsInitDbQueries(tx, tx)
if err != nil {
return fmt.Errorf("unable to initialize accountsDbInit: %v", err)
}
@@ -271,7 +271,7 @@ func (c *CatchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex
start := time.Now()
ledgerProcessstagingcontentCount.Inc(nil)
err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- sq, err := accountsDbInit(tx, tx)
+ sq, err := accountsInitDbQueries(tx, tx)
if err != nil {
return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to initialize accountsDbInit: %v", err)
}
@@ -665,7 +665,7 @@ func (c *CatchpointCatchupAccessorImpl) StoreBalancesRound(ctx context.Context,
start := time.Now()
ledgerStorebalancesroundCount.Inc(nil)
err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- sq, err := accountsDbInit(tx, tx)
+ sq, err := accountsInitDbQueries(tx, tx)
if err != nil {
return fmt.Errorf("CatchpointCatchupAccessorImpl::StoreBalancesRound: unable to initialize accountsDbInit: %v", err)
}
@@ -769,7 +769,7 @@ func (c *CatchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err
var balancesRound uint64
var totals ledgercore.AccountTotals
- sq, err := accountsDbInit(tx, tx)
+ sq, err := accountsInitDbQueries(tx, tx)
if err != nil {
return fmt.Errorf("unable to initialize accountsDbInit: %v", err)
}
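
Every call site in this file swaps accountsDbInit for accountsInitDbQueries; the surrounding pattern of building prepared-query helpers from transaction handles inside an atomic block is unchanged. A generic, hedged sketch of that pattern (the dbQueries type and table name below are illustrative, not the ledger's real schema or API):

    package main

    import (
    	"context"
    	"database/sql"
    	"fmt"
    )

    // dbQueries is an illustrative stand-in for the prepared-statement
    // bundle that an accountsInitDbQueries-style constructor returns.
    type dbQueries struct {
    	lookup *sql.Stmt
    }

    func initDbQueries(r, w *sql.Tx) (*dbQueries, error) {
    	lookup, err := r.Prepare("SELECT data FROM accounts WHERE address=?")
    	if err != nil {
    		return nil, fmt.Errorf("unable to initialize queries: %w", err)
    	}
    	_ = w // the write handle would back the mutating statements
    	return &dbQueries{lookup: lookup}, nil
    }

    // atomicUse shows the call shape from the hunks above: the same
    // transaction serves as both the read and write handle.
    func atomicUse(ctx context.Context, db *sql.DB) error {
    	tx, err := db.BeginTx(ctx, nil)
    	if err != nil {
    		return err
    	}
    	defer tx.Rollback()
    	if _, err := initDbQueries(tx, tx); err != nil {
    		return err
    	}
    	return tx.Commit()
    }

    func main() {}
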
diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go
index 576280d8e..e3a073cdf 100644
--- a/ledger/catchupaccessor_test.go
+++ b/ledger/catchupaccessor_test.go
@@ -32,6 +32,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -69,7 +70,7 @@ func createTestingEncodedChunks(accountsCount uint64) (encodedAccountChunks [][]
}
func benchmarkRestoringFromCatchpointFileHelper(b *testing.B) {
- genesisInitState, _ := testGenerateInitState(b, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ := ledgertesting.GenerateInitState(b, protocol.ConsensusCurrentVersion, 100)
const inMem = false
log := logging.TestingLog(b)
cfg := config.GetDefaultLocal()
@@ -145,7 +146,7 @@ func TestCatchupAcessorFoo(t *testing.T) {
log := logging.TestingLog(t)
dbBaseFileName := t.Name()
const inMem = true
- genesisInitState, _ /* initKeys */ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ /* initKeys */ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
cfg := config.GetDefaultLocal()
l, err := OpenLedger(log, dbBaseFileName, inMem, genesisInitState, cfg)
require.NoError(t, err, "could not open ledger")
@@ -201,7 +202,7 @@ func TestBuildMerkleTrie(t *testing.T) {
log := logging.TestingLog(t)
dbBaseFileName := t.Name()
const inMem = true
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
cfg := config.GetDefaultLocal()
l, err := OpenLedger(log, dbBaseFileName, inMem, genesisInitState, cfg)
require.NoError(t, err, "could not open ledger")
@@ -300,7 +301,7 @@ func TestCatchupAccessorBlockdb(t *testing.T) {
log := logging.TestingLog(t)
dbBaseFileName := t.Name()
const inMem = true
- genesisInitState, _ /*initKeys*/ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ /*initKeys*/ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
cfg := config.GetDefaultLocal()
l, err := OpenLedger(log, dbBaseFileName, inMem, genesisInitState, cfg)
require.NoError(t, err, "could not open ledger")
@@ -326,7 +327,7 @@ func TestVerifyCatchpoint(t *testing.T) {
log := logging.TestingLog(t)
dbBaseFileName := t.Name()
const inMem = true
- genesisInitState, _ /*initKeys*/ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ /*initKeys*/ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
cfg := config.GetDefaultLocal()
l, err := OpenLedger(log, dbBaseFileName, inMem, genesisInitState, cfg)
require.NoError(t, err, "could not open ledger")
diff --git a/ledger/eval_test.go b/ledger/eval_test.go
deleted file mode 100644
index 9d3598250..000000000
--- a/ledger/eval_test.go
+++ /dev/null
@@ -1,1831 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package ledger
-
-import (
- "context"
- "errors"
- "fmt"
- "math/rand"
- "os"
- "path/filepath"
- "reflect"
- "runtime/pprof"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/algorand/go-deadlock"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/agreement"
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/compactcert"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/data/transactions/logic"
- "github.com/algorand/go-algorand/data/txntest"
- "github.com/algorand/go-algorand/ledger/ledgercore"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/algorand/go-algorand/util/execpool"
-)
-
-var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
-var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
-var minFee basics.MicroAlgos
-
-func init() {
- params := config.Consensus[protocol.ConsensusCurrentVersion]
- minFee = basics.MicroAlgos{Raw: params.MinTxnFee}
-}
-
-func TestBlockEvaluator(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genesisInitState, addrs, keys := genesis(10)
-
- dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- const inMem = true
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- l, err := OpenLedger(logging.Base(), dbName, inMem, genesisInitState, cfg)
- require.NoError(t, err)
- defer l.Close()
-
- newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0)
- require.Equal(t, eval.specials.FeeSink, testSinkAddr)
- require.NoError(t, err)
-
- genHash := genesisInitState.Block.BlockHeader.GenesisHash
- txn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: addrs[0],
- Fee: minFee,
- FirstValid: newBlock.Round(),
- LastValid: newBlock.Round(),
- GenesisHash: genHash,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: addrs[1],
- Amount: basics.MicroAlgos{Raw: 100},
- },
- }
-
- // Correct signature should work
- st := txn.Sign(keys[0])
- err = eval.Transaction(st, transactions.ApplyData{})
- require.NoError(t, err)
-
- // Broken signature should fail
- stbad := st
- st.Sig[2] ^= 8
- txgroup := []transactions.SignedTxn{stbad}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
-
- // Repeat should fail
- txgroup = []transactions.SignedTxn{st}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- err = eval.Transaction(st, transactions.ApplyData{})
- require.Error(t, err)
-
- // out of range should fail
- btxn := txn
- btxn.FirstValid++
- btxn.LastValid += 2
- st = btxn.Sign(keys[0])
- txgroup = []transactions.SignedTxn{st}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- err = eval.Transaction(st, transactions.ApplyData{})
- require.Error(t, err)
-
- // bogus group should fail
- btxn = txn
- btxn.Group[1] = 1
- st = btxn.Sign(keys[0])
- txgroup = []transactions.SignedTxn{st}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- err = eval.Transaction(st, transactions.ApplyData{})
- require.Error(t, err)
-
- // mixed fields should fail
- btxn = txn
- btxn.XferAsset = 3
- st = btxn.Sign(keys[0])
- txgroup = []transactions.SignedTxn{st}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- // We don't test eval.Transaction() here because it doesn't check txn.WellFormed(), instead relying on that to have already been checked by the transaction pool.
- // err = eval.Transaction(st, transactions.ApplyData{})
- // require.Error(t, err)
-
- selfTxn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: addrs[2],
- Fee: minFee,
- FirstValid: newBlock.Round(),
- LastValid: newBlock.Round(),
- GenesisHash: genHash,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: addrs[2],
- Amount: basics.MicroAlgos{Raw: 100},
- },
- }
- stxn := selfTxn.Sign(keys[2])
-
- // TestTransactionGroup() and Transaction() should have the same outcome, but work slightly different code paths.
- txgroup = []transactions.SignedTxn{stxn}
- err = eval.TestTransactionGroup(txgroup)
- require.NoError(t, err)
-
- err = eval.Transaction(stxn, transactions.ApplyData{})
- require.NoError(t, err)
-
- t3 := txn
- t3.Amount.Raw++
- t4 := selfTxn
- t4.Amount.Raw++
-
- // a group without .Group should fail
- s3 := t3.Sign(keys[0])
- s4 := t4.Sign(keys[2])
- txgroup = []transactions.SignedTxn{s3, s4}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- txgroupad := transactions.WrapSignedTxnsWithAD(txgroup)
- err = eval.TransactionGroup(txgroupad)
- require.Error(t, err)
-
- // Test a group that should work
- var group transactions.TxGroup
- group.TxGroupHashes = []crypto.Digest{crypto.HashObj(t3), crypto.HashObj(t4)}
- t3.Group = crypto.HashObj(group)
- t4.Group = t3.Group
- s3 = t3.Sign(keys[0])
- s4 = t4.Sign(keys[2])
- txgroup = []transactions.SignedTxn{s3, s4}
- err = eval.TestTransactionGroup(txgroup)
- require.NoError(t, err)
-
- // disagreement on Group id should fail
- t4bad := t4
- t4bad.Group[3] ^= 3
- s4bad := t4bad.Sign(keys[2])
- txgroup = []transactions.SignedTxn{s3, s4bad}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- txgroupad = transactions.WrapSignedTxnsWithAD(txgroup)
- err = eval.TransactionGroup(txgroupad)
- require.Error(t, err)
-
- // missing part of the group should fail
- txgroup = []transactions.SignedTxn{s3}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
-
- validatedBlock, err := eval.GenerateBlock()
- require.NoError(t, err)
-
- accts := genesisInitState.Accounts
- bal0 := accts[addrs[0]]
- bal1 := accts[addrs[1]]
- bal2 := accts[addrs[2]]
-
- l.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
-
- bal0new, err := l.Lookup(newBlock.Round(), addrs[0])
- require.NoError(t, err)
- bal1new, err := l.Lookup(newBlock.Round(), addrs[1])
- require.NoError(t, err)
- bal2new, err := l.Lookup(newBlock.Round(), addrs[2])
- require.NoError(t, err)
-
- require.Equal(t, bal0new.MicroAlgos.Raw, bal0.MicroAlgos.Raw-minFee.Raw-100)
- require.Equal(t, bal1new.MicroAlgos.Raw, bal1.MicroAlgos.Raw+100)
- require.Equal(t, bal2new.MicroAlgos.Raw, bal2.MicroAlgos.Raw-minFee.Raw)
-}
-
-func TestRekeying(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- // Pretend rekeying is supported
- actual := config.Consensus[protocol.ConsensusCurrentVersion]
- pretend := actual
- pretend.SupportRekeying = true
- config.Consensus[protocol.ConsensusCurrentVersion] = pretend
- defer func() {
- config.Consensus[protocol.ConsensusCurrentVersion] = actual
- }()
-
- // Bring up a ledger
- genesisInitState, addrs, keys := genesis(10)
- dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- const inMem = true
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- l, err := OpenLedger(logging.Base(), dbName, inMem, genesisInitState, cfg)
- require.NoError(t, err)
- defer l.Close()
-
- // Make a new block
- nextRound := l.Latest() + basics.Round(1)
- genHash := genesisInitState.Block.BlockHeader.GenesisHash
-
- // Test plan
- // Syntax: [A -> B][C, D] means transaction from A that rekeys to B with authaddr C and actual sig from D
- makeTxn := func(sender, rekeyto, authaddr basics.Address, signer *crypto.SignatureSecrets, uniq uint8) transactions.SignedTxn {
- txn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: sender,
- Fee: minFee,
- FirstValid: nextRound,
- LastValid: nextRound,
- GenesisHash: genHash,
- RekeyTo: rekeyto,
- Note: []byte{uniq},
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: sender,
- },
- }
- sig := signer.Sign(txn)
- return transactions.SignedTxn{Txn: txn, Sig: sig, AuthAddr: authaddr}
- }
-
- tryBlock := func(stxns []transactions.SignedTxn) error {
- // We'll make a block using the evaluator.
- // When generating a block, the evaluator doesn't check transaction sigs -- it assumes the transaction pool already did that.
- // So the ValidatedBlock that comes out isn't necessarily actually a valid block. We'll call Validate ourselves.
-
- newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0)
- require.NoError(t, err)
-
- for _, stxn := range stxns {
- err = eval.Transaction(stxn, transactions.ApplyData{})
- if err != nil {
- return err
- }
- }
- validatedBlock, err := eval.GenerateBlock()
- if err != nil {
- return err
- }
-
- backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
- defer backlogPool.Shutdown()
- _, err = l.Validate(context.Background(), validatedBlock.Block(), backlogPool)
- return err
- }
-
- // Preamble transactions, which all of the blocks in this test will start with
- // [A -> 0][0,A] (normal transaction)
- // [A -> B][0,A] (rekey)
- txn0 := makeTxn(addrs[0], basics.Address{}, basics.Address{}, keys[0], 0) // Normal transaction
- txn1 := makeTxn(addrs[0], addrs[1], basics.Address{}, keys[0], 1) // Rekey transaction
-
- // Test 1: Do only good things
- // (preamble)
- // [A -> 0][B,B] (normal transaction using new key)
- // [A -> A][B,B] (rekey back to A, transaction still signed by B)
- // [A -> 0][0,A] (normal transaction again)
- test1txns := []transactions.SignedTxn{
- txn0, txn1, // (preamble)
- makeTxn(addrs[0], basics.Address{}, addrs[1], keys[1], 2), // [A -> 0][B,B]
- makeTxn(addrs[0], addrs[0], addrs[1], keys[1], 3), // [A -> A][B,B]
- makeTxn(addrs[0], basics.Address{}, basics.Address{}, keys[0], 4), // [A -> 0][0,A]
- }
- err = tryBlock(test1txns)
- require.NoError(t, err)
-
- // Test 2: Use old key after rekeying
- // (preamble)
- // [A -> A][0,A] (rekey back to A, but signed by A instead of B)
- test2txns := []transactions.SignedTxn{
- txn0, txn1, // (preamble)
- makeTxn(addrs[0], addrs[0], basics.Address{}, keys[0], 2), // [A -> A][0,A]
- }
- err = tryBlock(test2txns)
- require.Error(t, err)
-
- // TODO: More tests
-}
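
The notation [A -> B][C, D] compresses the rule under test: after a rekey, spending authority for the account moves to the rekey target, and a zero AuthAddr means the account still signs for itself. A minimal sketch of that authorizer resolution, under that stated assumption (hypothetical address type, not the evaluator's code):

    package main

    import "fmt"

    type address string

    // authorizer resolves which key must sign for an account: a zero
    // authAddr means the account's original key, otherwise the rekey target.
    func authorizer(sender, authAddr address) address {
    	if authAddr == "" {
    		return sender
    	}
    	return authAddr
    }

    func main() {
    	fmt.Println(authorizer("A", ""))  // A still signs for itself
    	fmt.Println(authorizer("A", "B")) // after [A -> B], B signs for A
    }
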
-
-func TestPrepareEvalParams(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- eval := BlockEvaluator{
- prevHeader: bookkeeping.BlockHeader{
- TimeStamp: 1234,
- Round: 2345,
- },
- }
-
- params := []config.ConsensusParams{
- {Application: true, MaxAppProgramCost: 700},
- config.Consensus[protocol.ConsensusV29],
- config.Consensus[protocol.ConsensusFuture],
- }
-
- // Create some sample transactions
- payment := txntest.Txn{
- Type: protocol.PaymentTx,
- Sender: basics.Address{1, 2, 3, 4},
- Receiver: basics.Address{4, 3, 2, 1},
- Amount: 100,
- }.SignedTxnWithAD()
-
- appcall1 := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: basics.Address{1, 2, 3, 4},
- ApplicationID: basics.AppIndex(1),
- }.SignedTxnWithAD()
-
- appcall2 := appcall1
- appcall2.SignedTxn.Txn.ApplicationCallTxnFields.ApplicationID = basics.AppIndex(2)
-
- type evalTestCase struct {
- group []transactions.SignedTxnWithAD
-
- // indicates if prepareEvalParams should return a non-nil
- // EvalParams for the txn at index i
- expected []bool
-
- numAppCalls int
- // Used for checking transitive pointer equality in app calls
- // If there are no app calls in the group, it is set to -1
- firstAppCallIndex int
- }
-
- // Create some groups with these transactions
- cases := []evalTestCase{
- {[]transactions.SignedTxnWithAD{payment}, []bool{false}, 0, -1},
- {[]transactions.SignedTxnWithAD{appcall1}, []bool{true}, 1, 0},
- {[]transactions.SignedTxnWithAD{payment, payment}, []bool{false, false}, 0, -1},
- {[]transactions.SignedTxnWithAD{appcall1, payment}, []bool{true, false}, 1, 0},
- {[]transactions.SignedTxnWithAD{payment, appcall1}, []bool{false, true}, 1, 1},
- {[]transactions.SignedTxnWithAD{appcall1, appcall2}, []bool{true, true}, 2, 0},
- {[]transactions.SignedTxnWithAD{appcall1, appcall2, appcall1}, []bool{true, true, true}, 3, 0},
- {[]transactions.SignedTxnWithAD{payment, appcall1, payment}, []bool{false, true, false}, 1, 1},
- {[]transactions.SignedTxnWithAD{appcall1, payment, appcall2}, []bool{true, false, true}, 2, 0},
- }
-
- for i, param := range params {
- for j, testCase := range cases {
- t.Run(fmt.Sprintf("i=%d,j=%d", i, j), func(t *testing.T) {
- eval.proto = param
- res := eval.prepareEvalParams(testCase.group)
- require.Equal(t, len(res), len(testCase.group))
-
- // Compute the expected transaction group without ApplyData for
- // the test case
- expGroupNoAD := make([]transactions.SignedTxn, len(testCase.group))
- for k := range testCase.group {
- expGroupNoAD[k] = testCase.group[k].SignedTxn
- }
-
- // Ensure non app calls have a nil evaluator, and that non-nil
- // evaluators point to the right transactions and values
- for k, present := range testCase.expected {
- if present {
- require.NotNil(t, res[k])
- require.NotNil(t, res[k].PastSideEffects)
- require.Equal(t, res[k].GroupIndex, uint64(k))
- require.Equal(t, res[k].TxnGroup, expGroupNoAD)
- require.Equal(t, *res[k].Proto, eval.proto)
- require.Equal(t, *res[k].Txn, testCase.group[k].SignedTxn)
- require.Equal(t, res[k].MinTealVersion, res[testCase.firstAppCallIndex].MinTealVersion)
- require.Equal(t, res[k].PooledApplicationBudget, res[testCase.firstAppCallIndex].PooledApplicationBudget)
- if reflect.DeepEqual(param, config.Consensus[protocol.ConsensusV29]) {
- require.Equal(t, *res[k].PooledApplicationBudget, uint64(eval.proto.MaxAppProgramCost))
- } else if reflect.DeepEqual(param, config.Consensus[protocol.ConsensusFuture]) {
- require.Equal(t, *res[k].PooledApplicationBudget, uint64(eval.proto.MaxAppProgramCost*testCase.numAppCalls))
- }
- } else {
- require.Nil(t, res[k])
- }
- }
- })
- }
- }
-}
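
The two budget assertions encode the pooling rule this test checks: before pooling (the V29 case) each app call gets a flat MaxAppProgramCost, while with pooling (the Future case) the group shares MaxAppProgramCost multiplied by the number of app calls. A small sketch of that rule, assuming those are the only two regimes in play:

    package main

    import "fmt"

    // pooledBudget returns the per-group app budget the test expects:
    // a flat per-call budget without pooling, cost*numAppCalls with it.
    func pooledBudget(maxAppProgramCost, numAppCalls int, pooling bool) int {
    	if !pooling {
    		return maxAppProgramCost
    	}
    	return maxAppProgramCost * numAppCalls
    }

    func main() {
    	fmt.Println(pooledBudget(700, 3, false)) // 700
    	fmt.Println(pooledBudget(700, 3, true))  // 2100, as in the error strings below
    }
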
-
-func testLedgerCleanup(l *Ledger, dbName string, inMem bool) {
- l.Close()
- if !inMem {
- hits, err := filepath.Glob(dbName + "*.sqlite")
- if err != nil {
- return
- }
- for _, fname := range hits {
- os.Remove(fname)
- }
- }
-}
-
-func testEvalAppGroup(t *testing.T, schema basics.StateSchema) (*BlockEvaluator, basics.Address, error) {
- genesisInitState, addrs, keys := genesis(10)
-
- dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- const inMem = true
- cfg := config.GetDefaultLocal()
- l, err := OpenLedger(logging.Base(), dbName, inMem, genesisInitState, cfg)
- require.NoError(t, err)
- defer l.Close()
-
- newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0)
- require.NoError(t, err)
- eval.validate = true
- eval.generate = false
-
- ops, err := logic.AssembleString(`#pragma version 2
- txn ApplicationID
- bz create
- byte "caller"
- txn Sender
- app_global_put
- b ok
-create:
- byte "creator"
- txn Sender
- app_global_put
-ok:
- int 1`)
- require.NoError(t, err, ops.Errors)
- approval := ops.Program
- ops, err = logic.AssembleString("#pragma version 2\nint 1")
- require.NoError(t, err)
- clear := ops.Program
-
- genHash := genesisInitState.Block.BlockHeader.GenesisHash
- header := transactions.Header{
- Sender: addrs[0],
- Fee: minFee,
- FirstValid: newBlock.Round(),
- LastValid: newBlock.Round(),
- GenesisHash: genHash,
- }
- appcall1 := transactions.Transaction{
- Type: protocol.ApplicationCallTx,
- Header: header,
- ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
- GlobalStateSchema: schema,
- ApprovalProgram: approval,
- ClearStateProgram: clear,
- },
- }
-
- appcall2 := transactions.Transaction{
- Type: protocol.ApplicationCallTx,
- Header: header,
- ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
- ApplicationID: 1,
- },
- }
-
- var group transactions.TxGroup
- group.TxGroupHashes = []crypto.Digest{crypto.HashObj(appcall1), crypto.HashObj(appcall2)}
- appcall1.Group = crypto.HashObj(group)
- appcall2.Group = crypto.HashObj(group)
- stxn1 := appcall1.Sign(keys[0])
- stxn2 := appcall2.Sign(keys[0])
-
- g := []transactions.SignedTxnWithAD{
- {
- SignedTxn: stxn1,
- ApplyData: transactions.ApplyData{
- EvalDelta: transactions.EvalDelta{GlobalDelta: map[string]basics.ValueDelta{
- "creator": {Action: basics.SetBytesAction, Bytes: string(addrs[0][:])}},
- },
- ApplicationID: 1,
- },
- },
- {
- SignedTxn: stxn2,
- ApplyData: transactions.ApplyData{
- EvalDelta: transactions.EvalDelta{GlobalDelta: map[string]basics.ValueDelta{
- "caller": {Action: basics.SetBytesAction, Bytes: string(addrs[0][:])}},
- }},
- },
- }
- txgroup := []transactions.SignedTxn{stxn1, stxn2}
- err = eval.TestTransactionGroup(txgroup)
- if err != nil {
- return eval, addrs[0], err
- }
- err = eval.transactionGroup(g)
- return eval, addrs[0], err
-}
-
-// TestEvalAppStateCountsWithTxnGroup ensures txns in a group can't violate app state schema limits
-// the test ensures that
-// commitToParent -> applyChild copies child's cow state usage counts into parent
-// and that the usage counts are correctly propagated from parent cow to child cow and back
-func TestEvalAppStateCountsWithTxnGroup(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- _, _, err := testEvalAppGroup(t, basics.StateSchema{NumByteSlice: 1})
- require.Error(t, err)
- require.Contains(t, err.Error(), "store bytes count 2 exceeds schema bytes count 1")
-}
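
The asserted message pins the schema arithmetic: two global byte-slice writes against a schema that allows one. A sketch of that counting check (a hypothetical check, not the roundCowState implementation):

    package main

    import "fmt"

    // checkSchema mirrors the error asserted above: stored byte values
    // must not exceed the declared NumByteSlice allowance.
    func checkSchema(storedBytes, schemaBytes uint64) error {
    	if storedBytes > schemaBytes {
    		return fmt.Errorf("store bytes count %d exceeds schema bytes count %d", storedBytes, schemaBytes)
    	}
    	return nil
    }

    func main() {
    	fmt.Println(checkSchema(2, 1)) // matches the test's expected error
    }
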
-
-// TestEvalAppAllocStateWithTxnGroup ensures roundCowState.deltas and applyStorageDelta
-// produce correct results when a txn group has storage allocate and storage update actions
-func TestEvalAppAllocStateWithTxnGroup(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- eval, addr, err := testEvalAppGroup(t, basics.StateSchema{NumByteSlice: 2})
- require.NoError(t, err)
- deltas := eval.state.deltas()
- ad, _ := deltas.Accts.Get(addr)
- state := ad.AppParams[1].GlobalState
- require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addr[:])}, state["caller"])
- require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addr[:])}, state["creator"])
-}
-
-func testEvalAppPoolingGroup(t *testing.T, schema basics.StateSchema, approvalProgram string, consensusVersion protocol.ConsensusVersion) error {
- genBalances, addrs, _ := newTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- eval := l.nextBlock(t)
- eval.proto = config.Consensus[consensusVersion]
-
- appcall1 := txntest.Txn{
- Sender: addrs[0],
- Type: protocol.ApplicationCallTx,
- GlobalStateSchema: schema,
- ApprovalProgram: approvalProgram,
- }
-
- appcall2 := txntest.Txn{
- Sender: addrs[0],
- Type: protocol.ApplicationCallTx,
- ApplicationID: basics.AppIndex(1),
- }
-
- appcall3 := txntest.Txn{
- Sender: addrs[1],
- Type: protocol.ApplicationCallTx,
- ApplicationID: basics.AppIndex(1),
- }
-
- return eval.txgroup(t, &appcall1, &appcall2, &appcall3)
-}
-
-// TestEvalAppPooledBudgetWithTxnGroup ensures 3 app call txns can successfully pool
-// budgets in a group txn and return an error if the budget is exceeded
-func TestEvalAppPooledBudgetWithTxnGroup(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- source := func(n int, m int) string {
- return "#pragma version 4\nbyte 0x1337BEEF\n" + strings.Repeat("keccak256\n", n) +
- strings.Repeat("substring 0 4\n", m) + "pop\nint 1\n"
- }
-
- params := []protocol.ConsensusVersion{
- protocol.ConsensusV29,
- protocol.ConsensusFuture,
- }
-
- cases := []struct {
- prog string
- isSuccessV29 bool
- isSuccessVFuture bool
- expectedErrorV29 string
- expectedErrorVFuture string
- }{
- {source(5, 47), true, true,
- "",
- ""},
- {source(5, 48), false, true,
- "pc=157 dynamic cost budget exceeded, executing pushint: remaining budget is 700 but program cost was 701",
- ""},
- {source(16, 17), false, true,
- "pc= 12 dynamic cost budget exceeded, executing keccak256: remaining budget is 700 but program cost was 781",
- ""},
- {source(16, 18), false, false,
- "pc= 12 dynamic cost budget exceeded, executing keccak256: remaining budget is 700 but program cost was 781",
- "pc= 78 dynamic cost budget exceeded, executing pushint: remaining budget is 2100 but program cost was 2101"},
- }
-
- for i, param := range params {
- for j, testCase := range cases {
- t.Run(fmt.Sprintf("i=%d,j=%d", i, j), func(t *testing.T) {
- err := testEvalAppPoolingGroup(t, basics.StateSchema{NumByteSlice: 3}, testCase.prog, param)
- if !testCase.isSuccessV29 && reflect.DeepEqual(param, protocol.ConsensusV29) {
- require.Error(t, err)
- require.Contains(t, err.Error(), testCase.expectedErrorV29)
- } else if !testCase.isSuccessVFuture && reflect.DeepEqual(param, protocol.ConsensusFuture) {
- require.Error(t, err)
- require.Contains(t, err.Error(), testCase.expectedErrorVFuture)
- }
- })
- }
- }
-}
-
-// BenchTxnGenerator generates transactions for as long as it is asked
-type BenchTxnGenerator interface {
- // Prepare should be used for pre-benchmark ledger initialization,
- // such as funding accounts or creating assets and apps
- Prepare(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) ([]transactions.SignedTxn, int)
- // Txn generates a single transaction
- Txn(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) transactions.SignedTxn
-}
-
-// BenchPaymentTxnGenerator generates payment transactions
-type BenchPaymentTxnGenerator struct {
- counter int
-}
-
-func (g *BenchPaymentTxnGenerator) Prepare(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) ([]transactions.SignedTxn, int) {
- return nil, 0
-}
-
-func (g *BenchPaymentTxnGenerator) Txn(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) transactions.SignedTxn {
- sender := g.counter % len(addrs)
- receiver := (g.counter + 1) % len(addrs)
- // The following would create a more random selection of accounts, and prevent caching half of the accounts.
- // iDigest := crypto.Hash([]byte{byte(i), byte(i >> 8), byte(i >> 16), byte(i >> 24)})
- // sender := (uint64(iDigest[0]) + uint64(iDigest[1])*256 + uint64(iDigest[2])*256*256) % uint64(len(addrs))
- // receiver := (uint64(iDigest[4]) + uint64(iDigest[5])*256 + uint64(iDigest[6])*256*256) % uint64(len(addrs))
-
- txn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: addrs[sender],
- Fee: minFee,
- FirstValid: rnd,
- LastValid: rnd,
- GenesisHash: gh,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: addrs[receiver],
- Amount: basics.MicroAlgos{Raw: 100},
- },
- }
- stxn := txn.Sign(keys[sender])
- g.counter++
- return stxn
-}
-
-// BenchAppOptInsTxnGenerator generates app opt-in transactions
-type BenchAppOptInsTxnGenerator struct {
- NumApps int
- Proto protocol.ConsensusVersion
- Program []byte
- OptedInAccts []basics.Address
- OptedInAcctsIndices []int
-}
-
-func (g *BenchAppOptInsTxnGenerator) Prepare(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) ([]transactions.SignedTxn, int) {
- maxLocalSchemaEntries := config.Consensus[g.Proto].MaxLocalSchemaEntries
- maxAppsOptedIn := config.Consensus[g.Proto].MaxAppsOptedIn
-
- // this function might create too many transactions to fit into a single block;
- // estimate the number of smaller blocks needed in order to set LastValid properly
- const numAccts = 10000
- const maxTxnPerBlock = 10000
- expectedTxnNum := g.NumApps + numAccts*maxAppsOptedIn
- expectedNumOfBlocks := expectedTxnNum/maxTxnPerBlock + 1
-
- createTxns := make([]transactions.SignedTxn, 0, g.NumApps)
- for i := 0; i < g.NumApps; i++ {
- creatorIdx := rand.Intn(len(addrs))
- creator := addrs[creatorIdx]
- txn := transactions.Transaction{
- Type: protocol.ApplicationCallTx,
- Header: transactions.Header{
- Sender: creator,
- Fee: minFee,
- FirstValid: rnd,
- LastValid: rnd + basics.Round(expectedNumOfBlocks),
- GenesisHash: gh,
- Note: randomNote(),
- },
- ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
- ApprovalProgram: g.Program,
- ClearStateProgram: []byte{0x02, 0x20, 0x01, 0x01, 0x22},
- LocalStateSchema: basics.StateSchema{NumByteSlice: maxLocalSchemaEntries},
- },
- }
- stxn := txn.Sign(keys[creatorIdx])
- createTxns = append(createTxns, stxn)
- }
-
- appsOptedIn := make(map[basics.Address]map[basics.AppIndex]struct{}, numAccts)
-
- optInTxns := make([]transactions.SignedTxn, 0, numAccts*maxAppsOptedIn)
-
- for i := 0; i < numAccts; i++ {
- var senderIdx int
- var sender basics.Address
- for {
- senderIdx = rand.Intn(len(addrs))
- sender = addrs[senderIdx]
- if len(appsOptedIn[sender]) < maxAppsOptedIn {
- appsOptedIn[sender] = make(map[basics.AppIndex]struct{}, maxAppsOptedIn)
- break
- }
- }
- g.OptedInAccts = append(g.OptedInAccts, sender)
- g.OptedInAcctsIndices = append(g.OptedInAcctsIndices, senderIdx)
-
- acctOptIns := appsOptedIn[sender]
- for j := 0; j < maxAppsOptedIn; j++ {
- var appIdx basics.AppIndex
- for {
- appIdx = basics.AppIndex(rand.Intn(g.NumApps) + 1)
- if _, ok := acctOptIns[appIdx]; !ok {
- acctOptIns[appIdx] = struct{}{}
- break
- }
- }
-
- txn := transactions.Transaction{
- Type: protocol.ApplicationCallTx,
- Header: transactions.Header{
- Sender: sender,
- Fee: minFee,
- FirstValid: rnd,
- LastValid: rnd + basics.Round(expectedNumOfBlocks),
- GenesisHash: gh,
- },
- ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
- ApplicationID: basics.AppIndex(appIdx),
- OnCompletion: transactions.OptInOC,
- },
- }
- stxn := txn.Sign(keys[senderIdx])
- optInTxns = append(optInTxns, stxn)
- }
- appsOptedIn[sender] = acctOptIns
- }
-
- return append(createTxns, optInTxns...), maxTxnPerBlock
-}
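
Prepare sizes its validity window from a transaction-count estimate: the create txns plus one opt-in per account per allowed app, divided by the per-block cap. A worked sketch of that estimate (the maxAppsOptedIn value below is illustrative; the real one comes from consensus params):

    package main

    import "fmt"

    func main() {
    	const (
    		numApps        = 500
    		numAccts       = 10000
    		maxAppsOptedIn = 10 // illustrative, not the protocol's value
    		maxTxnPerBlock = 10000
    	)
    	expectedTxnNum := numApps + numAccts*maxAppsOptedIn
    	expectedNumOfBlocks := expectedTxnNum/maxTxnPerBlock + 1
    	fmt.Println(expectedTxnNum, expectedNumOfBlocks) // 100500 11
    }
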
-
-func (g *BenchAppOptInsTxnGenerator) Txn(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) transactions.SignedTxn {
- idx := rand.Intn(len(g.OptedInAcctsIndices))
- senderIdx := g.OptedInAcctsIndices[idx]
- sender := addrs[senderIdx]
- receiverIdx := rand.Intn(len(addrs))
-
- txn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: sender,
- Fee: minFee,
- FirstValid: rnd,
- LastValid: rnd,
- GenesisHash: gh,
- Note: randomNote(),
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: addrs[receiverIdx],
- Amount: basics.MicroAlgos{Raw: 100},
- },
- }
- stxn := txn.Sign(keys[senderIdx])
- return stxn
-}
-
-func BenchmarkBlockEvaluatorRAMCrypto(b *testing.B) {
- g := BenchPaymentTxnGenerator{}
- benchmarkBlockEvaluator(b, true, true, protocol.ConsensusCurrentVersion, &g)
-}
-func BenchmarkBlockEvaluatorRAMNoCrypto(b *testing.B) {
- g := BenchPaymentTxnGenerator{}
- benchmarkBlockEvaluator(b, true, false, protocol.ConsensusCurrentVersion, &g)
-}
-func BenchmarkBlockEvaluatorDiskCrypto(b *testing.B) {
- g := BenchPaymentTxnGenerator{}
- benchmarkBlockEvaluator(b, false, true, protocol.ConsensusCurrentVersion, &g)
-}
-func BenchmarkBlockEvaluatorDiskNoCrypto(b *testing.B) {
- g := BenchPaymentTxnGenerator{}
- benchmarkBlockEvaluator(b, false, false, protocol.ConsensusCurrentVersion, &g)
-}
-
-func BenchmarkBlockEvaluatorDiskAppOptIns(b *testing.B) {
- g := BenchAppOptInsTxnGenerator{
- NumApps: 500,
- Proto: protocol.ConsensusFuture,
- Program: []byte{0x02, 0x20, 0x01, 0x01, 0x22},
- }
- benchmarkBlockEvaluator(b, false, false, protocol.ConsensusFuture, &g)
-}
-
-func BenchmarkBlockEvaluatorDiskFullAppOptIns(b *testing.B) {
- // the program sets all 16 available local-state keys (64 bytes each) to 64-byte values identical to the keys
- source := `#pragma version 5
- txn OnCompletion
- int OptIn
- ==
- bz done
- int 0
- store 0 // save loop var
-loop:
- int 0 // acct index
- byte "012345678901234567890123456789012345678901234567890123456789ABC0"
- int 63
- load 0 // loop var
- int 0x41
- +
- setbyte // str[63] = chr(i + 'A')
- dup // value is the same as key
- app_local_put
- load 0 // loop var
- int 1
- +
- dup
- store 0 // save loop var
- int 16
- <
- bnz loop
-done:
- int 1
-`
- ops, err := logic.AssembleString(source)
- require.NoError(b, err)
- prog := ops.Program
- g := BenchAppOptInsTxnGenerator{
- NumApps: 500,
- Proto: protocol.ConsensusFuture,
- Program: prog,
- }
- benchmarkBlockEvaluator(b, false, false, protocol.ConsensusFuture, &g)
-}
-
-// this variant focuses on benchmarking ledger.go's eval(); the rest is setup. It runs eval() b.N times.
-func benchmarkBlockEvaluator(b *testing.B, inMem bool, withCrypto bool, proto protocol.ConsensusVersion, txnSource BenchTxnGenerator) {
- deadlockDisable := deadlock.Opts.Disable
- deadlock.Opts.Disable = true
- defer func() { deadlock.Opts.Disable = deadlockDisable }()
- start := time.Now()
- genesisInitState, addrs, keys := genesisWithProto(100000, proto)
- dbName := fmt.Sprintf("%s.%d", b.Name(), crypto.RandUint64())
- cparams := config.Consensus[genesisInitState.Block.CurrentProtocol]
- cparams.MaxTxnBytesPerBlock = 1000000000 // very big, no limit
- config.Consensus[protocol.ConsensusVersion(dbName)] = cparams
- genesisInitState.Block.CurrentProtocol = protocol.ConsensusVersion(dbName)
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- l, err := OpenLedger(logging.Base(), dbName, inMem, genesisInitState, cfg)
- require.NoError(b, err)
- defer testLedgerCleanup(l, dbName, inMem)
-
- dbName2 := dbName + "_2"
- l2, err := OpenLedger(logging.Base(), dbName2, inMem, genesisInitState, cfg)
- require.NoError(b, err)
- defer testLedgerCleanup(l2, dbName2, inMem)
-
- bepprof := os.Getenv("BLOCK_EVAL_PPROF")
- if len(bepprof) > 0 {
- profpath := dbName + "_cpuprof"
- profout, err := os.Create(profpath)
- if err != nil {
- b.Fatal(err)
- return
- }
- b.Logf("%s: cpu profile for b.N=%d", profpath, b.N)
- pprof.StartCPUProfile(profout)
- defer func() {
- pprof.StopCPUProfile()
- profout.Close()
- }()
- }
-
- newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
- bev, err := l.StartEvaluator(newBlock.BlockHeader, 0)
- require.NoError(b, err)
-
- genHash := genesisInitState.Block.BlockHeader.GenesisHash
-
- backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
- defer backlogPool.Shutdown()
-
- // apply initialization transactions, if any
- initSignedTxns, maxTxnPerBlock := txnSource.Prepare(b, addrs, keys, newBlock.Round(), genHash)
- if len(initSignedTxns) > 0 {
- // all init transactions need to be written to the ledger before reopening and benchmarking
- for _, l := range []*Ledger{l, l2} {
- l.accts.ctxCancel() // force commitSyncer to exit
-
- // wait for commitSyncer to exit;
- // the test calls commitRound directly and does not need commitSyncer/committedUpTo
- select {
- case <-l.accts.commitSyncerClosed:
- break
- }
- }
-
- var numBlocks uint64 = 0
- var validatedBlock *ValidatedBlock
-
- // there might be more transactions than MaxTxnBytesPerBlock allows,
- // so make smaller blocks to fit
- for i, stxn := range initSignedTxns {
- err = bev.Transaction(stxn, transactions.ApplyData{})
- require.NoError(b, err)
- if maxTxnPerBlock > 0 && i%maxTxnPerBlock == 0 || i == len(initSignedTxns)-1 {
- validatedBlock, err = bev.GenerateBlock()
- require.NoError(b, err)
- for _, l := range []*Ledger{l, l2} {
- err = l.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
- require.NoError(b, err)
- }
- newBlock = bookkeeping.MakeBlock(validatedBlock.blk.BlockHeader)
- bev, err = l.StartEvaluator(newBlock.BlockHeader, 0)
- require.NoError(b, err)
- numBlocks++
- }
- }
-
- // wait until everything is written, then reload the ledgers in order
- // to start reading accounts from the DB and not from caches/deltas
- var wg sync.WaitGroup
- for _, l := range []*Ledger{l, l2} {
- wg.Add(1)
- // committing might take a long time, do it in parallel
- go func(l *Ledger) {
- l.accts.accountsWriting.Add(1)
- l.accts.commitRound(numBlocks, 0, 0)
- l.accts.accountsWriting.Wait()
- l.reloadLedger()
- wg.Done()
- }(l)
- }
- wg.Wait()
-
- newBlock = bookkeeping.MakeBlock(validatedBlock.blk.BlockHeader)
- bev, err = l.StartEvaluator(newBlock.BlockHeader, 0)
- require.NoError(b, err)
- }
-
- setupDone := time.Now()
- setupTime := setupDone.Sub(start)
- b.Logf("BenchmarkBlockEvaluator setup time %s", setupTime.String())
-
- // test speed of block building
- numTxns := 50000
-
- for i := 0; i < numTxns; i++ {
- stxn := txnSource.Txn(b, addrs, keys, newBlock.Round(), genHash)
- err = bev.Transaction(stxn, transactions.ApplyData{})
- require.NoError(b, err)
- }
-
- validatedBlock, err := bev.GenerateBlock()
- require.NoError(b, err)
-
- blockBuildDone := time.Now()
- blockBuildTime := blockBuildDone.Sub(setupDone)
- b.ReportMetric(float64(blockBuildTime)/float64(numTxns), "ns/block_build_tx")
-
- err = l.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
- require.NoError(b, err)
-
- avbDone := time.Now()
- avbTime := avbDone.Sub(blockBuildDone)
- b.ReportMetric(float64(avbTime)/float64(numTxns), "ns/AddValidatedBlock_tx")
-
- // test speed of block validation
- // This should be the same as the eval line in ledger.go AddBlock()
- // This is pulled out to isolate eval() time from db ops of AddValidatedBlock()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- if withCrypto {
- _, err = l2.Validate(context.Background(), validatedBlock.blk, backlogPool)
- } else {
- _, err = eval(context.Background(), l2, validatedBlock.blk, false, nil, nil)
- }
- require.NoError(b, err)
- }
-
- abDone := time.Now()
- abTime := abDone.Sub(avbDone)
- b.ReportMetric(float64(abTime)/float64(numTxns*b.N), "ns/eval_validate_tx")
-
- b.StopTimer()
-}
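
Each ReportMetric call normalizes a wall-clock span by the transaction count it covers. A compact sketch of that normalization (hypothetical helper; the same arithmetic as the float64(span)/float64(numTxns) expressions above):

    package main

    import (
    	"fmt"
    	"time"
    )

    // perTxn converts a measured span into nanoseconds per transaction.
    func perTxn(span time.Duration, numTxns int) float64 {
    	return float64(span) / float64(numTxns)
    }

    func main() {
    	fmt.Println(perTxn(5*time.Second, 50000)) // 100000 ns per transaction
    }
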
-
-func TestCowCompactCert(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var certRnd basics.Round
- var certType protocol.CompactCertType
- var cert compactcert.Cert
- var atRound basics.Round
- var validate bool
- accts0 := randomAccounts(20, true)
- blocks := make(map[basics.Round]bookkeeping.BlockHeader)
- blockErr := make(map[basics.Round]error)
- ml := mockLedger{balanceMap: accts0, blocks: blocks, blockErr: blockErr}
- c0 := makeRoundCowState(
- &ml, bookkeeping.BlockHeader{}, config.Consensus[protocol.ConsensusCurrentVersion],
- 0, 0)
-
- certType = protocol.CompactCertType(1234) // bad cert type
- err := c0.compactCert(certRnd, certType, cert, atRound, validate)
- require.Error(t, err)
-
- // no certRnd block
- certType = protocol.CompactCertBasic
- noBlockErr := errors.New("no block")
- blockErr[3] = noBlockErr
- certRnd = 3
- err = c0.compactCert(certRnd, certType, cert, atRound, validate)
- require.Error(t, err)
-
- // no votersRnd block
- // this is slightly a mess of things that don't quite line up with likely usage
- validate = true
- var certHdr bookkeeping.BlockHeader
- certHdr.CurrentProtocol = "TestCowCompactCert"
- certHdr.Round = 1
- proto := config.Consensus[certHdr.CurrentProtocol]
- proto.CompactCertRounds = 2
- config.Consensus[certHdr.CurrentProtocol] = proto
- blocks[certHdr.Round] = certHdr
-
- certHdr.Round = 15
- blocks[certHdr.Round] = certHdr
- certRnd = certHdr.Round
- blockErr[13] = noBlockErr
- err = c0.compactCert(certRnd, certType, cert, atRound, validate)
- require.Error(t, err)
-
- // validate fail
- certHdr.Round = 1
- certRnd = certHdr.Round
- err = c0.compactCert(certRnd, certType, cert, atRound, validate)
- require.Error(t, err)
-
- // fall through to no err
- validate = false
- err = c0.compactCert(certRnd, certType, cert, atRound, validate)
- require.NoError(t, err)
-
- // 100% coverage
-}
-
-// a couple trivial tests that don't need setup
-// see TestBlockEvaluator for more
-func TestTestTransactionGroup(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var txgroup []transactions.SignedTxn
- eval := BlockEvaluator{}
- err := eval.TestTransactionGroup(txgroup)
- require.NoError(t, err) // nothing to do, no problem
-
- eval.proto = config.Consensus[protocol.ConsensusCurrentVersion]
- txgroup = make([]transactions.SignedTxn, eval.proto.MaxTxGroupSize+1)
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err) // too many
-}
-
-// test BlockEvaluator.transactionGroup()
-// some trivial checks that require no setup
-func TestPrivateTransactionGroup(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var txgroup []transactions.SignedTxnWithAD
- eval := BlockEvaluator{}
- err := eval.transactionGroup(txgroup)
- require.NoError(t, err) // nothing to do, no problem
-
- eval.proto = config.Consensus[protocol.ConsensusCurrentVersion]
- txgroup = make([]transactions.SignedTxnWithAD, eval.proto.MaxTxGroupSize+1)
- err = eval.transactionGroup(txgroup)
- require.Error(t, err) // too many
-}
-
-// BlockEvaluator.workaroundOverspentRewards() fixed a couple issues on testnet.
-// This is now part of history and has to be re-created when running catchup on testnet. So, test to ensure it keeps happening.
-func TestTestnetFixup(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- eval := &BlockEvaluator{}
- var rewardPoolBalance basics.AccountData
- rewardPoolBalance.MicroAlgos.Raw = 1234
- var headerRound basics.Round
- testnetGenesisHash, _ := crypto.DigestFromString("JBR3KGFEWPEE5SAQ6IWU6EEBZMHXD4CZU6WCBXWGF57XBZIJHIRA")
-
- // not a fixup round, no change
- headerRound = 1
- poolOld, err := eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
- require.Equal(t, rewardPoolBalance, poolOld)
- require.NoError(t, err)
-
- eval.genesisHash = testnetGenesisHash
- eval.genesisHash[3]++
-
- specialRounds := []basics.Round{1499995, 2926564}
- for _, headerRound = range specialRounds {
- poolOld, err = eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
- require.Equal(t, rewardPoolBalance, poolOld)
- require.NoError(t, err)
- }
-
- for _, headerRound = range specialRounds {
- testnetFixupExecution(t, headerRound, 20000000000)
- }
- // do all the setup and do nothing for not a special round
- testnetFixupExecution(t, specialRounds[0]+1, 0)
-}
-
-func testnetFixupExecution(t *testing.T, headerRound basics.Round, poolBonus uint64) {
- testnetGenesisHash, _ := crypto.DigestFromString("JBR3KGFEWPEE5SAQ6IWU6EEBZMHXD4CZU6WCBXWGF57XBZIJHIRA")
- // big setup so we can move some algos
- // boilerplate like TestBlockEvaluator, but pretend to be testnet
- genesisInitState, addrs, keys := genesis(10)
- genesisInitState.Block.BlockHeader.GenesisHash = testnetGenesisHash
- genesisInitState.Block.BlockHeader.GenesisID = "testnet"
- genesisInitState.GenesisHash = testnetGenesisHash
-
- // for addr, adata := range genesisInitState.Accounts {
- // t.Logf("%s: %+v", addr.String(), adata)
- // }
- rewardPoolBalance := genesisInitState.Accounts[testPoolAddr]
- nextPoolBalance := rewardPoolBalance.MicroAlgos.Raw + poolBonus
-
- dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- const inMem = true
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- l, err := OpenLedger(logging.Base(), dbName, inMem, genesisInitState, cfg)
- require.NoError(t, err)
- defer l.Close()
-
- newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0)
- require.NoError(t, err)
-
- // won't work before funding the bank
- if poolBonus > 0 {
- _, err = eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
- require.Error(t, err)
- }
-
- bankAddr, _ := basics.UnmarshalChecksumAddress("GD64YIY3TWGDMCNPP553DZPPR6LDUSFQOIJVFDPPXWEG3FVOJCCDBBHU5A")
-
- // put some algos in the bank so that fixup can pull from this account
- txn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: addrs[0],
- Fee: minFee,
- FirstValid: newBlock.Round(),
- LastValid: newBlock.Round(),
- GenesisHash: testnetGenesisHash,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: bankAddr,
- Amount: basics.MicroAlgos{Raw: 20000000000 * 10},
- },
- }
- st := txn.Sign(keys[0])
- err = eval.Transaction(st, transactions.ApplyData{})
- require.NoError(t, err)
-
- poolOld, err := eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
- require.Equal(t, nextPoolBalance, poolOld.MicroAlgos.Raw)
- require.NoError(t, err)
-}
-
-// Test that ModifiedAssetHoldings in StateDelta is set correctly.
-func TestModifiedAssetHoldings(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := newTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- const assetid basics.AssetIndex = 1
-
- createTxn := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- Fee: 2000,
- AssetParams: basics.AssetParams{
- Total: 3,
- Decimals: 0,
- Manager: addrs[0],
- Reserve: addrs[0],
- Freeze: addrs[0],
- Clawback: addrs[0],
- },
- }
-
- optInTxn := txntest.Txn{
- Type: "axfer",
- Sender: addrs[1],
- Fee: 2000,
- XferAsset: assetid,
- AssetAmount: 0,
- AssetReceiver: addrs[1],
- }
-
- eval := l.nextBlock(t)
- eval.txns(t, &createTxn, &optInTxn)
- vb := l.endBlock(t, eval)
-
- {
- aa := ledgercore.AccountAsset{
- Address: addrs[0],
- Asset: assetid,
- }
- created, ok := vb.delta.ModifiedAssetHoldings[aa]
- require.True(t, ok)
- assert.True(t, created)
- }
- {
- aa := ledgercore.AccountAsset{
- Address: addrs[1],
- Asset: assetid,
- }
- created, ok := vb.delta.ModifiedAssetHoldings[aa]
- require.True(t, ok)
- assert.True(t, created)
- }
-
- optOutTxn := txntest.Txn{
- Type: "axfer",
- Sender: addrs[1],
- Fee: 1000,
- XferAsset: assetid,
- AssetReceiver: addrs[0],
- AssetCloseTo: addrs[0],
- }
-
- closeTxn := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- Fee: 1000,
- ConfigAsset: assetid,
- }
-
- eval = l.nextBlock(t)
- eval.txns(t, &optOutTxn, &closeTxn)
- vb = l.endBlock(t, eval)
-
- {
- aa := ledgercore.AccountAsset{
- Address: addrs[0],
- Asset: assetid,
- }
- created, ok := vb.delta.ModifiedAssetHoldings[aa]
- require.True(t, ok)
- assert.False(t, created)
- }
- {
- aa := ledgercore.AccountAsset{
- Address: addrs[1],
- Asset: assetid,
- }
- created, ok := vb.delta.ModifiedAssetHoldings[aa]
- require.True(t, ok)
- assert.False(t, created)
- }
-}
-
-// newTestGenesis creates a bunch of accounts, splits up 10B algos
-// among them plus the rewards pool and fee sink, and hands back the
-// addresses and secrets it creates to enable tests. For special
-// scenarios, manipulate these return values before using newTestLedger.
-func newTestGenesis() (bookkeeping.GenesisBalances, []basics.Address, []*crypto.SignatureSecrets) {
- // irrelevant, but deterministic
- sink, err := basics.UnmarshalChecksumAddress("YTPRLJ2KK2JRFSZZNAF57F3K5Y2KCG36FZ5OSYLW776JJGAUW5JXJBBD7Q")
- if err != nil {
- panic(err)
- }
- rewards, err := basics.UnmarshalChecksumAddress("242H5OXHUEBYCGGWB3CQ6AZAMQB5TMCWJGHCGQOZPEIVQJKOO7NZXUXDQA")
- if err != nil {
- panic(err)
- }
-
- const count = 10
- addrs := make([]basics.Address, count)
- secrets := make([]*crypto.SignatureSecrets, count)
- accts := make(map[basics.Address]basics.AccountData)
-
- // 10 billion microalgos, across N accounts and pool and sink
- amount := 10 * 1000000000 * 1000000 / uint64(count+2)
-
- for i := 0; i < count; i++ {
- // Create deterministic addresses, so that output stays the same, run to run.
- var seed crypto.Seed
- seed[0] = byte(i)
- secrets[i] = crypto.GenerateSignatureSecrets(seed)
- addrs[i] = basics.Address(secrets[i].SignatureVerifier)
-
- adata := basics.AccountData{
- MicroAlgos: basics.MicroAlgos{Raw: amount},
- }
- accts[addrs[i]] = adata
- }
-
- accts[sink] = basics.AccountData{
- MicroAlgos: basics.MicroAlgos{Raw: amount},
- Status: basics.NotParticipating,
- }
-
- accts[rewards] = basics.AccountData{
- MicroAlgos: basics.MicroAlgos{Raw: amount},
- }
-
- genBalances := bookkeeping.MakeGenesisBalances(accts, sink, rewards)
-
- return genBalances, addrs, secrets
-}
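
The stake split is exact integer division of 10 billion algos, expressed in microalgos, across the ten accounts plus the pool and sink. Worked out with the same expression as above:

    package main

    import "fmt"

    func main() {
    	const count = 10
    	amount := uint64(10) * 1000000000 * 1000000 / uint64(count+2)
    	fmt.Println(amount) // 833333333333333 microalgos apiece
    }
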
-
-// newTestLedger creates an in-memory Ledger that is as realistic as
-// possible. It has Rewards and FeeSink properly configured.
-func newTestLedger(t testing.TB, balances bookkeeping.GenesisBalances) *Ledger {
- l, _, _ := newTestLedgerImpl(t, balances, true)
- return l
-}
-
-func newTestLedgerOnDisk(t testing.TB, balances bookkeeping.GenesisBalances) (*Ledger, string, bookkeeping.Block) {
- return newTestLedgerImpl(t, balances, false)
-}
-
-func newTestLedgerImpl(t testing.TB, balances bookkeeping.GenesisBalances, inMem bool) (*Ledger, string, bookkeeping.Block) {
- var genHash crypto.Digest
- crypto.RandBytes(genHash[:])
- genBlock, err := bookkeeping.MakeGenesisBlock(protocol.ConsensusFuture,
- balances, "test", genHash)
- require.False(t, genBlock.FeeSink.IsZero())
- require.False(t, genBlock.RewardsPool.IsZero())
- dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- l, err := OpenLedger(logging.Base(), dbName, inMem, InitState{
- Block: genBlock,
- Accounts: balances.Balances,
- GenesisHash: genHash,
- }, cfg)
- require.NoError(t, err)
- return l, dbName, genBlock
-}
-
-// nextBlock begins evaluation of a new block, after ledger creation or endBlock()
-func (ledger *Ledger) nextBlock(t testing.TB) *BlockEvaluator {
- rnd := ledger.Latest()
- hdr, err := ledger.BlockHdr(rnd)
- require.NoError(t, err)
-
- nextHdr := bookkeeping.MakeBlock(hdr).BlockHeader
- eval, err := ledger.StartEvaluator(nextHdr, 0)
- require.NoError(t, err)
- return eval
-}
-
-// endBlock completes the block being created, returns the ValidatedBlock for inspection
-func (ledger *Ledger) endBlock(t testing.TB, eval *BlockEvaluator) *ValidatedBlock {
- validatedBlock, err := eval.GenerateBlock()
- require.NoError(t, err)
- err = ledger.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
- require.NoError(t, err)
- return validatedBlock
-}
-
- // lookup gets the current account data for an address
-func (ledger *Ledger) lookup(t testing.TB, addr basics.Address) basics.AccountData {
- rnd := ledger.Latest()
- ad, err := ledger.Lookup(rnd, addr)
- require.NoError(t, err)
- return ad
-}
-
-// micros gets the current microAlgo balance for an address
-func (ledger *Ledger) micros(t testing.TB, addr basics.Address) uint64 {
- return ledger.lookup(t, addr).MicroAlgos.Raw
-}
-
- // asa gets the current balance and opt-in status of an asa for an address
-func (ledger *Ledger) asa(t testing.TB, addr basics.Address, asset basics.AssetIndex) (uint64, bool) {
- if holding, ok := ledger.lookup(t, addr).Assets[asset]; ok {
- return holding.Amount, true
- }
- return 0, false
-}
-
-// asaParams gets the asset params for a given asa index
-func (ledger *Ledger) asaParams(t testing.TB, asset basics.AssetIndex) (basics.AssetParams, error) {
- creator, ok, err := ledger.GetCreator(basics.CreatableIndex(asset), basics.AssetCreatable)
- if err != nil {
- return basics.AssetParams{}, err
- }
- if !ok {
- return basics.AssetParams{}, fmt.Errorf("no asset (%d)", asset)
- }
- if params, ok := ledger.lookup(t, creator).AssetParams[asset]; ok {
- return params, nil
- }
- return basics.AssetParams{}, fmt.Errorf("bad lookup (%d)", asset)
-}
-
-func (eval *BlockEvaluator) fillDefaults(txn *txntest.Txn) {
- if txn.GenesisHash.IsZero() {
- txn.GenesisHash = eval.genesisHash
- }
- if txn.FirstValid == 0 {
- txn.FirstValid = eval.Round()
- }
- txn.FillDefaults(eval.proto)
-}
-
-func (eval *BlockEvaluator) txn(t testing.TB, txn *txntest.Txn, problem ...string) {
- t.Helper()
- eval.fillDefaults(txn)
- stxn := txn.SignedTxn()
- err := eval.testTransaction(stxn, eval.state.child(1))
- if err != nil {
- if len(problem) == 1 {
- require.Contains(t, err.Error(), problem[0])
- } else {
- require.NoError(t, err) // Will obviously fail
- }
- return
- }
- err = eval.Transaction(stxn, transactions.ApplyData{})
- if err != nil {
- if len(problem) == 1 {
- require.Contains(t, err.Error(), problem[0])
- } else {
- require.NoError(t, err) // Will obviously fail
- }
- return
- }
- require.Len(t, problem, 0)
-}
-
-func (eval *BlockEvaluator) txns(t testing.TB, txns ...*txntest.Txn) {
- t.Helper()
- for _, txn := range txns {
- eval.txn(t, txn)
- }
-}
-
-func (eval *BlockEvaluator) txgroup(t testing.TB, txns ...*txntest.Txn) error {
- t.Helper()
- for _, txn := range txns {
- eval.fillDefaults(txn)
- }
- txgroup := txntest.SignedTxns(txns...)
-
- err := eval.TestTransactionGroup(txgroup)
- if err != nil {
- return err
- }
-
- err = eval.transactionGroup(transactions.WrapSignedTxnsWithAD(txgroup))
- return err
-}
-
-func TestRewardsInAD(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := newTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- payTxn := txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[1]}
-
- // Build up Residue in RewardsState so it's ready to pay
- for i := 1; i < 10; i++ {
- eval := l.nextBlock(t)
- l.endBlock(t, eval)
- }
-
- eval := l.nextBlock(t)
- eval.txn(t, &payTxn)
- payInBlock := eval.block.Payset[0]
- require.Greater(t, payInBlock.ApplyData.SenderRewards.Raw, uint64(1000))
- require.Greater(t, payInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
- require.Equal(t, payInBlock.ApplyData.SenderRewards, payInBlock.ApplyData.ReceiverRewards)
- l.endBlock(t, eval)
-}
-
-func TestMinBalanceChanges(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := newTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- createTxn := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- AssetParams: basics.AssetParams{
- Total: 3,
- Manager: addrs[1],
- Reserve: addrs[2],
- Freeze: addrs[3],
- Clawback: addrs[4],
- },
- }
-
- const expectedID basics.AssetIndex = 1
- optInTxn := txntest.Txn{
- Type: "axfer",
- Sender: addrs[5],
- XferAsset: expectedID,
- AssetReceiver: addrs[5],
- }
-
- ad0init := l.lookup(t, addrs[0])
- ad5init := l.lookup(t, addrs[5])
-
- eval := l.nextBlock(t)
- eval.txns(t, &createTxn, &optInTxn)
- l.endBlock(t, eval)
-
- ad0new := l.lookup(t, addrs[0])
- ad5new := l.lookup(t, addrs[5])
-
- proto := config.Consensus[eval.block.BlockHeader.CurrentProtocol]
- // Check balance and min balance requirement changes
- require.Equal(t, ad0init.MicroAlgos.Raw, ad0new.MicroAlgos.Raw+1000) // fee
- require.Equal(t, ad0init.MinBalance(&proto).Raw, ad0new.MinBalance(&proto).Raw-100000) // create
- require.Equal(t, ad5init.MicroAlgos.Raw, ad5new.MicroAlgos.Raw+1000) // fee
- require.Equal(t, ad5init.MinBalance(&proto).Raw, ad5new.MinBalance(&proto).Raw-100000) // optin
-
- optOutTxn := txntest.Txn{
- Type: "axfer",
- Sender: addrs[5],
- XferAsset: expectedID,
- AssetReceiver: addrs[0],
- AssetCloseTo: addrs[0],
- }
-
- closeTxn := txntest.Txn{
- Type: "acfg",
- Sender: addrs[1], // The manager, not the creator
- ConfigAsset: expectedID,
- }
-
- eval = l.nextBlock(t)
- eval.txns(t, &optOutTxn, &closeTxn)
- l.endBlock(t, eval)
-
- ad0final := l.lookup(t, addrs[0])
- ad5final := l.lookup(t, addrs[5])
- // Check we got our balance "back"
- require.Equal(t, ad0final.MinBalance(&proto), ad0init.MinBalance(&proto))
- require.Equal(t, ad5final.MinBalance(&proto), ad5init.MinBalance(&proto))
-}
-
-// Test that ModifiedAppLocalStates in StateDelta is set correctly.
-func TestModifiedAppLocalStates(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := newTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- const appid basics.AppIndex = 1
-
- createTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: "int 1",
- }
-
- optInTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: appid,
- OnCompletion: transactions.OptInOC,
- }
-
- eval := l.nextBlock(t)
- eval.txns(t, &createTxn, &optInTxn)
- vb := l.endBlock(t, eval)
-
- assert.Len(t, vb.delta.ModifiedAppLocalStates, 1)
- {
- aa := ledgercore.AccountApp{
- Address: addrs[1],
- App: appid,
- }
- created, ok := vb.delta.ModifiedAppLocalStates[aa]
- require.True(t, ok)
- assert.True(t, created)
- }
-
- optOutTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: appid,
- OnCompletion: transactions.CloseOutOC,
- }
-
- closeTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApplicationID: appid,
- OnCompletion: transactions.DeleteApplicationOC,
- }
-
- eval = l.nextBlock(t)
- eval.txns(t, &optOutTxn, &closeTxn)
- vb = l.endBlock(t, eval)
-
- assert.Len(t, vb.delta.ModifiedAppLocalStates, 1)
- {
- aa := ledgercore.AccountApp{
- Address: addrs[1],
- App: appid,
- }
- created, ok := vb.delta.ModifiedAppLocalStates[aa]
- require.True(t, ok)
- assert.False(t, created)
- }
-}
-
- // Test that overriding the consensus parameters affects the generated apply data.
-func TestCustomProtocolParams(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genesisBalances, addrs, _ := newTestGenesis()
-
- var genHash crypto.Digest
- crypto.RandBytes(genHash[:])
- block, err := bookkeeping.MakeGenesisBlock(protocol.ConsensusV24,
- genesisBalances, "test", genHash)
-
- dbName := fmt.Sprintf("%s", t.Name())
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- l, err := OpenLedger(logging.Base(), dbName, true, InitState{
- Block: block,
- Accounts: genesisBalances.Balances,
- GenesisHash: genHash,
- }, cfg)
- require.NoError(t, err)
- defer l.Close()
-
- const assetid basics.AssetIndex = 1
- proto := config.Consensus[protocol.ConsensusV24]
-
- block = bookkeeping.MakeBlock(block.BlockHeader)
-
- createTxn := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- GenesisHash: block.GenesisHash(),
- AssetParams: basics.AssetParams{
- Total: 200,
- Decimals: 0,
- Manager: addrs[0],
- Reserve: addrs[0],
- Freeze: addrs[0],
- Clawback: addrs[0],
- },
- }
- createTxn.FillDefaults(proto)
- createStib, err := block.BlockHeader.EncodeSignedTxn(
- createTxn.SignedTxn(), transactions.ApplyData{})
- require.NoError(t, err)
-
- optInTxn := txntest.Txn{
- Type: "axfer",
- Sender: addrs[1],
- GenesisHash: block.GenesisHash(),
- XferAsset: assetid,
- AssetAmount: 0,
- AssetReceiver: addrs[1],
- }
- optInTxn.FillDefaults(proto)
- optInStib, err := block.BlockHeader.EncodeSignedTxn(
- optInTxn.SignedTxn(), transactions.ApplyData{})
- require.NoError(t, err)
-
- fundTxn := txntest.Txn{
- Type: "axfer",
- Sender: addrs[0],
- GenesisHash: block.GenesisHash(),
- XferAsset: assetid,
- AssetAmount: 100,
- AssetReceiver: addrs[1],
- }
- fundTxn.FillDefaults(proto)
- fundStib, err := block.BlockHeader.EncodeSignedTxn(
- fundTxn.SignedTxn(), transactions.ApplyData{})
- require.NoError(t, err)
-
- optOutTxn := txntest.Txn{
- Type: "axfer",
- Sender: addrs[1],
- GenesisHash: block.GenesisHash(),
- XferAsset: assetid,
- AssetAmount: 30,
- AssetReceiver: addrs[0],
- AssetCloseTo: addrs[0],
- }
- optOutTxn.FillDefaults(proto)
- optOutStib, err := block.BlockHeader.EncodeSignedTxn(
- optOutTxn.SignedTxn(), transactions.ApplyData{})
- require.NoError(t, err)
-
- block.Payset = []transactions.SignedTxnInBlock{
- createStib, optInStib, fundStib, optOutStib,
- }
-
- proto.EnableAssetCloseAmount = true
- _, modifiedTxns, err := Eval(l, &block, proto)
- require.NoError(t, err)
-
- require.Equal(t, 4, len(modifiedTxns))
- assert.Equal(t, uint64(70), modifiedTxns[3].AssetClosingAmount)
-}
-
- // TestAppInsMinBalance checks that accounts opted in to the maximum number of apps (MaxAppsOptedIn)
- // are accepted by the block evaluator and do not cause any MaximumMinimumBalance problems
-func TestAppInsMinBalance(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := newTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- const appid basics.AppIndex = 1
-
- maxAppsOptedIn := config.Consensus[protocol.ConsensusFuture].MaxAppsOptedIn
- require.Greater(t, maxAppsOptedIn, 0)
- maxAppsCreated := config.Consensus[protocol.ConsensusFuture].MaxAppsCreated
- require.Greater(t, maxAppsCreated, 0)
- maxLocalSchemaEntries := config.Consensus[protocol.ConsensusFuture].MaxLocalSchemaEntries
- require.Greater(t, maxLocalSchemaEntries, uint64(0))
-
- txnsCreate := make([]*txntest.Txn, 0, maxAppsOptedIn)
- txnsOptIn := make([]*txntest.Txn, 0, maxAppsOptedIn)
- appsCreated := make(map[basics.Address]int, len(addrs)-1)
-
- acctIdx := 0
- for i := 0; i < maxAppsOptedIn; i++ {
- creator := addrs[acctIdx]
- createTxn := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: creator,
- ApprovalProgram: "int 1",
- LocalStateSchema: basics.StateSchema{NumByteSlice: maxLocalSchemaEntries},
- Note: randomNote(),
- }
- txnsCreate = append(txnsCreate, &createTxn)
- count := appsCreated[creator]
- count++
- appsCreated[creator] = count
- if count == maxAppsCreated {
- acctIdx++
- }
-
- optInTxn := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: addrs[9],
- ApplicationID: appid + basics.AppIndex(i),
- OnCompletion: transactions.OptInOC,
- }
- txnsOptIn = append(txnsOptIn, &optInTxn)
- }
-
- eval := l.nextBlock(t)
- txns := append(txnsCreate, txnsOptIn...)
- eval.txns(t, txns...)
- vb := l.endBlock(t, eval)
- assert.Len(t, vb.delta.ModifiedAppLocalStates, 50)
-}
diff --git a/ledger/evalbench_test.go b/ledger/evalbench_test.go
new file mode 100644
index 000000000..52974c7d3
--- /dev/null
+++ b/ledger/evalbench_test.go
@@ -0,0 +1,440 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "runtime/pprof"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/algorand/go-deadlock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/execpool"
+)
+
+var minFee basics.MicroAlgos
+
+func init() {
+ params := config.Consensus[protocol.ConsensusCurrentVersion]
+ minFee = basics.MicroAlgos{Raw: params.MinTxnFee}
+}
+
+// BenchTxnGenerator generates transactions for as long as it is asked to
+type BenchTxnGenerator interface {
+ // Prepare should be used for pre-benchmark ledger initialization,
+ // such as funding accounts or creating assets and apps
+ Prepare(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) ([]transactions.SignedTxn, int)
+ // Txn generates a single transaction
+ Txn(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) transactions.SignedTxn
+}
+
+// BenchPaymentTxnGenerator generates payment transactions
+type BenchPaymentTxnGenerator struct {
+ counter int
+}
+
+func (g *BenchPaymentTxnGenerator) Prepare(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) ([]transactions.SignedTxn, int) {
+ return nil, 0
+}
+
+func (g *BenchPaymentTxnGenerator) Txn(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) transactions.SignedTxn {
+ sender := g.counter % len(addrs)
+ receiver := (g.counter + 1) % len(addrs)
+ // The following would create a more random selection of accounts and prevent caching half of the accounts.
+ // iDigest := crypto.Hash([]byte{byte(i), byte(i >> 8), byte(i >> 16), byte(i >> 24)})
+ // sender := (uint64(iDigest[0]) + uint64(iDigest[1])*256 + uint64(iDigest[2])*256*256) % uint64(len(addrs))
+ // receiver := (uint64(iDigest[4]) + uint64(iDigest[5])*256 + uint64(iDigest[6])*256*256) % uint64(len(addrs))
+
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addrs[sender],
+ Fee: minFee,
+ FirstValid: rnd,
+ LastValid: rnd,
+ GenesisHash: gh,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addrs[receiver],
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+ stxn := txn.Sign(keys[sender])
+ g.counter++
+ return stxn
+}
+
+// BenchAppOptInsTxnGenerator generates app opt-in transactions
+type BenchAppOptInsTxnGenerator struct {
+ NumApps int
+ Proto protocol.ConsensusVersion
+ Program []byte
+ OptedInAccts []basics.Address
+ OptedInAcctsIndices []int
+}
+
+func (g *BenchAppOptInsTxnGenerator) Prepare(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) ([]transactions.SignedTxn, int) {
+ maxLocalSchemaEntries := config.Consensus[g.Proto].MaxLocalSchemaEntries
+ maxAppsOptedIn := config.Consensus[g.Proto].MaxAppsOptedIn
+
+ // this function might create too many transactions to fit into a single block,
+ // so estimate the number of smaller blocks needed in order to set LastValid properly
+ const numAccts = 10000
+ const maxTxnPerBlock = 10000
+ expectedTxnNum := g.NumApps + numAccts*maxAppsOptedIn
+ expectedNumOfBlocks := expectedTxnNum/maxTxnPerBlock + 1
+
+ createTxns := make([]transactions.SignedTxn, 0, g.NumApps)
+ for i := 0; i < g.NumApps; i++ {
+ creatorIdx := rand.Intn(len(addrs))
+ creator := addrs[creatorIdx]
+ txn := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: transactions.Header{
+ Sender: creator,
+ Fee: minFee,
+ FirstValid: rnd,
+ LastValid: rnd + basics.Round(expectedNumOfBlocks),
+ GenesisHash: gh,
+ Note: ledgertesting.RandomNote(),
+ },
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApprovalProgram: g.Program,
+ ClearStateProgram: []byte{0x02, 0x20, 0x01, 0x01, 0x22},
+ LocalStateSchema: basics.StateSchema{NumByteSlice: maxLocalSchemaEntries},
+ },
+ }
+ stxn := txn.Sign(keys[creatorIdx])
+ createTxns = append(createTxns, stxn)
+ }
+
+ appsOptedIn := make(map[basics.Address]map[basics.AppIndex]struct{}, numAccts)
+
+ optInTxns := make([]transactions.SignedTxn, 0, numAccts*maxAppsOptedIn)
+
+ for i := 0; i < numAccts; i++ {
+ var senderIdx int
+ var sender basics.Address
+ for {
+ senderIdx = rand.Intn(len(addrs))
+ sender = addrs[senderIdx]
+ if len(appsOptedIn[sender]) < maxAppsOptedIn {
+ appsOptedIn[sender] = make(map[basics.AppIndex]struct{}, maxAppsOptedIn)
+ break
+ }
+ }
+ g.OptedInAccts = append(g.OptedInAccts, sender)
+ g.OptedInAcctsIndices = append(g.OptedInAcctsIndices, senderIdx)
+
+ acctOptIns := appsOptedIn[sender]
+ for j := 0; j < maxAppsOptedIn; j++ {
+ var appIdx basics.AppIndex
+ for {
+ appIdx = basics.AppIndex(rand.Intn(g.NumApps) + 1)
+ if _, ok := acctOptIns[appIdx]; !ok {
+ acctOptIns[appIdx] = struct{}{}
+ break
+ }
+ }
+
+ txn := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: minFee,
+ FirstValid: rnd,
+ LastValid: rnd + basics.Round(expectedNumOfBlocks),
+ GenesisHash: gh,
+ },
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: basics.AppIndex(appIdx),
+ OnCompletion: transactions.OptInOC,
+ },
+ }
+ stxn := txn.Sign(keys[senderIdx])
+ optInTxns = append(optInTxns, stxn)
+ }
+ appsOptedIn[sender] = acctOptIns
+ }
+
+ return append(createTxns, optInTxns...), maxTxnPerBlock
+}
+
+func (g *BenchAppOptInsTxnGenerator) Txn(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) transactions.SignedTxn {
+ idx := rand.Intn(len(g.OptedInAcctsIndices))
+ senderIdx := g.OptedInAcctsIndices[idx]
+ sender := addrs[senderIdx]
+ receiverIdx := rand.Intn(len(addrs))
+
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: minFee,
+ FirstValid: rnd,
+ LastValid: rnd,
+ GenesisHash: gh,
+ Note: ledgertesting.RandomNote(),
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addrs[receiverIdx],
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+ stxn := txn.Sign(keys[senderIdx])
+ return stxn
+}
+
+func BenchmarkBlockEvaluatorRAMCrypto(b *testing.B) {
+ g := BenchPaymentTxnGenerator{}
+ benchmarkBlockEvaluator(b, true, true, protocol.ConsensusCurrentVersion, &g)
+}
+func BenchmarkBlockEvaluatorRAMNoCrypto(b *testing.B) {
+ g := BenchPaymentTxnGenerator{}
+ benchmarkBlockEvaluator(b, true, false, protocol.ConsensusCurrentVersion, &g)
+}
+func BenchmarkBlockEvaluatorDiskCrypto(b *testing.B) {
+ g := BenchPaymentTxnGenerator{}
+ benchmarkBlockEvaluator(b, false, true, protocol.ConsensusCurrentVersion, &g)
+}
+func BenchmarkBlockEvaluatorDiskNoCrypto(b *testing.B) {
+ g := BenchPaymentTxnGenerator{}
+ benchmarkBlockEvaluator(b, false, false, protocol.ConsensusCurrentVersion, &g)
+}
+
+func BenchmarkBlockEvaluatorDiskAppOptIns(b *testing.B) {
+ g := BenchAppOptInsTxnGenerator{
+ NumApps: 500,
+ Proto: protocol.ConsensusFuture,
+ Program: []byte{0x02, 0x20, 0x01, 0x01, 0x22},
+ }
+ benchmarkBlockEvaluator(b, false, false, protocol.ConsensusFuture, &g)
+}
+
+func BenchmarkBlockEvaluatorDiskFullAppOptIns(b *testing.B) {
+ // the program sets all 16 available keys, each 64 bytes long, to identical 64-byte values
+ source := `#pragma version 5
+ txn OnCompletion
+ int OptIn
+ ==
+ bz done
+ int 0
+ store 0 // save loop var
+loop:
+ int 0 // acct index
+ byte "012345678901234567890123456789012345678901234567890123456789ABC0"
+ int 63
+ load 0 // loop var
+ int 0x41
+ +
+ setbyte // str[63] = chr(i + 'A')
+ dup // value is the same as key
+ app_local_put
+ load 0 // loop var
+ int 1
+ +
+ dup
+ store 0 // save loop var
+ int 16
+ <
+ bnz loop
+done:
+ int 1
+`
+ ops, err := logic.AssembleString(source)
+ require.NoError(b, err)
+ prog := ops.Program
+ g := BenchAppOptInsTxnGenerator{
+ NumApps: 500,
+ Proto: protocol.ConsensusFuture,
+ Program: prog,
+ }
+ benchmarkBlockEvaluator(b, false, false, protocol.ConsensusFuture, &g)
+}
+
+func testLedgerCleanup(l *Ledger, dbName string, inMem bool) {
+ l.Close()
+ if !inMem {
+ hits, err := filepath.Glob(dbName + "*.sqlite")
+ if err != nil {
+ return
+ }
+ for _, fname := range hits {
+ os.Remove(fname)
+ }
+ }
+}
+
+// this variant focuses on benchmarking ledger.go Eval(); the rest is setup. It runs Eval() b.N times.
+func benchmarkBlockEvaluator(b *testing.B, inMem bool, withCrypto bool, proto protocol.ConsensusVersion, txnSource BenchTxnGenerator) {
+
+ deadlockDisable := deadlock.Opts.Disable
+ deadlock.Opts.Disable = true
+ defer func() { deadlock.Opts.Disable = deadlockDisable }()
+ start := time.Now()
+ genesisInitState, addrs, keys := ledgertesting.GenesisWithProto(100000, proto)
+ dbName := fmt.Sprintf("%s.%d", b.Name(), crypto.RandUint64())
+ cparams := config.Consensus[genesisInitState.Block.CurrentProtocol]
+ cparams.MaxTxnBytesPerBlock = 1000000000 // very big, no limit
+ config.Consensus[protocol.ConsensusVersion(dbName)] = cparams
+ genesisInitState.Block.CurrentProtocol = protocol.ConsensusVersion(dbName)
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ l, err := OpenLedger(logging.Base(), dbName, inMem, genesisInitState, cfg)
+ require.NoError(b, err)
+ defer testLedgerCleanup(l, dbName, inMem)
+
+ dbName2 := dbName + "_2"
+ l2, err := OpenLedger(logging.Base(), dbName2, inMem, genesisInitState, cfg)
+ require.NoError(b, err)
+ defer testLedgerCleanup(l2, dbName2, inMem)
+
+ bepprof := os.Getenv("BLOCK_EVAL_PPROF")
+ if len(bepprof) > 0 {
+ profpath := dbName + "_cpuprof"
+ profout, err := os.Create(profpath)
+ if err != nil {
+ b.Fatal(err)
+ return
+ }
+ b.Logf("%s: cpu profile for b.N=%d", profpath, b.N)
+ pprof.StartCPUProfile(profout)
+ defer func() {
+ pprof.StopCPUProfile()
+ profout.Close()
+ }()
+ }
+
+ newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
+ bev, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(b, err)
+
+ genHash := l.GenesisHash()
+
+ backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
+ defer backlogPool.Shutdown()
+
+ // apply initialization transactions, if any
+ initSignedTxns, maxTxnPerBlock := txnSource.Prepare(b, addrs, keys, newBlock.Round(), genHash)
+ if len(initSignedTxns) > 0 {
+
+ var numBlocks uint64 = 0
+ var validatedBlock *ledgercore.ValidatedBlock
+
+ // there might be more transactions than MaxTxnBytesPerBlock allows,
+ // so make smaller blocks to fit
+ for i, stxn := range initSignedTxns {
+ err = bev.Transaction(stxn, transactions.ApplyData{})
+ require.NoError(b, err)
+ if (maxTxnPerBlock > 0 && i%maxTxnPerBlock == 0) || i == len(initSignedTxns)-1 {
+ validatedBlock, err = bev.GenerateBlock()
+ require.NoError(b, err)
+ for _, l := range []*Ledger{l, l2} {
+ err = l.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+ require.NoError(b, err)
+ }
+ newBlock = bookkeeping.MakeBlock(validatedBlock.Block().BlockHeader)
+ bev, err = l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(b, err)
+ numBlocks++
+ }
+ }
+
+ // wait until everything is written and then reload the ledgers in order
+ // to start reading accounts from the DB and not from caches/deltas
+ var wg sync.WaitGroup
+ for _, l := range []*Ledger{l, l2} {
+ wg.Add(1)
+ // committing might take a long time, so do it in parallel
+ go func(l *Ledger) {
+ commitRound(numBlocks, 0, l)
+ l.reloadLedger()
+ wg.Done()
+ }(l)
+ }
+ wg.Wait()
+
+ newBlock = bookkeeping.MakeBlock(validatedBlock.Block().BlockHeader)
+ bev, err = l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(b, err)
+ }
+
+ setupDone := time.Now()
+ setupTime := setupDone.Sub(start)
+ b.Logf("BenchmarkBlockEvaluator setup time %s", setupTime.String())
+
+ // test speed of block building
+ numTxns := 50000
+
+ for i := 0; i < numTxns; i++ {
+ stxn := txnSource.Txn(b, addrs, keys, newBlock.Round(), genHash)
+ err = bev.Transaction(stxn, transactions.ApplyData{})
+ require.NoError(b, err)
+ }
+
+ validatedBlock, err := bev.GenerateBlock()
+ require.NoError(b, err)
+
+ blockBuildDone := time.Now()
+ blockBuildTime := blockBuildDone.Sub(setupDone)
+ b.ReportMetric(float64(blockBuildTime)/float64(numTxns), "ns/block_build_tx")
+
+ err = l.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+ require.NoError(b, err)
+
+ avbDone := time.Now()
+ avbTime := avbDone.Sub(blockBuildDone)
+ b.ReportMetric(float64(avbTime)/float64(numTxns), "ns/AddValidatedBlock_tx")
+
+ // test speed of block validation
+ // This should be the same as the eval line in ledger.go AddBlock()
+ // This is pulled out to isolate Eval() time from db ops of AddValidatedBlock()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if withCrypto {
+ _, err = l2.Validate(context.Background(), validatedBlock.Block(), backlogPool)
+ } else {
+ _, err = internal.Eval(context.Background(), l2, validatedBlock.Block(), false, nil, nil)
+ }
+ require.NoError(b, err)
+ }
+
+ abDone := time.Now()
+ abTime := abDone.Sub(avbDone)
+ b.ReportMetric(float64(abTime)/float64(numTxns*b.N), "ns/eval_validate_tx")
+
+ b.StopTimer()
+}
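Because benchmarkBlockEvaluator depends only on the BenchTxnGenerator interface, new workloads can be plugged in without touching the driver. A minimal sketch of a custom generator (the type and benchmark names are hypothetical; the signatures are the ones declared above):

// BenchSelfPayTxnGenerator needs no Prepare step and emits self-payments.
type BenchSelfPayTxnGenerator struct {
	counter int
}

func (g *BenchSelfPayTxnGenerator) Prepare(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) ([]transactions.SignedTxn, int) {
	return nil, 0 // nothing to pre-create
}

func (g *BenchSelfPayTxnGenerator) Txn(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) transactions.SignedTxn {
	i := g.counter % len(addrs)
	g.counter++
	txn := transactions.Transaction{
		Type: protocol.PaymentTx,
		Header: transactions.Header{
			Sender:      addrs[i],
			Fee:         minFee,
			FirstValid:  rnd,
			LastValid:   rnd,
			GenesisHash: gh,
		},
		PaymentTxnFields: transactions.PaymentTxnFields{
			Receiver: addrs[i], // pay self
			Amount:   basics.MicroAlgos{Raw: 1},
		},
	}
	return txn.Sign(keys[i])
}

func BenchmarkBlockEvaluatorRAMSelfPay(b *testing.B) {
	benchmarkBlockEvaluator(b, true, false, protocol.ConsensusCurrentVersion, &BenchSelfPayTxnGenerator{})
}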
diff --git a/ledger/evalindexer.go b/ledger/evalindexer.go
new file mode 100644
index 000000000..251e7c1f6
--- /dev/null
+++ b/ledger/evalindexer.go
@@ -0,0 +1,194 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+)
+
+// indexerLedgerForEval is the ledger interface that Indexer implements. It is a
+// simplified version of the LedgerForEvaluator interface. Certain functions that
+// the evaluator doesn't use in the trusting mode are excluded, and the remaining
+// functions only request data at the latest round.
+type indexerLedgerForEval interface {
+ LatestBlockHdr() (bookkeeping.BlockHeader, error)
+ // The value of the returned map is nil iff the account was not found.
+ LookupWithoutRewards(map[basics.Address]struct{}) (map[basics.Address]*basics.AccountData, error)
+ GetAssetCreator(map[basics.AssetIndex]struct{}) (map[basics.AssetIndex]FoundAddress, error)
+ GetAppCreator(map[basics.AppIndex]struct{}) (map[basics.AppIndex]FoundAddress, error)
+ LatestTotals() (ledgercore.AccountTotals, error)
+}
+
+// FoundAddress is a wrapper for an address and a boolean.
+type FoundAddress struct {
+ Address basics.Address
+ Exists bool
+}
+
+// EvalForIndexerResources contains resources preloaded from the Indexer database.
+// Indexer is able to do the preloading more efficiently than the evaluator loading
+// resources one by one.
+type EvalForIndexerResources struct {
+ // The map value is nil iff the account does not exist. The account data is owned here.
+ Accounts map[basics.Address]*basics.AccountData
+ Creators map[Creatable]FoundAddress
+}
+
+// Creatable represents a single creatable object.
+type Creatable struct {
+ Index basics.CreatableIndex
+ Type basics.CreatableType
+}
+
+// indexerLedgerConnector converts between the indexerLedgerForEval and LedgerForEvaluator interfaces.
+type indexerLedgerConnector struct {
+ il indexerLedgerForEval
+ genesisHash crypto.Digest
+ latestRound basics.Round
+ roundResources EvalForIndexerResources
+}
+
+// BlockHdr is part of LedgerForEvaluator interface.
+func (l indexerLedgerConnector) BlockHdr(round basics.Round) (bookkeeping.BlockHeader, error) {
+ if round != l.latestRound {
+ return bookkeeping.BlockHeader{}, fmt.Errorf(
+ "BlockHdr() evaluator called this function for the wrong round %d, "+
+ "latest round is %d",
+ round, l.latestRound)
+ }
+ return l.il.LatestBlockHdr()
+}
+
+// CheckDup is part of LedgerForEvaluator interface.
+func (l indexerLedgerConnector) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
+ // This function is not used by evaluator.
+ return errors.New("CheckDup() not implemented")
+}
+
+// LookupWithoutRewards is part of LedgerForEvaluator interface.
+func (l indexerLedgerConnector) LookupWithoutRewards(round basics.Round, address basics.Address) (basics.AccountData, basics.Round, error) {
+ // check to see if the account data is in the cache.
+ if pad, has := l.roundResources.Accounts[address]; has {
+ if pad == nil {
+ return basics.AccountData{}, round, nil
+ }
+ return *pad, round, nil
+ }
+
+ accountDataMap, err :=
+ l.il.LookupWithoutRewards(map[basics.Address]struct{}{address: {}})
+ if err != nil {
+ return basics.AccountData{}, basics.Round(0), err
+ }
+
+ accountData := accountDataMap[address]
+ if accountData == nil {
+ return basics.AccountData{}, round, nil
+ }
+ return *accountData, round, nil
+}
+
+// GetCreatorForRound is part of LedgerForEvaluator interface.
+func (l indexerLedgerConnector) GetCreatorForRound(_ basics.Round, cindex basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
+ var foundAddress FoundAddress
+ var has bool
+ // check to see if the creator is in the cache.
+ if foundAddress, has = l.roundResources.Creators[Creatable{Index: cindex, Type: ctype}]; has {
+ return foundAddress.Address, foundAddress.Exists, nil
+ }
+
+ switch ctype {
+ case basics.AssetCreatable:
+ foundAddresses, err :=
+ l.il.GetAssetCreator(map[basics.AssetIndex]struct{}{basics.AssetIndex(cindex): {}})
+ if err != nil {
+ return basics.Address{}, false, err
+ }
+ foundAddress = foundAddresses[basics.AssetIndex(cindex)]
+ case basics.AppCreatable:
+ foundAddresses, err :=
+ l.il.GetAppCreator(map[basics.AppIndex]struct{}{basics.AppIndex(cindex): {}})
+ if err != nil {
+ return basics.Address{}, false, err
+ }
+ foundAddress = foundAddresses[basics.AppIndex(cindex)]
+ default:
+ return basics.Address{}, false, fmt.Errorf("unknown creatable type %v", ctype)
+ }
+
+ return foundAddress.Address, foundAddress.Exists, nil
+}
+
+// GenesisHash is part of LedgerForEvaluator interface.
+func (l indexerLedgerConnector) GenesisHash() crypto.Digest {
+ return l.genesisHash
+}
+
+// LatestTotals is part of LedgerForEvaluator interface.
+func (l indexerLedgerConnector) LatestTotals() (rnd basics.Round, totals ledgercore.AccountTotals, err error) {
+ totals, err = l.il.LatestTotals()
+ rnd = l.latestRound
+ return
+}
+
+// CompactCertVoters is part of LedgerForEvaluator interface.
+func (l indexerLedgerConnector) CompactCertVoters(_ basics.Round) (*ledgercore.VotersForRound, error) {
+ // This function is not used by evaluator.
+ return nil, errors.New("CompactCertVoters() not implemented")
+}
+
+func makeIndexerLedgerConnector(il indexerLedgerForEval, genesisHash crypto.Digest, latestRound basics.Round, roundResources EvalForIndexerResources) indexerLedgerConnector {
+ return indexerLedgerConnector{
+ il: il,
+ genesisHash: genesisHash,
+ latestRound: latestRound,
+ roundResources: roundResources,
+ }
+}
+
+// EvalForIndexer evaluates a block without validation using the given `proto`.
+// It returns the state delta and transactions with modified apply data according to `proto`.
+// This function is used by Indexer which modifies `proto` to retrieve the asset
+// close amount for each transaction even when the real consensus parameters do not
+// support it.
+func EvalForIndexer(il indexerLedgerForEval, block *bookkeeping.Block, proto config.ConsensusParams, resources EvalForIndexerResources) (ledgercore.StateDelta, []transactions.SignedTxnInBlock, error) {
+ ilc := makeIndexerLedgerConnector(il, block.GenesisHash(), block.Round()-1, resources)
+
+ eval, err := internal.StartEvaluator(
+ ilc, block.BlockHeader,
+ internal.EvaluatorOptions{
+ PaysetHint: len(block.Payset),
+ ProtoParams: &proto,
+ Generate: false,
+ Validate: false,
+ })
+ if err != nil {
+ return ledgercore.StateDelta{}, []transactions.SignedTxnInBlock{},
+ fmt.Errorf("EvalForIndexer() err: %w", err)
+ }
+
+ return eval.ProcessBlockForIndexer(block)
+}
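A minimal sketch of how a caller such as Indexer might drive EvalForIndexer, assuming it already holds a value il implementing indexerLedgerForEval and the block to replay (the override mirrors TestEvalForIndexerCustomProtocolParams in evalindexer_test.go):

// Replay the block with an overridden parameter so the regenerated
// apply data includes asset close amounts.
proto := config.Consensus[block.CurrentProtocol]
proto.EnableAssetCloseAmount = true // local copy; the real consensus params are untouched

delta, payset, err := EvalForIndexer(il, &block, proto, EvalForIndexerResources{})
if err != nil {
	return err // hypothetical caller-side error handling
}
_ = delta  // ledgercore.StateDelta for the block
_ = payset // []transactions.SignedTxnInBlock with modified apply data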
diff --git a/ledger/evalindexer_test.go b/ledger/evalindexer_test.go
new file mode 100644
index 000000000..76e4d2e42
--- /dev/null
+++ b/ledger/evalindexer_test.go
@@ -0,0 +1,307 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "errors"
+ "fmt"
+ "math/rand"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+type indexerLedgerForEvalImpl struct {
+ l *Ledger
+ latestRound basics.Round
+}
+
+func (il indexerLedgerForEvalImpl) LatestBlockHdr() (bookkeeping.BlockHeader, error) {
+ return il.l.BlockHdr(il.latestRound)
+}
+
+// The value of the returned map is nil iff the account was not found.
+func (il indexerLedgerForEvalImpl) LookupWithoutRewards(addresses map[basics.Address]struct{}) (map[basics.Address]*basics.AccountData, error) {
+ res := make(map[basics.Address]*basics.AccountData)
+
+ for address := range addresses {
+ accountData, _, err := il.l.LookupWithoutRewards(il.latestRound, address)
+ if err != nil {
+ return nil, err
+ }
+
+ if accountData.IsZero() {
+ res[address] = nil
+ } else {
+ accountDataCopy := new(basics.AccountData)
+ *accountDataCopy = accountData
+ res[address] = accountDataCopy
+ }
+ }
+
+ return res, nil
+}
+
+func (il indexerLedgerForEvalImpl) GetAssetCreator(map[basics.AssetIndex]struct{}) (map[basics.AssetIndex]FoundAddress, error) {
+ // This function is unused.
+ return nil, errors.New("GetAssetCreator() not implemented")
+}
+
+func (il indexerLedgerForEvalImpl) GetAppCreator(map[basics.AppIndex]struct{}) (map[basics.AppIndex]FoundAddress, error) {
+ // This function is unused.
+ return nil, errors.New("GetAppCreator() not implemented")
+}
+
+func (il indexerLedgerForEvalImpl) LatestTotals() (totals ledgercore.AccountTotals, err error) {
+ _, totals, err = il.l.LatestTotals()
+ return
+}
+
+// Test that overriding the consensus parameters affects the generated apply data.
+func TestEvalForIndexerCustomProtocolParams(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisBalances, addrs, _ := ledgertesting.NewTestGenesis()
+
+ var genHash crypto.Digest
+ crypto.RandBytes(genHash[:])
+ block, err := bookkeeping.MakeGenesisBlock(protocol.ConsensusV24,
+ genesisBalances, "test", genHash)
+
+ dbName := fmt.Sprintf("%s", t.Name())
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ l, err := OpenLedger(logging.Base(), dbName, true, ledgercore.InitState{
+ Block: block,
+ Accounts: genesisBalances.Balances,
+ GenesisHash: genHash,
+ }, cfg)
+ require.NoError(t, err)
+ defer l.Close()
+
+ const assetid basics.AssetIndex = 1
+ proto := config.Consensus[protocol.ConsensusV24]
+
+ block = bookkeeping.MakeBlock(block.BlockHeader)
+
+ createTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ GenesisHash: block.GenesisHash(),
+ AssetParams: basics.AssetParams{
+ Total: 200,
+ Decimals: 0,
+ Manager: addrs[0],
+ Reserve: addrs[0],
+ Freeze: addrs[0],
+ Clawback: addrs[0],
+ },
+ }
+ createTxn.FillDefaults(proto)
+ createStib, err := block.BlockHeader.EncodeSignedTxn(
+ createTxn.SignedTxn(), transactions.ApplyData{})
+ require.NoError(t, err)
+
+ optInTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[1],
+ GenesisHash: block.GenesisHash(),
+ XferAsset: assetid,
+ AssetAmount: 0,
+ AssetReceiver: addrs[1],
+ }
+ optInTxn.FillDefaults(proto)
+ optInStib, err := block.BlockHeader.EncodeSignedTxn(
+ optInTxn.SignedTxn(), transactions.ApplyData{})
+ require.NoError(t, err)
+
+ fundTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[0],
+ GenesisHash: block.GenesisHash(),
+ XferAsset: assetid,
+ AssetAmount: 100,
+ AssetReceiver: addrs[1],
+ }
+ fundTxn.FillDefaults(proto)
+ fundStib, err := block.BlockHeader.EncodeSignedTxn(
+ fundTxn.SignedTxn(), transactions.ApplyData{})
+ require.NoError(t, err)
+
+ optOutTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[1],
+ GenesisHash: block.GenesisHash(),
+ XferAsset: assetid,
+ AssetAmount: 30,
+ AssetReceiver: addrs[0],
+ AssetCloseTo: addrs[0],
+ }
+ optOutTxn.FillDefaults(proto)
+ optOutStib, err := block.BlockHeader.EncodeSignedTxn(
+ optOutTxn.SignedTxn(), transactions.ApplyData{})
+ require.NoError(t, err)
+
+ block.Payset = []transactions.SignedTxnInBlock{
+ createStib, optInStib, fundStib, optOutStib,
+ }
+
+ il := indexerLedgerForEvalImpl{
+ l: l,
+ latestRound: 0,
+ }
+ proto.EnableAssetCloseAmount = true
+ _, modifiedTxns, err := EvalForIndexer(il, &block, proto, EvalForIndexerResources{})
+ require.NoError(t, err)
+
+ require.Equal(t, 4, len(modifiedTxns))
+ assert.Equal(t, uint64(70), modifiedTxns[3].AssetClosingAmount)
+}
+
+// TestEvalForIndexerForExpiredAccounts tests that the EvalForIndexer function will correctly mark accounts offline
+func TestEvalForIndexerForExpiredAccounts(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisBalances, addrs, _ := ledgertesting.NewTestGenesis()
+
+ var genHash crypto.Digest
+ crypto.RandBytes(genHash[:])
+ block, err := bookkeeping.MakeGenesisBlock(protocol.ConsensusFuture,
+ genesisBalances, "test", genHash)
+
+ dbName := fmt.Sprintf("%s", t.Name())
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ l, err := OpenLedger(logging.Base(), dbName, true, ledgercore.InitState{
+ Block: block,
+ Accounts: genesisBalances.Balances,
+ GenesisHash: genHash,
+ }, cfg)
+ require.NoError(t, err)
+ defer l.Close()
+
+ proto := config.Consensus[protocol.ConsensusFuture]
+
+ block = bookkeeping.MakeBlock(block.BlockHeader)
+
+ il := indexerLedgerForEvalImpl{
+ l: l,
+ latestRound: 0,
+ }
+
+ _, _, err = EvalForIndexer(il, &block, proto, EvalForIndexerResources{})
+ require.NoError(t, err)
+
+ badBlock := block
+ // First validate that the bad block is fine if we don't touch it...
+ _, _, err = EvalForIndexer(il, &badBlock, proto, EvalForIndexerResources{})
+ require.NoError(t, err)
+
+ // Introduce an unknown address. With these parameters the Eval function is not
+ // expected to fail; an empty address is simply added
+ badBlock.ExpiredParticipationAccounts = append(badBlock.ExpiredParticipationAccounts, basics.Address{123})
+
+ _, _, err = EvalForIndexer(il, &badBlock, proto, EvalForIndexerResources{})
+ require.NoError(t, err)
+
+ badBlock = block
+
+ // Now add too many accounts, which will cause resetExpiredOnlineAccountsParticipationKeys() to fail
+ addressToCopy := addrs[0]
+
+ for i := 0; i < proto.MaxProposedExpiredOnlineAccounts+1; i++ {
+ badBlock.ExpiredParticipationAccounts = append(badBlock.ExpiredParticipationAccounts, addressToCopy)
+ }
+
+ _, _, err = EvalForIndexer(il, &badBlock, proto, EvalForIndexerResources{})
+ require.Error(t, err)
+
+ // Sanity Check
+
+ badBlock = block
+
+ _, _, err = EvalForIndexer(il, &badBlock, proto, EvalForIndexerResources{})
+ require.NoError(t, err)
+}
+
+// Test that preloading data in cow base works as expected.
+func TestResourceCaching(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var address basics.Address
+ _, err := rand.Read(address[:])
+ require.NoError(t, err)
+
+ genesisInitState, _, _ := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture)
+
+ genesisBalances := bookkeeping.GenesisBalances{
+ Balances: genesisInitState.Accounts,
+ FeeSink: testSinkAddr,
+ RewardsPool: testPoolAddr,
+ Timestamp: 0,
+ }
+ l := newTestLedger(t, genesisBalances)
+
+ genesisBlockHeader, err := l.BlockHdr(basics.Round(0))
+ require.NoError(t, err)
+ block := bookkeeping.MakeBlock(genesisBlockHeader)
+
+ resources := EvalForIndexerResources{
+ Accounts: map[basics.Address]*basics.AccountData{
+ address: {
+ MicroAlgos: basics.MicroAlgos{Raw: 5},
+ },
+ },
+ Creators: map[Creatable]FoundAddress{
+ {Index: basics.CreatableIndex(6), Type: basics.AssetCreatable}: {Address: address, Exists: true},
+ {Index: basics.CreatableIndex(6), Type: basics.AppCreatable}: {Address: address, Exists: false},
+ },
+ }
+
+ ilc := makeIndexerLedgerConnector(indexerLedgerForEvalImpl{l: l, latestRound: basics.Round(0)}, block.GenesisHash(), block.Round()-1, resources)
+
+ {
+ accountData, rnd, err := ilc.LookupWithoutRewards(basics.Round(0), address)
+ require.NoError(t, err)
+ assert.Equal(t, basics.AccountData{MicroAlgos: basics.MicroAlgos{Raw: 5}}, accountData)
+ assert.Equal(t, basics.Round(0), rnd)
+ }
+ {
+ creator, found, err := ilc.GetCreatorForRound(basics.Round(0), basics.CreatableIndex(6), basics.AssetCreatable)
+ require.NoError(t, err)
+ require.True(t, found)
+ assert.Equal(t, address, creator)
+ }
+ {
+ _, found, err := ilc.GetCreatorForRound(basics.Round(0), basics.CreatableIndex(6), basics.AppCreatable)
+ require.NoError(t, err)
+ require.False(t, found)
+ }
+}
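TestResourceCaching above pins down the preloading contract: entries present in EvalForIndexerResources are served from memory and never reach the underlying indexerLedgerForEval implementation. A condensed sketch of that preload shape (someAddr, il, block, and proto are assumed to be in scope):

resources := EvalForIndexerResources{
	// A nil *AccountData value means the account is known to be absent.
	Accounts: map[basics.Address]*basics.AccountData{
		someAddr: {MicroAlgos: basics.MicroAlgos{Raw: 5}},
	},
	Creators: map[Creatable]FoundAddress{
		{Index: basics.CreatableIndex(6), Type: basics.AssetCreatable}: {Address: someAddr, Exists: true},
		{Index: basics.CreatableIndex(6), Type: basics.AppCreatable}:   {Exists: false}, // known non-existent
	},
}
// Anything not in these maps falls through to the indexerLedgerForEval
// implementation (a database lookup in Indexer's case).
_, _, err := EvalForIndexer(il, &block, proto, resources)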
diff --git a/ledger/appcow.go b/ledger/internal/appcow.go
index 22623c13c..9a136f4bf 100644
--- a/ledger/appcow.go
+++ b/ledger/internal/appcow.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"fmt"
@@ -29,6 +29,7 @@ import (
"github.com/algorand/go-algorand/protocol"
)
+//msgp: ignore storageAction
type storageAction uint64
const (
@@ -457,20 +458,16 @@ func (cb *roundCowState) DelKey(addr basics.Address, aidx basics.AppIndex, globa
}
// MakeDebugBalances creates a ledger suitable for dryrun and debugger
-func MakeDebugBalances(l ledgerForCowBase, round basics.Round, proto protocol.ConsensusVersion, prevTimestamp int64) apply.Balances {
- base := &roundCowBase{
- l: l,
- rnd: round - 1,
- proto: config.Consensus[proto],
- accounts: make(map[basics.Address]basics.AccountData),
- }
+func MakeDebugBalances(l LedgerForCowBase, round basics.Round, proto protocol.ConsensusVersion, prevTimestamp int64) apply.Balances {
+ base := makeRoundCowBase(l, round-1, 0, basics.Round(0), config.Consensus[proto])
hdr := bookkeeping.BlockHeader{
Round: round,
UpgradeState: bookkeeping.UpgradeState{CurrentProtocol: proto},
}
hint := 2
- cb := makeRoundCowState(base, hdr, config.Consensus[proto], prevTimestamp, hint)
+ // passing an empty AccountTotals here is fine since it's only being used by the top level cow state object.
+ cb := makeRoundCowState(base, hdr, config.Consensus[proto], prevTimestamp, ledgercore.AccountTotals{}, hint)
return cb
}
@@ -488,7 +485,12 @@ func (cb *roundCowState) StatefulEval(params logic.EvalParams, aidx basics.AppIn
var cx *logic.EvalContext
pass, cx, err = logic.EvalStatefulCx(program, params)
if err != nil {
- return false, transactions.EvalDelta{}, ledgercore.LogicEvalError{Err: err}
+ var details string
+ if cx != nil {
+ pc, det := cx.PcDetails()
+ details = fmt.Sprintf("pc=%d, opcodes=%s", pc, det)
+ }
+ return false, transactions.EvalDelta{}, ledgercore.LogicEvalError{Err: err, Details: details}
}
// If program passed, build our eval delta, and commit to state changes
diff --git a/ledger/appcow_test.go b/ledger/internal/appcow_test.go
index 28096d8bc..978854eb8 100644
--- a/ledger/appcow_test.go
+++ b/ledger/internal/appcow_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"fmt"
@@ -29,6 +29,7 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -184,7 +185,7 @@ func randomAddrApps(n int) ([]storagePtr, []basics.Address) {
aidx: basics.AppIndex(rand.Intn(100000) + 1),
global: rand.Intn(2) == 0,
}
- outa[i] = randomAddress()
+ outa[i] = ledgertesting.RandomAddress()
}
return out, outa
}
@@ -197,7 +198,7 @@ func TestCowStorage(t *testing.T) {
bh.CurrentProtocol = protocol.ConsensusCurrentVersion
proto, ok := config.Consensus[bh.CurrentProtocol]
require.True(t, ok)
- cow := makeRoundCowState(&ml, bh, proto, 0, 0)
+ cow := makeRoundCowState(&ml, bh, proto, 0, ledgercore.AccountTotals{}, 0)
allSptrs, allAddrs := randomAddrApps(10)
st := makeStateTracker()
@@ -363,8 +364,8 @@ func TestCowBuildDelta(t *testing.T) {
a := require.New(t)
- creator := randomAddress()
- sender := randomAddress()
+ creator := ledgertesting.RandomAddress()
+ sender := ledgertesting.RandomAddress()
aidx := basics.AppIndex(2)
cow := roundCowState{}
@@ -941,7 +942,7 @@ func TestCowAllocated(t *testing.T) {
aidx := basics.AppIndex(1)
c := getCow([]modsData{})
- addr1 := getRandomAddress(a)
+ addr1 := ledgertesting.RandomAddress()
c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{
addr1: {storagePtr{aidx, false}: &storageDelta{action: allocAction}},
}
@@ -950,7 +951,7 @@ func TestCowAllocated(t *testing.T) {
// ensure other requests go down to roundCowParent
a.Panics(func() { c.allocated(addr1, aidx+1, false) })
- a.Panics(func() { c.allocated(getRandomAddress(a), aidx, false) })
+ a.Panics(func() { c.allocated(ledgertesting.RandomAddress(), aidx, false) })
c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{
addr1: {storagePtr{aidx, true}: &storageDelta{action: allocAction}},
@@ -959,7 +960,7 @@ func TestCowAllocated(t *testing.T) {
// ensure other requests go down to roundCowParent
a.Panics(func() { c.allocated(addr1, aidx+1, true) })
- a.Panics(func() { c.allocated(getRandomAddress(a), aidx, true) })
+ a.Panics(func() { c.allocated(ledgertesting.RandomAddress(), aidx, true) })
}
func TestCowGetCreator(t *testing.T) {
@@ -967,7 +968,7 @@ func TestCowGetCreator(t *testing.T) {
a := require.New(t)
- addr := getRandomAddress(a)
+ addr := ledgertesting.RandomAddress()
aidx := basics.AppIndex(1)
c := getCow([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
@@ -990,7 +991,7 @@ func TestCowGetters(t *testing.T) {
a := require.New(t)
- addr := getRandomAddress(a)
+ addr := ledgertesting.RandomAddress()
aidx := basics.AppIndex(1)
c := getCow([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
@@ -1008,11 +1009,11 @@ func TestCowGet(t *testing.T) {
a := require.New(t)
- addr := getRandomAddress(a)
+ addr := ledgertesting.RandomAddress()
aidx := basics.AppIndex(1)
c := getCow([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
- addr1 := getRandomAddress(a)
+ addr1 := ledgertesting.RandomAddress()
bre := basics.AccountData{MicroAlgos: basics.MicroAlgos{Raw: 100}}
c.mods.Accts.Upsert(addr1, bre)
@@ -1025,7 +1026,7 @@ func TestCowGet(t *testing.T) {
a.Equal(bre, bra)
// ensure other requests go down to roundCowParent
- a.Panics(func() { c.Get(getRandomAddress(a), true) })
+ a.Panics(func() { c.Get(ledgertesting.RandomAddress(), true) })
}
func TestCowGetKey(t *testing.T) {
@@ -1033,7 +1034,7 @@ func TestCowGetKey(t *testing.T) {
a := require.New(t)
- addr := getRandomAddress(a)
+ addr := ledgertesting.RandomAddress()
aidx := basics.AppIndex(1)
c := getCow([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
@@ -1097,7 +1098,7 @@ func TestCowGetKey(t *testing.T) {
a.Equal(tv, val)
// ensure other requests go down to roundCowParent
- a.Panics(func() { c.GetKey(getRandomAddress(a), aidx, false, "lkey", 0) })
+ a.Panics(func() { c.GetKey(ledgertesting.RandomAddress(), aidx, false, "lkey", 0) })
a.Panics(func() { c.GetKey(addr, aidx+1, false, "lkey", 0) })
}
@@ -1106,7 +1107,7 @@ func TestCowSetKey(t *testing.T) {
a := require.New(t)
- addr := getRandomAddress(a)
+ addr := ledgertesting.RandomAddress()
aidx := basics.AppIndex(1)
c := getCow([]modsData{
{addr, basics.CreatableIndex(aidx), basics.AppCreatable},
@@ -1177,7 +1178,7 @@ func TestCowSetKey(t *testing.T) {
a.NoError(err)
// check local
- addr1 := getRandomAddress(a)
+ addr1 := ledgertesting.RandomAddress()
c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{
addr1: {
storagePtr{aidx, false}: &storageDelta{
@@ -1192,7 +1193,7 @@ func TestCowSetKey(t *testing.T) {
a.NoError(err)
// ensure other requests go down to roundCowParent
- a.Panics(func() { c.SetKey(getRandomAddress(a), aidx, false, key, tv, 0) })
+ a.Panics(func() { c.SetKey(ledgertesting.RandomAddress(), aidx, false, key, tv, 0) })
a.Panics(func() { c.SetKey(addr, aidx+1, false, key, tv, 0) })
}
@@ -1201,7 +1202,7 @@ func TestCowSetKeyVFuture(t *testing.T) {
a := require.New(t)
- addr := getRandomAddress(a)
+ addr := ledgertesting.RandomAddress()
aidx := basics.AppIndex(1)
c := getCow([]modsData{
{addr, basics.CreatableIndex(aidx), basics.AppCreatable},
@@ -1237,7 +1238,7 @@ func TestCowAccountIdx(t *testing.T) {
a := require.New(t)
l := emptyLedger{}
- addr := getRandomAddress(a)
+ addr := ledgertesting.RandomAddress()
aidx := basics.AppIndex(1)
c := getCow([]modsData{
{addr, basics.CreatableIndex(aidx), basics.AppCreatable},
@@ -1284,7 +1285,7 @@ func TestCowDelKey(t *testing.T) {
a := require.New(t)
- addr := getRandomAddress(a)
+ addr := ledgertesting.RandomAddress()
aidx := basics.AppIndex(1)
c := getCow([]modsData{
{addr, basics.CreatableIndex(aidx), basics.AppCreatable},
@@ -1327,6 +1328,6 @@ func TestCowDelKey(t *testing.T) {
a.NoError(err)
// ensure other requests go down to roundCowParent
- a.Panics(func() { c.DelKey(getRandomAddress(a), aidx, false, key, 0) })
+ a.Panics(func() { c.DelKey(ledgertesting.RandomAddress(), aidx, false, key, 0) })
a.Panics(func() { c.DelKey(addr, aidx+1, false, key, 0) })
}
diff --git a/ledger/applications.go b/ledger/internal/applications.go
index 9b3cb27b4..fc18699b7 100644
--- a/ledger/applications.go
+++ b/ledger/internal/applications.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"fmt"
@@ -273,6 +273,11 @@ func (al *logicLedger) Perform(tx *transactions.Transaction, spec transactions.S
return ad, err
}
+ err = apply.Rekey(balances, tx)
+ if err != nil {
+ return ad, err
+ }
+
// compared to eval.transaction() it may seem strange that we
// increment the transaction count *before* transaction
// processing, rather than after. But we need to account for the
@@ -287,12 +292,18 @@ func (al *logicLedger) Perform(tx *transactions.Transaction, spec transactions.S
switch tx.Type {
case protocol.PaymentTx:
err = apply.Payment(tx.PaymentTxnFields, tx.Header, balances, spec, &ad)
- case protocol.AssetTransferTx:
- err = apply.AssetTransfer(tx.AssetTransferTxnFields, tx.Header, balances, spec, &ad)
+
+ case protocol.KeyRegistrationTx:
+ err = apply.Keyreg(tx.KeyregTxnFields, tx.Header, balances, spec, &ad, al.Round())
+
case protocol.AssetConfigTx:
err = apply.AssetConfig(tx.AssetConfigTxnFields, tx.Header, balances, spec, &ad, al.cow.txnCounter())
+
+ case protocol.AssetTransferTx:
+ err = apply.AssetTransfer(tx.AssetTransferTxnFields, tx.Header, balances, spec, &ad)
case protocol.AssetFreezeTx:
err = apply.AssetFreeze(tx.AssetFreezeTxnFields, tx.Header, balances, spec, &ad)
+
default:
err = fmt.Errorf("%s tx in AVM", tx.Type)
}
diff --git a/ledger/internal/applications_test.go b/ledger/internal/applications_test.go
new file mode 100644
index 000000000..94efcef1a
--- /dev/null
+++ b/ledger/internal/applications_test.go
@@ -0,0 +1,353 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package internal
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+type creatableLocator struct {
+ cidx basics.CreatableIndex
+ ctype basics.CreatableType
+}
+type storeLocator struct {
+ addr basics.Address
+ aidx basics.AppIndex
+ global bool
+}
+type mockCowForLogicLedger struct {
+ rnd basics.Round
+ ts int64
+ cr map[creatableLocator]basics.Address
+ brs map[basics.Address]basics.AccountData
+ stores map[storeLocator]basics.TealKeyValue
+ tcs map[int]basics.CreatableIndex
+ txc uint64
+}
+
+func (c *mockCowForLogicLedger) Get(addr basics.Address, withPendingRewards bool) (basics.AccountData, error) {
+ br, ok := c.brs[addr]
+ if !ok {
+ return basics.AccountData{}, fmt.Errorf("addr %s not in mock cow", addr.String())
+ }
+ return br, nil
+}
+
+func (c *mockCowForLogicLedger) GetCreatableID(groupIdx int) basics.CreatableIndex {
+ return c.tcs[groupIdx]
+}
+
+func (c *mockCowForLogicLedger) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
+ addr, found := c.cr[creatableLocator{cidx, ctype}]
+ return addr, found, nil
+}
+
+func (c *mockCowForLogicLedger) GetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) {
+ kv, ok := c.stores[storeLocator{addr, aidx, global}]
+ if !ok {
+ return basics.TealValue{}, false, fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
+ }
+ tv, found := kv[key]
+ return tv, found, nil
+}
+
+func (c *mockCowForLogicLedger) BuildEvalDelta(aidx basics.AppIndex, txn *transactions.Transaction) (evalDelta transactions.EvalDelta, err error) {
+ return transactions.EvalDelta{}, nil
+}
+
+func (c *mockCowForLogicLedger) SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error {
+ kv, ok := c.stores[storeLocator{addr, aidx, global}]
+ if !ok {
+ return fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
+ }
+ kv[key] = value
+ c.stores[storeLocator{addr, aidx, global}] = kv
+ return nil
+}
+
+func (c *mockCowForLogicLedger) DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error {
+ kv, ok := c.stores[storeLocator{addr, aidx, global}]
+ if !ok {
+ return fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
+ }
+ delete(kv, key)
+ c.stores[storeLocator{addr, aidx, global}] = kv
+ return nil
+}
+
+func (c *mockCowForLogicLedger) round() basics.Round {
+ return c.rnd
+}
+
+func (c *mockCowForLogicLedger) prevTimestamp() int64 {
+ return c.ts
+}
+
+func (c *mockCowForLogicLedger) allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error) {
+ _, found := c.stores[storeLocator{addr, aidx, global}]
+ return found, nil
+}
+
+func (c *mockCowForLogicLedger) incTxnCount() {
+ c.txc++
+}
+
+func (c *mockCowForLogicLedger) txnCounter() uint64 {
+ return c.txc
+}
+
+func newCowMock(creatables []modsData) *mockCowForLogicLedger {
+ var m mockCowForLogicLedger
+ m.cr = make(map[creatableLocator]basics.Address, len(creatables))
+ for _, e := range creatables {
+ m.cr[creatableLocator{e.cidx, e.ctype}] = e.addr
+ }
+ return &m
+}
+
+func TestLogicLedgerMake(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ _, err := newLogicLedger(nil, 0)
+ a.Error(err)
+ a.Contains(err.Error(), "cannot make logic ledger for app index 0")
+
+ addr := ledgertesting.RandomAddress()
+ aidx := basics.AppIndex(1)
+
+ c := &mockCowForLogicLedger{}
+ _, err = newLogicLedger(c, 0)
+ a.Error(err)
+ a.Contains(err.Error(), "cannot make logic ledger for app index 0")
+
+ _, err = newLogicLedger(c, aidx)
+ a.Error(err)
+ a.Contains(err.Error(), fmt.Sprintf("app %d does not exist", aidx))
+
+ c = newCowMock([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
+ l, err := newLogicLedger(c, aidx)
+ a.NoError(err)
+ a.NotNil(l)
+ a.Equal(aidx, l.aidx)
+ a.Equal(c, l.cow)
+}
+
+func TestLogicLedgerBalances(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ addr := ledgertesting.RandomAddress()
+ aidx := basics.AppIndex(1)
+ c := newCowMock([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
+ l, err := newLogicLedger(c, aidx)
+ a.NoError(err)
+ a.NotNil(l)
+
+ addr1 := ledgertesting.RandomAddress()
+ ble := basics.MicroAlgos{Raw: 100}
+ c.brs = map[basics.Address]basics.AccountData{addr1: {MicroAlgos: ble}}
+ bla, err := l.Balance(addr1)
+ a.NoError(err)
+ a.Equal(ble, bla)
+}
+
+func TestLogicLedgerGetters(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ addr := ledgertesting.RandomAddress()
+ aidx := basics.AppIndex(1)
+ c := newCowMock([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
+ l, err := newLogicLedger(c, aidx)
+ a.NoError(err)
+ a.NotNil(l)
+
+ round := basics.Round(1234)
+ c.rnd = round
+ ts := int64(11223344)
+ c.ts = ts
+
+ addr1 := ledgertesting.RandomAddress()
+ c.stores = map[storeLocator]basics.TealKeyValue{{addr1, aidx, false}: {}}
+ a.Equal(aidx, l.ApplicationID())
+ a.Equal(round, l.Round())
+ a.Equal(ts, l.LatestTimestamp())
+ a.True(l.OptedIn(addr1, 0))
+ a.True(l.OptedIn(addr1, aidx))
+ a.False(l.OptedIn(addr, 0))
+ a.False(l.OptedIn(addr, aidx))
+}
+
+func TestLogicLedgerAsset(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ addr := ledgertesting.RandomAddress()
+ addr1 := ledgertesting.RandomAddress()
+ aidx := basics.AppIndex(1)
+ assetIdx := basics.AssetIndex(2)
+ c := newCowMock([]modsData{
+ {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
+ {addr1, basics.CreatableIndex(assetIdx), basics.AssetCreatable},
+ })
+ l, err := newLogicLedger(c, aidx)
+ a.NoError(err)
+ a.NotNil(l)
+
+ _, _, err = l.AssetParams(basics.AssetIndex(aidx))
+ a.Error(err)
+ a.Contains(err.Error(), fmt.Sprintf("asset %d does not exist", aidx))
+
+ c.brs = map[basics.Address]basics.AccountData{
+ addr1: {AssetParams: map[basics.AssetIndex]basics.AssetParams{assetIdx: {Total: 1000}}},
+ }
+
+ ap, creator, err := l.AssetParams(assetIdx)
+ a.NoError(err)
+ a.Equal(addr1, creator)
+ a.Equal(uint64(1000), ap.Total)
+
+ _, err = l.AssetHolding(addr1, assetIdx)
+ a.Error(err)
+ a.Contains(err.Error(), "has not opted in to asset")
+
+ c.brs = map[basics.Address]basics.AccountData{
+ addr1: {
+ AssetParams: map[basics.AssetIndex]basics.AssetParams{assetIdx: {Total: 1000}},
+ Assets: map[basics.AssetIndex]basics.AssetHolding{assetIdx: {Amount: 99}},
+ },
+ }
+
+ ah, err := l.AssetHolding(addr1, assetIdx)
+ a.NoError(err)
+ a.Equal(uint64(99), ah.Amount)
+}
+
+func TestLogicLedgerGetKey(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ addr := ledgertesting.RandomAddress()
+ addr1 := ledgertesting.RandomAddress()
+ aidx := basics.AppIndex(1)
+ assetIdx := basics.AssetIndex(2)
+ c := newCowMock([]modsData{
+ {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
+ {addr1, basics.CreatableIndex(assetIdx), basics.AssetCreatable},
+ })
+ l, err := newLogicLedger(c, aidx)
+ a.NoError(err)
+ a.NotNil(l)
+
+ _, ok, err := l.GetGlobal(basics.AppIndex(assetIdx), "gkey")
+ a.Error(err)
+ a.False(ok)
+ a.Contains(err.Error(), fmt.Sprintf("app %d does not exist", assetIdx))
+
+ tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
+ c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx + 1, true}: {"gkey": tv}}
+ val, ok, err := l.GetGlobal(aidx, "gkey")
+ a.Error(err)
+ a.False(ok)
+ a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
+
+ c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
+ val, ok, err = l.GetGlobal(aidx, "gkey")
+ a.NoError(err)
+ a.True(ok)
+ a.Equal(tv, val)
+
+ // check local
+ c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, false}: {"lkey": tv}}
+ val, ok, err = l.GetLocal(addr, aidx, "lkey", 0)
+ a.NoError(err)
+ a.True(ok)
+ a.Equal(tv, val)
+}
+
+func TestLogicLedgerSetKey(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ addr := ledgertesting.RandomAddress()
+ aidx := basics.AppIndex(1)
+ c := newCowMock([]modsData{
+ {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
+ })
+ l, err := newLogicLedger(c, aidx)
+ a.NoError(err)
+ a.NotNil(l)
+
+ tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
+ err = l.SetGlobal("gkey", tv)
+ a.Error(err)
+ a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
+
+ tv2 := basics.TealValue{Type: basics.TealUintType, Uint: 2}
+ c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
+ err = l.SetGlobal("gkey", tv2)
+ a.NoError(err)
+
+ // check local
+ c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, false}: {"lkey": tv}}
+ err = l.SetLocal(addr, "lkey", tv2, 0)
+ a.NoError(err)
+}
+
+func TestLogicLedgerDelKey(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ addr := ledgertesting.RandomAddress()
+ aidx := basics.AppIndex(1)
+ c := newCowMock([]modsData{
+ {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
+ })
+ l, err := newLogicLedger(c, aidx)
+ a.NoError(err)
+ a.NotNil(l)
+
+ err = l.DelGlobal("gkey")
+ a.Error(err)
+ a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
+
+ tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
+ c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
+ err = l.DelGlobal("gkey")
+ a.NoError(err)
+
+ addr1 := ledgertesting.RandomAddress()
+ c.stores = map[storeLocator]basics.TealKeyValue{{addr1, aidx, false}: {"lkey": tv}}
+ err = l.DelLocal(addr1, "lkey", 0)
+ a.NoError(err)
+}
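The applications_test.go suite above lives in the new ledger/internal package and can be run on its own with the standard tooling:

	go test ./ledger/internal/ -run TestLogicLedger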
diff --git a/ledger/assetcow.go b/ledger/internal/assetcow.go
index ca35788dd..b28d09a7f 100644
--- a/ledger/assetcow.go
+++ b/ledger/internal/assetcow.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"github.com/algorand/go-algorand/data/basics"
diff --git a/ledger/compactcert.go b/ledger/internal/compactcert.go
index 9d2f2d66f..2f90c8b22 100644
--- a/ledger/compactcert.go
+++ b/ledger/internal/compactcert.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"fmt"
diff --git a/ledger/compactcert_test.go b/ledger/internal/compactcert_test.go
index 27e466811..690d49375 100644
--- a/ledger/compactcert_test.go
+++ b/ledger/internal/compactcert_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"testing"
diff --git a/ledger/cow.go b/ledger/internal/cow.go
index 1234c191b..4bf546de9 100644
--- a/ledger/cow.go
+++ b/ledger/internal/cow.go
@@ -14,9 +14,10 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
+ "errors"
"fmt"
"github.com/algorand/go-algorand/config"
@@ -77,9 +78,13 @@ type roundCowState struct {
groupIdx int
// track creatables created during each transaction in the round
trackedCreatables map[int]basics.CreatableIndex
+
+ // prevTotals contains the account totals for the previous round. It is used to calculate the totals for the new round
+ // so that we can validate that the block evaluator generated a valid changeset.
+ prevTotals ledgercore.AccountTotals
}
-func makeRoundCowState(b roundCowParent, hdr bookkeeping.BlockHeader, proto config.ConsensusParams, prevTimestamp int64, hint int) *roundCowState {
+func makeRoundCowState(b roundCowParent, hdr bookkeeping.BlockHeader, proto config.ConsensusParams, prevTimestamp int64, prevTotals ledgercore.AccountTotals, hint int) *roundCowState {
cb := roundCowState{
lookupParent: b,
commitParent: nil,
@@ -87,6 +92,7 @@ func makeRoundCowState(b roundCowParent, hdr bookkeeping.BlockHeader, proto conf
mods: ledgercore.MakeStateDelta(&hdr, prevTimestamp, hint, 0),
sdeltas: make(map[basics.Address]map[storagePtr]*storageDelta),
trackedCreatables: make(map[int]basics.CreatableIndex),
+ prevTotals: prevTotals,
}
// compatibilityMode retains producing application' eval deltas under the following rule:
@@ -285,3 +291,39 @@ func (cb *roundCowState) commitToParent() {
func (cb *roundCowState) modifiedAccounts() []basics.Address {
return cb.mods.Accts.ModifiedAccounts()
}
+
+// errUnsupportedChildCowTotalCalculation is returned by CalculateTotals when called by a child roundCowState instance
+var errUnsupportedChildCowTotalCalculation = errors.New("the method CalculateTotals should be called only on a top-level roundCowState")
+
+// CalculateTotals calculates the totals given the changes in the StateDelta.
+// these changes allow the validator to verify that the totals still align with the
+// expected values (i.e. the total amount of algos in the system remains consistent).
+func (cb *roundCowState) CalculateTotals() error {
+ // this method applies only to the top-level roundCowState
+ if cb.commitParent != nil {
+ return errUnsupportedChildCowTotalCalculation
+ }
+ totals := cb.prevTotals
+ var ot basics.OverflowTracker
+ totals.ApplyRewards(cb.mods.Hdr.RewardsLevel, &ot)
+
+ for i := 0; i < cb.mods.Accts.Len(); i++ {
+ accountAddr, updatedAccountData := cb.mods.Accts.GetByIdx(i)
+ previousAccountData, lookupError := cb.lookupParent.lookup(accountAddr)
+ if lookupError != nil {
+ return fmt.Errorf("roundCowState.CalculateTotals unable to load account data for address %v", accountAddr)
+ }
+ totals.DelAccount(cb.proto, previousAccountData, &ot)
+ totals.AddAccount(cb.proto, updatedAccountData, &ot)
+ }
+
+ if ot.Overflowed {
+ return fmt.Errorf("roundCowState: CalculateTotals %d overflowed totals", cb.mods.Hdr.Round)
+ }
+ if totals.All() != cb.prevTotals.All() {
+ return fmt.Errorf("roundCowState: CalculateTotals sum of money changed from %d to %d", cb.prevTotals.All().Raw, totals.All().Raw)
+ }
+
+ cb.mods.Totals = totals
+ return nil
+}
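CalculateTotals enforces a conservation invariant: for each modified account, removing its previous state from the running totals and adding its updated state must leave the overall sum of algos unchanged. The same invariant for a single account, as a minimal sketch (prev and updated are assumed snapshots of one account's data):

	// Sketch only: returns the adjusted totals and whether the sum of
	// algos was conserved across this one account's update.
	func totalsConserved(proto config.ConsensusParams, totals ledgercore.AccountTotals,
		prev, updated basics.AccountData) (ledgercore.AccountTotals, bool) {
		var ot basics.OverflowTracker
		before := totals.All()
		totals.DelAccount(proto, prev, &ot)
		totals.AddAccount(proto, updated, &ot)
		return totals, !ot.Overflowed && totals.All() == before
	}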
diff --git a/ledger/cow_test.go b/ledger/internal/cow_test.go
index 3423db360..968df87a0 100644
--- a/ledger/cow_test.go
+++ b/ledger/internal/cow_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"testing"
@@ -26,6 +26,7 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -96,7 +97,7 @@ func checkCow(t *testing.T, cow *roundCowState, accts map[basics.Address]basics.
require.Equal(t, d, data)
}
- d, err := cow.lookup(randomAddress())
+ d, err := cow.lookup(ledgertesting.RandomAddress())
require.NoError(t, err)
require.Equal(t, d, basics.AccountData{})
}
@@ -111,19 +112,19 @@ func applyUpdates(cow *roundCowState, updates ledgercore.AccountDeltas) {
func TestCowBalance(t *testing.T) {
partitiontest.PartitionTest(t)
- accts0 := randomAccounts(20, true)
+ accts0 := ledgertesting.RandomAccounts(20, true)
ml := mockLedger{balanceMap: accts0}
c0 := makeRoundCowState(
&ml, bookkeeping.BlockHeader{}, config.Consensus[protocol.ConsensusCurrentVersion],
- 0, 0)
+ 0, ledgercore.AccountTotals{}, 0)
checkCow(t, c0, accts0)
c1 := c0.child(0)
checkCow(t, c0, accts0)
checkCow(t, c1, accts0)
- updates1, accts1, _ := randomDeltas(10, accts0, 0)
+ updates1, accts1, _ := ledgertesting.RandomDeltas(10, accts0, 0)
applyUpdates(c1, updates1)
checkCow(t, c0, accts0)
checkCow(t, c1, accts1)
@@ -133,7 +134,7 @@ func TestCowBalance(t *testing.T) {
checkCow(t, c1, accts1)
checkCow(t, c2, accts1)
- updates2, accts2, _ := randomDeltas(10, accts1, 0)
+ updates2, accts2, _ := ledgertesting.RandomDeltas(10, accts1, 0)
applyUpdates(c2, updates2)
checkCow(t, c0, accts0)
checkCow(t, c1, accts1)
diff --git a/ledger/eval.go b/ledger/internal/eval.go
index 5730a4f22..1cb050bf6 100644
--- a/ledger/eval.go
+++ b/ledger/internal/eval.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"context"
@@ -27,7 +27,6 @@ import (
"github.com/algorand/go-algorand/crypto/compactcert"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/committee"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/verify"
@@ -38,24 +37,42 @@ import (
"github.com/algorand/go-algorand/util/execpool"
)
-// ErrNoSpace indicates insufficient space for transaction in block
-var ErrNoSpace = errors.New("block does not have space for transaction")
+// LedgerForCowBase represents the subset of Ledger functionality needed for cow business
+type LedgerForCowBase interface {
+ BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
+ CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error
+ LookupWithoutRewards(basics.Round, basics.Address) (basics.AccountData, basics.Round, error)
+ GetCreatorForRound(basics.Round, basics.CreatableIndex, basics.CreatableType) (basics.Address, bool, error)
+}
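Exporting LedgerForCowBase lets code outside the package, including tests, supply a minimal ledger. A sketch of a stub satisfying the interface (names are illustrative; the usual basics/bookkeeping/config/transactions/ledgercore imports are assumed):

	type stubLedger struct {
		hdrs map[basics.Round]bookkeeping.BlockHeader
	}

	func (s *stubLedger) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) {
		return s.hdrs[r], nil
	}

	func (s *stubLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round,
		basics.Round, transactions.Txid, ledgercore.Txlease) error {
		return nil // this stub never reports duplicates
	}

	func (s *stubLedger) LookupWithoutRewards(basics.Round, basics.Address) (basics.AccountData, basics.Round, error) {
		return basics.AccountData{}, 0, nil // every account looks empty
	}

	func (s *stubLedger) GetCreatorForRound(basics.Round, basics.CreatableIndex, basics.CreatableType) (basics.Address, bool, error) {
		return basics.Address{}, false, nil // no creatables exist
	}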
// ErrRoundZero is self-explanatory
var ErrRoundZero = errors.New("cannot start evaluator for round 0")
-// maxPaysetHint makes sure that we don't allocate too much memory up front
-// in the block evaluator, since there cannot reasonably be more than this
-// many transactions in a block.
-const maxPaysetHint = 20000
+// averageEncodedTxnSizeHint is an estimate of the encoded transaction size,
+// used to preallocate memory for the payset upfront. Preallocating
+// helps to avoid re-allocating storage during evaluation/validation, which
+// is considerably slower.
+const averageEncodedTxnSizeHint = 150
// asyncAccountLoadingThreadCount controls how many go routines would be used
-// to load the account data before the eval() start processing individual
+// to load the account data before Eval() starts processing each individual
// transaction group.
const asyncAccountLoadingThreadCount = 4
+// creatable represents a single creatable object.
+type creatable struct {
+ cindex basics.CreatableIndex
+ ctype basics.CreatableType
+}
+
+// foundAddress is a wrapper for an address and a boolean.
+type foundAddress struct {
+ address basics.Address
+ exists bool
+}
+
type roundCowBase struct {
- l ledgerForCowBase
+ l LedgerForCowBase
// The round number of the previous block, for looking up prior state.
rnd basics.Round
@@ -79,10 +96,38 @@ type roundCowBase struct {
// are beyond the scope of this cache.
// The account data store here is always the account data without the rewards.
accounts map[basics.Address]basics.AccountData
+
+ // Similar cache for asset/app creators.
+ creators map[creatable]foundAddress
+}
+
+func makeRoundCowBase(l LedgerForCowBase, rnd basics.Round, txnCount uint64, compactCertNextRnd basics.Round, proto config.ConsensusParams) *roundCowBase {
+ return &roundCowBase{
+ l: l,
+ rnd: rnd,
+ txnCount: txnCount,
+ compactCertNextRnd: compactCertNextRnd,
+ proto: proto,
+ accounts: make(map[basics.Address]basics.AccountData),
+ creators: make(map[creatable]foundAddress),
+ }
}
func (x *roundCowBase) getCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
- return x.l.GetCreatorForRound(x.rnd, cidx, ctype)
+ creatable := creatable{cindex: cidx, ctype: ctype}
+
+ if foundAddress, ok := x.creators[creatable]; ok {
+ return foundAddress.address, foundAddress.exists, nil
+ }
+
+ address, exists, err := x.l.GetCreatorForRound(x.rnd, cidx, ctype)
+ if err != nil {
+ return basics.Address{}, false, fmt.Errorf(
+ "roundCowBase.getCreator() cidx: %d ctype: %v err: %w", cidx, ctype, err)
+ }
+
+ x.creators[creatable] = foundAddress{address: address, exists: exists}
+ return address, exists, nil
}
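getCreator now memoizes ledger lookups in the per-round creators map, so repeated references to the same asset or app within a block touch the backing ledger only once. The caching pattern in isolation (a sketch; cache and fetch stand in for the struct fields and ledger call above):

	func cachedCreator(cache map[creatable]foundAddress, key creatable,
		fetch func() (basics.Address, bool, error)) (basics.Address, bool, error) {
		if fa, ok := cache[key]; ok { // served from the per-round cache
			return fa.address, fa.exists, nil
		}
		addr, exists, err := fetch() // single trip to the backing ledger
		if err != nil {
			return basics.Address{}, false, err
		}
		cache[key] = foundAddress{address: addr, exists: exists}
		return addr, exists, nil
	}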
// lookup returns the non-rewarded account data for the provided account address. It uses the internal per-round cache
@@ -101,7 +146,7 @@ func (x *roundCowBase) lookup(addr basics.Address) (basics.AccountData, error) {
}
func (x *roundCowBase) checkDup(firstValid, lastValid basics.Round, txid transactions.Txid, txl ledgercore.Txlease) error {
- return x.l.CheckDup(x.proto, x.rnd+1, firstValid, lastValid, txid, TxLease{txl})
+ return x.l.CheckDup(x.proto, x.rnd+1, firstValid, lastValid, txid, txl)
}
func (x *roundCowBase) txnCounter() uint64 {
@@ -117,7 +162,7 @@ func (x *roundCowBase) blockHdr(r basics.Round) (bookkeeping.BlockHeader, error)
}
func (x *roundCowBase) allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error) {
- acct, _, err := x.l.LookupWithoutRewards(x.rnd, addr)
+ acct, err := x.lookup(addr)
if err != nil {
return false, err
}
@@ -136,7 +181,7 @@ func (x *roundCowBase) allocated(addr basics.Address, aidx basics.AppIndex, glob
// getKey gets the value for a particular key in some storage
// associated with an application globally or locally
func (x *roundCowBase) getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) {
- ad, _, err := x.l.LookupWithoutRewards(x.rnd, addr)
+ ad, err := x.lookup(addr)
if err != nil {
return basics.TealValue{}, false, err
}
@@ -166,7 +211,7 @@ func (x *roundCowBase) getKey(addr basics.Address, aidx basics.AppIndex, global
// getStorageCounts counts the storage types used by some account
// associated with an application globally or locally
func (x *roundCowBase) getStorageCounts(addr basics.Address, aidx basics.AppIndex, global bool) (basics.StateSchema, error) {
- ad, _, err := x.l.LookupWithoutRewards(x.rnd, addr)
+ ad, err := x.lookup(addr)
if err != nil {
return basics.StateSchema{}, err
}
@@ -355,38 +400,49 @@ type BlockEvaluator struct {
blockGenerated bool // prevent repeated GenerateBlock calls
- l ledgerForEvaluator
+ l LedgerForEvaluator
+
+ maxTxnBytesPerBlock int
}
-type ledgerForEvaluator interface {
- ledgerForCowBase
+// LedgerForEvaluator defines the ledger interface needed by the evaluator.
+type LedgerForEvaluator interface {
+ LedgerForCowBase
GenesisHash() crypto.Digest
- Totals(basics.Round) (ledgercore.AccountTotals, error)
- CompactCertVoters(basics.Round) (*VotersForRound, error)
+ LatestTotals() (basics.Round, ledgercore.AccountTotals, error)
+ CompactCertVoters(basics.Round) (*ledgercore.VotersForRound, error)
}
-// ledgerForCowBase represents subset of Ledger functionality needed for cow business
-type ledgerForCowBase interface {
- BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
- CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, TxLease) error
- LookupWithoutRewards(basics.Round, basics.Address) (basics.AccountData, basics.Round, error)
- GetCreatorForRound(basics.Round, basics.CreatableIndex, basics.CreatableType) (basics.Address, bool, error)
+// EvaluatorOptions defines the evaluator creation options
+type EvaluatorOptions struct {
+ PaysetHint int
+ Validate bool
+ Generate bool
+ MaxTxnBytesPerBlock int
+ ProtoParams *config.ConsensusParams
}
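EvaluatorOptions replaces the old positional flags on startEvaluator. A sketch of driving the exported constructor, assuming l satisfies LedgerForEvaluator and hdr is the candidate block header:

	eval, err := StartEvaluator(l, hdr, EvaluatorOptions{
		PaysetHint: 100,  // expected payset length, if known
		Validate:   true, // run block-level validation checks
		Generate:   false,
		// MaxTxnBytesPerBlock and ProtoParams left zero/nil: the consensus
		// defaults for hdr.CurrentProtocol apply (see the clamp below).
	})
	if err != nil {
		return err
	}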
// StartEvaluator creates a BlockEvaluator, given a ledger and a block header
// of the block that the caller is planning to evaluate. If the length of the
// payset being evaluated is known in advance, a paysetHint >= 0 can be
// passed, avoiding unnecessary payset slice growth.
-func (l *Ledger) StartEvaluator(hdr bookkeeping.BlockHeader, paysetHint int) (*BlockEvaluator, error) {
- proto, ok := config.Consensus[hdr.CurrentProtocol]
- if !ok {
- return nil, protocol.Error(hdr.CurrentProtocol)
+func StartEvaluator(l LedgerForEvaluator, hdr bookkeeping.BlockHeader, evalOpts EvaluatorOptions) (*BlockEvaluator, error) {
+ var proto config.ConsensusParams
+ if evalOpts.ProtoParams == nil {
+ var ok bool
+ proto, ok = config.Consensus[hdr.CurrentProtocol]
+ if !ok {
+ return nil, protocol.Error(hdr.CurrentProtocol)
+ }
+ } else {
+ proto = *evalOpts.ProtoParams
}
- return startEvaluator(l, hdr, proto, paysetHint, true, true)
-}
+ // if the caller did not provide a valid block size limit, fall back to the consensus params default.
+ if evalOpts.MaxTxnBytesPerBlock <= 0 || evalOpts.MaxTxnBytesPerBlock > proto.MaxTxnBytesPerBlock {
+ evalOpts.MaxTxnBytesPerBlock = proto.MaxTxnBytesPerBlock
+ }
-func startEvaluator(l ledgerForEvaluator, hdr bookkeeping.BlockHeader, proto config.ConsensusParams, paysetHint int, validate bool, generate bool) (*BlockEvaluator, error) {
if hdr.Round == 0 {
return nil, ErrRoundZero
}
@@ -402,39 +458,36 @@ func startEvaluator(l ledgerForEvaluator, hdr bookkeeping.BlockHeader, proto con
return nil, protocol.Error(prevHeader.CurrentProtocol)
}
- base := &roundCowBase{
- l: l,
- // round that lookups come from is previous block. We validate
- // the block at this round below, so underflow will be caught.
- // If we are not validating, we must have previously checked
- // an agreement.Certificate attesting that hdr is valid.
- rnd: hdr.Round - 1,
- txnCount: prevHeader.TxnCounter,
- proto: proto,
- accounts: make(map[basics.Address]basics.AccountData),
- }
+ // The round that lookups come from is the previous block. We validate
+ // the block at this round below, so underflow will be caught.
+ // If we are not validating, we must have previously checked
+ // an agreement.Certificate attesting that hdr is valid.
+ base := makeRoundCowBase(
+ l, hdr.Round-1, prevHeader.TxnCounter, basics.Round(0), proto)
eval := &BlockEvaluator{
- validate: validate,
- generate: generate,
+ validate: evalOpts.Validate,
+ generate: evalOpts.Generate,
prevHeader: prevHeader,
block: bookkeeping.Block{BlockHeader: hdr},
specials: transactions.SpecialAddresses{
FeeSink: hdr.FeeSink,
RewardsPool: hdr.RewardsPool,
},
- proto: proto,
- genesisHash: l.GenesisHash(),
- l: l,
+ proto: proto,
+ genesisHash: l.GenesisHash(),
+ l: l,
+ maxTxnBytesPerBlock: evalOpts.MaxTxnBytesPerBlock,
}
// Preallocate space for the payset so that we don't have to
// dynamically grow a slice (if evaluating a whole block).
- if paysetHint > 0 {
- if paysetHint > maxPaysetHint {
- paysetHint = maxPaysetHint
+ if evalOpts.PaysetHint > 0 {
+ maxPaysetHint := evalOpts.MaxTxnBytesPerBlock / averageEncodedTxnSizeHint
+ if evalOpts.PaysetHint > maxPaysetHint {
+ evalOpts.PaysetHint = maxPaysetHint
}
- eval.block.Payset = make([]transactions.SignedTxnInBlock, 0, paysetHint)
+ eval.block.Payset = make([]transactions.SignedTxnInBlock, 0, evalOpts.PaysetHint)
}
base.compactCertNextRnd = eval.prevHeader.CompactCert[protocol.CompactCertBasic].CompactCertNextRound
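The payset hint cap is now derived from the block's byte budget rather than a fixed constant. As a worked example, assuming a MaxTxnBytesPerBlock of 1,000,000 (an illustrative value, not taken from this diff):

	// 1_000_000 / 150 == 6666, so an oversized caller hint is clamped to
	// 6666 entries instead of the old fixed maxPaysetHint of 20000.
	const exampleMaxPaysetHint = 1_000_000 / averageEncodedTxnSizeHint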
@@ -451,10 +504,13 @@ func startEvaluator(l ledgerForEvaluator, hdr bookkeeping.BlockHeader, proto con
base.compactCertNextRnd = votersRound + basics.Round(proto.CompactCertRounds)
}
- prevTotals, err := l.Totals(eval.prevHeader.Round)
+ latestRound, prevTotals, err := l.LatestTotals()
if err != nil {
return nil, err
}
+ if latestRound != eval.prevHeader.Round {
+ return nil, ledgercore.ErrNonSequentialBlockEval{EvaluatorRound: hdr.Round, LatestRound: latestRound}
+ }
poolAddr := eval.prevHeader.RewardsPool
// get the reward pool account data without any rewards
@@ -466,16 +522,16 @@ func startEvaluator(l ledgerForEvaluator, hdr bookkeeping.BlockHeader, proto con
// this is expected to be a no-op, but update the rewards on the rewards pool if it was configured to receive rewards ( unlike mainnet ).
incentivePoolData = incentivePoolData.WithUpdatedRewards(prevProto, eval.prevHeader.RewardsLevel)
- if generate {
+ if evalOpts.Generate {
if eval.proto.SupportGenesisHash {
eval.block.BlockHeader.GenesisHash = eval.genesisHash
}
eval.block.BlockHeader.RewardsState = eval.prevHeader.NextRewardsState(hdr.Round, proto, incentivePoolData.MicroAlgos, prevTotals.RewardUnits())
}
// set the eval state with the current header
- eval.state = makeRoundCowState(base, eval.block.BlockHeader, proto, eval.prevHeader.TimeStamp, paysetHint)
+ eval.state = makeRoundCowState(base, eval.block.BlockHeader, proto, eval.prevHeader.TimeStamp, prevTotals, evalOpts.PaysetHint)
- if validate {
+ if evalOpts.Validate {
err := eval.block.BlockHeader.PreCheck(eval.prevHeader)
if err != nil {
return nil, err
@@ -593,7 +649,7 @@ func (eval *BlockEvaluator) TestTransactionGroup(txgroup []transactions.SignedTx
var group transactions.TxGroup
for gi, txn := range txgroup {
- err := eval.testTransaction(txn, cow)
+ err := eval.TestTransaction(txn, cow)
if err != nil {
return err
}
@@ -625,10 +681,10 @@ func (eval *BlockEvaluator) TestTransactionGroup(txgroup []transactions.SignedTx
return nil
}
-// testTransaction performs basic duplicate detection and well-formedness checks
+// TestTransaction performs basic duplicate detection and well-formedness checks
// on a single transaction, but does not actually add the transaction to the block
// evaluator, or modify the block evaluator state in any other visible way.
-func (eval *BlockEvaluator) testTransaction(txn transactions.SignedTxn, cow *roundCowState) error {
+func (eval *BlockEvaluator) TestTransaction(txn transactions.SignedTxn, cow *roundCowState) error {
// Transaction valid (not expired)?
err := txn.Txn.Alive(eval.block)
if err != nil {
@@ -654,7 +710,7 @@ func (eval *BlockEvaluator) testTransaction(txn transactions.SignedTxn, cow *rou
// If the transaction cannot be added to the block without violating some constraints,
// an error is returned and the block evaluator state is unchanged.
func (eval *BlockEvaluator) Transaction(txn transactions.SignedTxn, ad transactions.ApplyData) error {
- return eval.transactionGroup([]transactions.SignedTxnWithAD{
+ return eval.TransactionGroup([]transactions.SignedTxnWithAD{
{
SignedTxn: txn,
ApplyData: ad,
@@ -662,13 +718,6 @@ func (eval *BlockEvaluator) Transaction(txn transactions.SignedTxn, ad transacti
})
}
-// TransactionGroup tentatively adds a new transaction group as part of this block evaluation.
-// If the transaction group cannot be added to the block without violating some constraints,
-// an error is returned and the block evaluator state is unchanged.
-func (eval *BlockEvaluator) TransactionGroup(txads []transactions.SignedTxnWithAD) error {
- return eval.transactionGroup(txads)
-}
-
// prepareEvalParams creates a logic.EvalParams for each ApplicationCall
// transaction in the group
func (eval *BlockEvaluator) prepareEvalParams(txgroup []transactions.SignedTxnWithAD) []*logic.EvalParams {
@@ -716,10 +765,10 @@ func (eval *BlockEvaluator) prepareEvalParams(txgroup []transactions.SignedTxnWi
return res
}
-// transactionGroup tentatively executes a group of transactions as part of this block evaluation.
+// TransactionGroup tentatively executes a group of transactions as part of this block evaluation.
// If the transaction group cannot be added to the block without violating some constraints,
// an error is returned and the block evaluator state is unchanged.
-func (eval *BlockEvaluator) transactionGroup(txgroup []transactions.SignedTxnWithAD) error {
+func (eval *BlockEvaluator) TransactionGroup(txgroup []transactions.SignedTxnWithAD) error {
// Nothing to do if there are no transactions.
if len(txgroup) == 0 {
return nil
@@ -751,8 +800,8 @@ func (eval *BlockEvaluator) transactionGroup(txgroup []transactions.SignedTxnWit
if eval.validate {
groupTxBytes += txib.GetEncodedLength()
- if eval.blockTxBytes+groupTxBytes > eval.proto.MaxTxnBytesPerBlock {
- return ErrNoSpace
+ if eval.blockTxBytes+groupTxBytes > eval.maxTxnBytesPerBlock {
+ return ledgercore.ErrNoSpace
}
}
@@ -927,25 +976,9 @@ func (eval *BlockEvaluator) applyTransaction(tx transactions.Transaction, balanc
return
}
- // rekeying: update balrecord.AuthAddr to tx.RekeyTo if provided
- if (tx.RekeyTo != basics.Address{}) {
- var acct basics.AccountData
- acct, err = balances.Get(tx.Sender, false)
- if err != nil {
- return
- }
- // Special case: rekeying to the account's actual address just sets acct.AuthAddr to 0
- // This saves 32 bytes in your balance record if you want to go back to using your original key
- if tx.RekeyTo == tx.Sender {
- acct.AuthAddr = basics.Address{}
- } else {
- acct.AuthAddr = tx.RekeyTo
- }
-
- err = balances.Put(tx.Sender, acct)
- if err != nil {
- return
- }
+ err = apply.Rekey(balances, &tx)
+ if err != nil {
+ return
}
switch tx.Type {
@@ -1010,13 +1043,17 @@ func (eval *BlockEvaluator) compactCertVotersAndTotal() (root crypto.Digest, tot
}
if voters != nil {
- root = voters.Tree.Root()
- total = voters.TotalWeight
+ root, total = voters.Tree.Root(), voters.TotalWeight
}
return
}
+// TestingTxnCounter returns the current evaluator transaction counter. It is intended for testing purposes only.
+func (eval *BlockEvaluator) TestingTxnCounter() uint64 {
+ return eval.state.txnCounter()
+}
+
// Call "endOfBlock" after all the block's rewards and transactions are processed.
func (eval *BlockEvaluator) endOfBlock() error {
if eval.generate {
@@ -1032,6 +1069,8 @@ func (eval *BlockEvaluator) endOfBlock() error {
eval.block.TxnCounter = 0
}
+ eval.generateExpiredOnlineAccountsList()
+
if eval.proto.CompactCertRounds > 0 {
var basicCompactCert bookkeeping.CompactCertState
basicCompactCert.CompactCertVoters, basicCompactCert.CompactCertVotersTotal, err = eval.compactCertVotersAndTotal()
@@ -1046,12 +1085,18 @@ func (eval *BlockEvaluator) endOfBlock() error {
}
}
- return nil
-}
+ err := eval.validateExpiredOnlineAccounts()
+ if err != nil {
+ return err
+ }
+
+ err = eval.resetExpiredOnlineAccountsParticipationKeys()
+ if err != nil {
+ return err
+ }
-// FinalValidation does the validation that must happen after the block is built and all state updates are computed
-func (eval *BlockEvaluator) finalValidation() error {
eval.state.mods.OptimizeAllocatedMemory(eval.proto)
+
if eval.validate {
// check commitments
txnRoot, err := eval.block.PaysetCommit()
@@ -1090,6 +1135,131 @@ func (eval *BlockEvaluator) finalValidation() error {
}
}
+ err = eval.state.CalculateTotals()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// generateExpiredOnlineAccountsList creates the list of expired participation accounts by traversing the
+// modified accounts in the state deltas and testing whether any of them need to be reset.
+func (eval *BlockEvaluator) generateExpiredOnlineAccountsList() {
+ if !eval.generate {
+ return
+ }
+ // Go through each account modified in this round and check whether it
+ // meets the criteria for inclusion in the expired participation accounts list.
+ modifiedAccounts := eval.state.mods.Accts.ModifiedAccounts()
+ currentRound := eval.Round()
+
+ expectedMaxNumberOfExpiredAccounts := eval.proto.MaxProposedExpiredOnlineAccounts
+
+ for i := 0; i < len(modifiedAccounts) && len(eval.block.ParticipationUpdates.ExpiredParticipationAccounts) < expectedMaxNumberOfExpiredAccounts; i++ {
+ accountAddr := modifiedAccounts[i]
+ acctDelta, found := eval.state.mods.Accts.Get(accountAddr)
+ if !found {
+ continue
+ }
+
+ // true if the account is online
+ isOnline := acctDelta.Status == basics.Online
+ // true if the accounts last valid round has passed
+ pastCurrentRound := acctDelta.VoteLastValid < currentRound
+
+ if isOnline && pastCurrentRound {
+ eval.block.ParticipationUpdates.ExpiredParticipationAccounts = append(
+ eval.block.ParticipationUpdates.ExpiredParticipationAccounts,
+ accountAddr,
+ )
+ }
+ }
+}
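The expiry test above, mirrored by validateExpiredOnlineAccounts below, reduces to a single predicate; as a sketch:

	// An account qualifies for reset when it is still marked Online but its
	// participation keys' last valid round is behind the round being built.
	func isExpiredOnline(acct basics.AccountData, current basics.Round) bool {
		return acct.Status == basics.Online && acct.VoteLastValid < current
	}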
+
+// validateExpiredOnlineAccounts tests the expired online accounts specified in ExpiredParticipationAccounts, and verifies
+// that they have all expired and need to be reset.
+func (eval *BlockEvaluator) validateExpiredOnlineAccounts() error {
+ if !eval.validate {
+ return nil
+ }
+ expectedMaxNumberOfExpiredAccounts := eval.proto.MaxProposedExpiredOnlineAccounts
+ lengthOfExpiredParticipationAccounts := len(eval.block.ParticipationUpdates.ExpiredParticipationAccounts)
+
+ // If the length of the array is strictly greater than our max then we have an error.
+ // This works when the expected number of accounts is zero (i.e. it is disabled) as well
+ if lengthOfExpiredParticipationAccounts > expectedMaxNumberOfExpiredAccounts {
+ return fmt.Errorf("length of expired accounts (%d) was greater than expected (%d)",
+ lengthOfExpiredParticipationAccounts, expectedMaxNumberOfExpiredAccounts)
+ }
+
+ // For security reasons, we need to make sure that all addresses in the expired participation accounts
+ // are unique. We use this map to keep track of previously seen addresses.
+ addressSet := make(map[basics.Address]bool, lengthOfExpiredParticipationAccounts)
+
+ // Validate that all expired accounts meet the current criteria
+ currentRound := eval.Round()
+ for _, accountAddr := range eval.block.ParticipationUpdates.ExpiredParticipationAccounts {
+
+ if _, exists := addressSet[accountAddr]; exists {
+ // We shouldn't have duplicate addresses...
+ return fmt.Errorf("duplicate address found: %v", accountAddr)
+ }
+
+ // Record that we have seen this address
+ addressSet[accountAddr] = true
+
+ acctData, err := eval.state.lookup(accountAddr)
+ if err != nil {
+ return fmt.Errorf("endOfBlock was unable to retrieve account %v : %w", accountAddr, err)
+ }
+
+ // true if the account is online
+ isOnline := acctData.Status == basics.Online
+ // true if the accounts last valid round has passed
+ pastCurrentRound := acctData.VoteLastValid < currentRound
+
+ if !isOnline {
+ return fmt.Errorf("endOfBlock found %v was not online but %v", accountAddr, acctData.Status)
+ }
+
+ if !pastCurrentRound {
+ return fmt.Errorf("endOfBlock found %v round (%d) was not less than current round (%d)", accountAddr, acctData.VoteLastValid, currentRound)
+ }
+ }
+ return nil
+}
+
+// resetExpiredOnlineAccountsParticipationKeys marks the expired accounts offline, clearing their participation keys, after all transactions and rewards are processed
+func (eval *BlockEvaluator) resetExpiredOnlineAccountsParticipationKeys() error {
+ expectedMaxNumberOfExpiredAccounts := eval.proto.MaxProposedExpiredOnlineAccounts
+ lengthOfExpiredParticipationAccounts := len(eval.block.ParticipationUpdates.ExpiredParticipationAccounts)
+
+ // If the length of the array is strictly greater than our max then we have an error.
+ // This works when the expected number of accounts is zero (i.e. it is disabled) as well
+ if lengthOfExpiredParticipationAccounts > expectedMaxNumberOfExpiredAccounts {
+ return fmt.Errorf("length of expired accounts (%d) was greater than expected (%d)",
+ lengthOfExpiredParticipationAccounts, expectedMaxNumberOfExpiredAccounts)
+ }
+
+ for _, accountAddr := range eval.block.ParticipationUpdates.ExpiredParticipationAccounts {
+ acctData, err := eval.state.lookup(accountAddr)
+ if err != nil {
+ return fmt.Errorf("resetExpiredOnlineAccountsParticipationKeys was unable to retrieve account %v : %w", accountAddr, err)
+ }
+
+ // Reset the appropriate account data
+ acctData.ClearOnlineState()
+
+ // Update the account information
+ err = eval.state.Put(accountAddr, acctData)
+ if err != nil {
+ return err
+ }
+ }
return nil
}
@@ -1100,7 +1270,7 @@ func (eval *BlockEvaluator) finalValidation() error {
// After a call to GenerateBlock, the BlockEvaluator can still be used to
// accept transactions. However, to guard against reuse, subsequent calls
// to GenerateBlock on the same BlockEvaluator will fail.
-func (eval *BlockEvaluator) GenerateBlock() (*ValidatedBlock, error) {
+func (eval *BlockEvaluator) GenerateBlock() (*ledgercore.ValidatedBlock, error) {
if !eval.generate {
logging.Base().Panicf("GenerateBlock() called but generate is false")
}
@@ -1114,15 +1284,7 @@ func (eval *BlockEvaluator) GenerateBlock() (*ValidatedBlock, error) {
return nil, err
}
- err = eval.finalValidation()
- if err != nil {
- return nil, err
- }
-
- vb := ValidatedBlock{
- blk: eval.block,
- delta: eval.state.deltas(),
- }
+ vb := ledgercore.MakeValidatedBlock(eval.block, eval.state.deltas())
eval.blockGenerated = true
proto, ok := config.Consensus[eval.block.BlockHeader.CurrentProtocol]
if !ok {
@@ -1130,7 +1292,7 @@ func (eval *BlockEvaluator) GenerateBlock() (*ValidatedBlock, error) {
"unknown consensus version: %s", eval.block.BlockHeader.CurrentProtocol)
}
eval.state = makeRoundCowState(
- eval.state, eval.block.BlockHeader, proto, eval.prevHeader.TimeStamp,
+ eval.state, eval.block.BlockHeader, proto, eval.prevHeader.TimeStamp, eval.state.mods.Totals,
len(eval.block.Payset))
return &vb, nil
}
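GenerateBlock now returns the shared ledgercore.ValidatedBlock, so a proposer can hand the result straight to the ledger without re-evaluating it. A sketch, assuming l is the ledger and cert comes from agreement:

	vb, err := eval.GenerateBlock()
	if err != nil {
		return err
	}
	// The validated block carries its state delta, so AddValidatedBlock
	// can apply it without repeating evaluation.
	return l.AddValidatedBlock(*vb, cert)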
@@ -1175,19 +1337,19 @@ func (validator *evalTxValidator) run() {
}
}
+// Eval is the main evaluator entrypoint.
// used by Ledger.Validate() Ledger.AddBlock() Ledger.trackerEvalVerified()(accountUpdates.loadFromDisk())
//
-// Validate: eval(ctx, l, blk, true, txcache, executionPool, true)
-// AddBlock: eval(context.Background(), l, blk, false, txcache, nil, true)
-// tracker: eval(context.Background(), l, blk, false, txcache, nil, false)
-func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, validate bool, txcache verify.VerifiedTransactionCache, executionPool execpool.BacklogPool) (ledgercore.StateDelta, error) {
- proto, ok := config.Consensus[blk.BlockHeader.CurrentProtocol]
- if !ok {
- return ledgercore.StateDelta{}, protocol.Error(blk.BlockHeader.CurrentProtocol)
- }
-
- eval, err := startEvaluator(
- l, blk.BlockHeader, proto, len(blk.Payset), validate, false)
+// Validate: Eval(ctx, l, blk, true, txcache, executionPool)
+// AddBlock: Eval(context.Background(), l, blk, false, txcache, nil)
+// tracker: Eval(context.Background(), l, blk, false, txcache, nil)
+func Eval(ctx context.Context, l LedgerForEvaluator, blk bookkeeping.Block, validate bool, txcache verify.VerifiedTransactionCache, executionPool execpool.BacklogPool) (ledgercore.StateDelta, error) {
+ eval, err := StartEvaluator(l, blk.BlockHeader,
+ EvaluatorOptions{
+ PaysetHint: len(blk.Payset),
+ Validate: validate,
+ Generate: false,
+ })
if err != nil {
return ledgercore.StateDelta{}, err
}
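Because Eval no longer takes consensus params or a generate flag, a validating caller recovers the behavior of the removed Ledger.Validate in two steps (a sketch; ctx, txcache, and executionPool are assumed from the caller):

	delta, err := Eval(ctx, l, blk, true, txcache, executionPool)
	if err != nil {
		return nil, err
	}
	vb := ledgercore.MakeValidatedBlock(blk, delta)
	return &vb, nil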
@@ -1241,7 +1403,7 @@ transactionGroupLoop:
if !ok {
break transactionGroupLoop
} else if txgroup.err != nil {
- return ledgercore.StateDelta{}, err
+ return ledgercore.StateDelta{}, txgroup.err
}
for _, br := range txgroup.balances {
@@ -1269,7 +1431,7 @@ transactionGroupLoop:
// If validating, do final block checks that depend on our new state
if validate {
- // wait for the validation to complete.
+ // wait for the signature validation to complete.
select {
case <-ctx.Done():
return ledgercore.StateDelta{}, ctx.Err()
@@ -1281,10 +1443,6 @@ transactionGroupLoop:
return ledgercore.StateDelta{}, err
}
}
- err = eval.finalValidation()
- if err != nil {
- return ledgercore.StateDelta{}, err
- }
}
return eval.state.deltas(), nil
@@ -1306,19 +1464,9 @@ func maxAddressesInTxn(proto *config.ConsensusParams) int {
return 7 + proto.MaxAppTxnAccounts
}
-// Write the list of addresses referenced in `txn` to `out`. Addresses might repeat.
-func getTxnAddresses(txn *transactions.Transaction, out *[]basics.Address) {
- *out = (*out)[:0]
-
- *out = append(
- *out, txn.Sender, txn.Receiver, txn.CloseRemainderTo, txn.AssetSender,
- txn.AssetReceiver, txn.AssetCloseTo, txn.FreezeAccount)
- *out = append(*out, txn.ApplicationCallTxnFields.Accounts...)
-}
-
// loadAccounts loads the account data for the provided transaction group list. It also loads the feeSink account and adds it to the first returned transaction group.
// The order of the transaction groups returned by the channel is identical to the one in the input array.
-func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, groups [][]transactions.SignedTxnWithAD, feeSinkAddr basics.Address, consensusParams config.ConsensusParams) chan loadedTransactionGroup {
+func loadAccounts(ctx context.Context, l LedgerForEvaluator, rnd basics.Round, groups [][]transactions.SignedTxnWithAD, feeSinkAddr basics.Address, consensusParams config.ConsensusParams) chan loadedTransactionGroup {
outChan := make(chan loadedTransactionGroup, len(groups))
go func() {
// groupTask helps to organize the account loading for each transaction group.
@@ -1483,95 +1631,3 @@ func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, g
}()
return outChan
}
-
-// Validate uses the ledger to validate block blk as a candidate next block.
-// It returns an error if blk is not the expected next block, or if blk is
-// not a valid block (e.g., it has duplicate transactions, overspends some
-// account, etc).
-func (l *Ledger) Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ValidatedBlock, error) {
- delta, err := eval(ctx, l, blk, true, l.verifiedTxnCache, executionPool)
- if err != nil {
- return nil, err
- }
-
- vb := ValidatedBlock{
- blk: blk,
- delta: delta,
- }
- return &vb, nil
-}
-
-// ValidatedBlock represents the result of a block validation. It can
-// be used to efficiently add the block to the ledger, without repeating
-// the work of applying the block's changes to the ledger state.
-type ValidatedBlock struct {
- blk bookkeeping.Block
- delta ledgercore.StateDelta
-}
-
-// Block returns the underlying Block for a ValidatedBlock.
-func (vb ValidatedBlock) Block() bookkeeping.Block {
- return vb.blk
-}
-
-// WithSeed returns a copy of the ValidatedBlock with a modified seed.
-func (vb ValidatedBlock) WithSeed(s committee.Seed) ValidatedBlock {
- newblock := vb.blk
- newblock.BlockHeader.Seed = s
-
- return ValidatedBlock{
- blk: newblock,
- delta: vb.delta,
- }
-}
-
-// GetBlockAddresses returns all addresses referenced in `block`.
-func GetBlockAddresses(block *bookkeeping.Block) map[basics.Address]struct{} {
- // Reserve a reasonable memory size for the map.
- res := make(map[basics.Address]struct{}, len(block.Payset)+2)
- res[block.FeeSink] = struct{}{}
- res[block.RewardsPool] = struct{}{}
-
- var refAddresses []basics.Address
- for _, stib := range block.Payset {
- getTxnAddresses(&stib.Txn, &refAddresses)
- for _, address := range refAddresses {
- res[address] = struct{}{}
- }
- }
-
- return res
-}
-
-// Eval evaluates a block without validation using the given `proto`. Return the state
-// delta and transactions with modified apply data according to `proto`.
-// This function is used by Indexer which modifies `proto` to retrieve the asset
-// close amount for each transaction even when the real consensus parameters do not
-// support it.
-func Eval(l ledgerForEvaluator, blk *bookkeeping.Block, proto config.ConsensusParams) (ledgercore.StateDelta, []transactions.SignedTxnInBlock, error) {
- eval, err := startEvaluator(
- l, blk.BlockHeader, proto, len(blk.Payset), false, false)
- if err != nil {
- return ledgercore.StateDelta{}, []transactions.SignedTxnInBlock{}, err
- }
-
- paysetgroups, err := blk.DecodePaysetGroups()
- if err != nil {
- return ledgercore.StateDelta{}, []transactions.SignedTxnInBlock{}, err
- }
-
- for _, group := range paysetgroups {
- err = eval.TransactionGroup(group)
- if err != nil {
- return ledgercore.StateDelta{}, []transactions.SignedTxnInBlock{}, err
- }
- }
-
- // Finally, process any pending end-of-block state changes.
- err = eval.endOfBlock()
- if err != nil {
- return ledgercore.StateDelta{}, []transactions.SignedTxnInBlock{}, err
- }
-
- return eval.state.deltas(), eval.block.Payset, nil
-}
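The ValidatedBlock type, its WithSeed helper, and the indexer-facing Eval wrapper are deleted here; ValidatedBlock itself now lives in ledgercore (see MakeValidatedBlock above). Assuming the Block and WithSeed accessors moved with it, usage becomes:

	vb := ledgercore.MakeValidatedBlock(blk, delta)
	seeded := vb.WithSeed(seed) // agreement stamps the seed before proposing
	_ = seeded.Block()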
diff --git a/ledger/internal/eval_blackbox_test.go b/ledger/internal/eval_blackbox_test.go
new file mode 100644
index 000000000..816bd6e68
--- /dev/null
+++ b/ledger/internal/eval_blackbox_test.go
@@ -0,0 +1,1081 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package internal_test
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/execpool"
+)
+
+var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
+var minFee basics.MicroAlgos
+
+func init() {
+ params := config.Consensus[protocol.ConsensusCurrentVersion]
+ minFee = basics.MicroAlgos{Raw: params.MinTxnFee}
+}
+
+func TestBlockEvaluator(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, keys := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ genesisBlockHeader, err := l.BlockHdr(basics.Round(0))
+ newBlock := bookkeeping.MakeBlock(genesisBlockHeader)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+
+ genHash := l.GenesisHash()
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addrs[0],
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round(),
+ GenesisHash: genHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addrs[1],
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+
+ // Correct signature should work
+ st := txn.Sign(keys[0])
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ // Broken signature should fail
+ stbad := st
+ st.Sig[2] ^= 8
+ txgroup := []transactions.SignedTxn{stbad}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+
+ // Repeat should fail
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.Error(t, err)
+
+ // out of range should fail
+ btxn := txn
+ btxn.FirstValid++
+ btxn.LastValid += 2
+ st = btxn.Sign(keys[0])
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.Error(t, err)
+
+ // bogus group should fail
+ btxn = txn
+ btxn.Group[1] = 1
+ st = btxn.Sign(keys[0])
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.Error(t, err)
+
+ // mixed fields should fail
+ btxn = txn
+ btxn.XferAsset = 3
+ st = btxn.Sign(keys[0])
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ // We don't test eval.Transaction() here because it doesn't check txn.WellFormed(), instead relying on that to have already been checked by the transaction pool.
+ // err = eval.Transaction(st, transactions.ApplyData{})
+ // require.Error(t, err)
+
+ selfTxn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addrs[2],
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round(),
+ GenesisHash: genHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addrs[2],
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+ stxn := selfTxn.Sign(keys[2])
+
+ // TestTransactionGroup() and Transaction() should have the same outcome, but work slightly different code paths.
+ txgroup = []transactions.SignedTxn{stxn}
+ err = eval.TestTransactionGroup(txgroup)
+ require.NoError(t, err)
+
+ err = eval.Transaction(stxn, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ t3 := txn
+ t3.Amount.Raw++
+ t4 := selfTxn
+ t4.Amount.Raw++
+
+ // a group without .Group should fail
+ s3 := t3.Sign(keys[0])
+ s4 := t4.Sign(keys[2])
+ txgroup = []transactions.SignedTxn{s3, s4}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ txgroupad := transactions.WrapSignedTxnsWithAD(txgroup)
+ err = eval.TransactionGroup(txgroupad)
+ require.Error(t, err)
+
+ // Test a group that should work
+ var group transactions.TxGroup
+ group.TxGroupHashes = []crypto.Digest{crypto.HashObj(t3), crypto.HashObj(t4)}
+ t3.Group = crypto.HashObj(group)
+ t4.Group = t3.Group
+ s3 = t3.Sign(keys[0])
+ s4 = t4.Sign(keys[2])
+ txgroup = []transactions.SignedTxn{s3, s4}
+ err = eval.TestTransactionGroup(txgroup)
+ require.NoError(t, err)
+
+ // disagreement on Group id should fail
+ t4bad := t4
+ t4bad.Group[3] ^= 3
+ s4bad := t4bad.Sign(keys[2])
+ txgroup = []transactions.SignedTxn{s3, s4bad}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ txgroupad = transactions.WrapSignedTxnsWithAD(txgroup)
+ err = eval.TransactionGroup(txgroupad)
+ require.Error(t, err)
+
+ // missing part of the group should fail
+ txgroup = []transactions.SignedTxn{s3}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+
+ validatedBlock, err := eval.GenerateBlock()
+ require.NoError(t, err)
+
+ accts := genesisInitState.Accounts
+ bal0 := accts[addrs[0]]
+ bal1 := accts[addrs[1]]
+ bal2 := accts[addrs[2]]
+
+ l.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+
+ bal0new, err := l.Lookup(newBlock.Round(), addrs[0])
+ require.NoError(t, err)
+ bal1new, err := l.Lookup(newBlock.Round(), addrs[1])
+ require.NoError(t, err)
+ bal2new, err := l.Lookup(newBlock.Round(), addrs[2])
+ require.NoError(t, err)
+
+ require.Equal(t, bal0new.MicroAlgos.Raw, bal0.MicroAlgos.Raw-minFee.Raw-100)
+ require.Equal(t, bal1new.MicroAlgos.Raw, bal1.MicroAlgos.Raw+100)
+ require.Equal(t, bal2new.MicroAlgos.Raw, bal2.MicroAlgos.Raw-minFee.Raw)
+}
+
+func TestRekeying(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // Pretend rekeying is supported
+ actual := config.Consensus[protocol.ConsensusCurrentVersion]
+ pretend := actual
+ pretend.SupportRekeying = true
+ config.Consensus[protocol.ConsensusCurrentVersion] = pretend
+ defer func() {
+ config.Consensus[protocol.ConsensusCurrentVersion] = actual
+ }()
+
+ // Bring up a ledger
+ genesisInitState, addrs, keys := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ // Make a new block
+ nextRound := l.Latest() + basics.Round(1)
+ genHash := l.GenesisHash()
+
+ // Test plan
+ // Syntax: [A -> B][C, D] means transaction from A that rekeys to B with authaddr C and actual sig from D
+ makeTxn := func(sender, rekeyto, authaddr basics.Address, signer *crypto.SignatureSecrets, uniq uint8) transactions.SignedTxn {
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: minFee,
+ FirstValid: nextRound,
+ LastValid: nextRound,
+ GenesisHash: genHash,
+ RekeyTo: rekeyto,
+ Note: []byte{uniq},
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: sender,
+ },
+ }
+ sig := signer.Sign(txn)
+ return transactions.SignedTxn{Txn: txn, Sig: sig, AuthAddr: authaddr}
+ }
+
+ tryBlock := func(stxns []transactions.SignedTxn) error {
+ // We'll make a block using the evaluator.
+ // When generating a block, the evaluator doesn't check transaction sigs -- it assumes the transaction pool already did that.
+ // So the ValidatedBlock that comes out isn't necessarily actually a valid block. We'll call Validate ourselves.
+ genesisHdr, err := l.BlockHdr(basics.Round(0))
+ require.NoError(t, err)
+ newBlock := bookkeeping.MakeBlock(genesisHdr)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+
+ for _, stxn := range stxns {
+ err = eval.Transaction(stxn, transactions.ApplyData{})
+ if err != nil {
+ return err
+ }
+ }
+ validatedBlock, err := eval.GenerateBlock()
+ if err != nil {
+ return err
+ }
+
+ backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
+ defer backlogPool.Shutdown()
+ _, err = l.Validate(context.Background(), validatedBlock.Block(), backlogPool)
+ return err
+ }
+
+ // Preamble transactions, which all of the blocks in this test will start with
+ // [A -> 0][0,A] (normal transaction)
+ // [A -> B][0,A] (rekey)
+ txn0 := makeTxn(addrs[0], basics.Address{}, basics.Address{}, keys[0], 0) // Normal transaction
+ txn1 := makeTxn(addrs[0], addrs[1], basics.Address{}, keys[0], 1) // Rekey transaction
+
+ // Test 1: Do only good things
+ // (preamble)
+ // [A -> 0][B,B] (normal transaction using new key)
+ // [A -> A][B,B] (rekey back to A, transaction still signed by B)
+ // [A -> 0][0,A] (normal transaction again)
+ test1txns := []transactions.SignedTxn{
+ txn0, txn1, // (preamble)
+ makeTxn(addrs[0], basics.Address{}, addrs[1], keys[1], 2), // [A -> 0][B,B]
+ makeTxn(addrs[0], addrs[0], addrs[1], keys[1], 3), // [A -> A][B,B]
+ makeTxn(addrs[0], basics.Address{}, basics.Address{}, keys[0], 4), // [A -> 0][0,A]
+ }
+ err = tryBlock(test1txns)
+ require.NoError(t, err)
+
+ // Test 2: Use old key after rekeying
+ // (preamble)
+ // [A -> A][0,A] (rekey back to A, but signed by A instead of B)
+ test2txns := []transactions.SignedTxn{
+ txn0, txn1, // (preamble)
+ makeTxn(addrs[0], addrs[0], basics.Address{}, keys[0], 2), // [A -> A][0,A]
+ }
+ err = tryBlock(test2txns)
+ require.Error(t, err)
+
+ // TODO: More tests
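+	// One possible follow-up case in the same notation (a sketch only):
+	// (preamble)
+	// [A -> C][B,B] (rekey again, from B to C)
+	// [A -> 0][B,B] (should fail: authaddr is now C, so B's sig is stale)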
+}
+
+func testEvalAppGroup(t *testing.T, schema basics.StateSchema) (*internal.BlockEvaluator, basics.Address, error) {
+ genesisInitState, addrs, keys := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ blkHeader, err := l.BlockHdr(basics.Round(0))
+ require.NoError(t, err)
+ newBlock := bookkeeping.MakeBlock(blkHeader)
+
+ eval, err := internal.StartEvaluator(l, newBlock.BlockHeader, internal.EvaluatorOptions{
+ Generate: true,
+ Validate: true})
+ require.NoError(t, err)
+
+ ops, err := logic.AssembleString(`#pragma version 2
+ txn ApplicationID
+ bz create
+ byte "caller"
+ txn Sender
+ app_global_put
+ b ok
+create:
+ byte "creator"
+ txn Sender
+ app_global_put
+ok:
+ int 1`)
+ require.NoError(t, err, ops.Errors)
+ approval := ops.Program
+ ops, err = logic.AssembleString("#pragma version 2\nint 1")
+ require.NoError(t, err)
+ clear := ops.Program
+
+ genHash := l.GenesisHash()
+ header := transactions.Header{
+ Sender: addrs[0],
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round(),
+ GenesisHash: genHash,
+ }
+ appcall1 := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: header,
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ GlobalStateSchema: schema,
+ ApprovalProgram: approval,
+ ClearStateProgram: clear,
+ },
+ }
+
+ appcall2 := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: header,
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: 1,
+ },
+ }
+
+ var group transactions.TxGroup
+ group.TxGroupHashes = []crypto.Digest{crypto.HashObj(appcall1), crypto.HashObj(appcall2)}
+ appcall1.Group = crypto.HashObj(group)
+ appcall2.Group = crypto.HashObj(group)
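+	// Setting Group to the hash of the TxGroup commits both app calls to the
+	// same group, so the evaluator processes them atomically.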
+ stxn1 := appcall1.Sign(keys[0])
+ stxn2 := appcall2.Sign(keys[0])
+
+ g := []transactions.SignedTxnWithAD{
+ {
+ SignedTxn: stxn1,
+ ApplyData: transactions.ApplyData{
+ EvalDelta: transactions.EvalDelta{GlobalDelta: map[string]basics.ValueDelta{
+ "creator": {Action: basics.SetBytesAction, Bytes: string(addrs[0][:])}},
+ },
+ ApplicationID: 1,
+ },
+ },
+ {
+ SignedTxn: stxn2,
+ ApplyData: transactions.ApplyData{
+ EvalDelta: transactions.EvalDelta{GlobalDelta: map[string]basics.ValueDelta{
+ "caller": {Action: basics.SetBytesAction, Bytes: string(addrs[0][:])}},
+ }},
+ },
+ }
+ txgroup := []transactions.SignedTxn{stxn1, stxn2}
+ err = eval.TestTransactionGroup(txgroup)
+ if err != nil {
+ return eval, addrs[0], err
+ }
+ err = eval.TransactionGroup(g)
+ return eval, addrs[0], err
+}
+
+// TestEvalAppStateCountsWithTxnGroup ensures txns in a group can't violate app state schema limits.
+// Specifically, it ensures that commitToParent -> applyChild copies the child's cow state usage
+// counts into the parent, and that the usage counts propagate correctly from parent cow to child
+// cow and back.
+func TestEvalAppStateCountsWithTxnGroup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ _, _, err := testEvalAppGroup(t, basics.StateSchema{NumByteSlice: 1})
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "store bytes count 2 exceeds schema bytes count 1")
+}
+
+// TestEvalAppAllocStateWithTxnGroup ensures roundCowState.deltas and applyStorageDelta
+// produce correct results when a txn group has storage allocate and storage update actions
+func TestEvalAppAllocStateWithTxnGroup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ eval, addr, err := testEvalAppGroup(t, basics.StateSchema{NumByteSlice: 2})
+ require.NoError(t, err)
+
+ vb, err := eval.GenerateBlock()
+ require.NoError(t, err)
+ deltas := vb.Delta()
+
+ ad, _ := deltas.Accts.Get(addr)
+ state := ad.AppParams[1].GlobalState
+ require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addr[:])}, state["caller"])
+ require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addr[:])}, state["creator"])
+}
+
+// nextBlock begins evaluation of a new block, after ledger creation or endBlock()
+func nextBlock(t testing.TB, ledger *ledger.Ledger, generate bool, protoParams *config.ConsensusParams) *internal.BlockEvaluator {
+ rnd := ledger.Latest()
+ hdr, err := ledger.BlockHdr(rnd)
+ require.NoError(t, err)
+
+ nextHdr := bookkeeping.MakeBlock(hdr).BlockHeader
+ eval, err := internal.StartEvaluator(ledger, nextHdr, internal.EvaluatorOptions{
+ Generate: generate,
+ Validate: false,
+ ProtoParams: protoParams,
+ })
+ require.NoError(t, err)
+ return eval
+}
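+
+// These helpers are meant to be used together; the tests below follow this
+// pattern (sketch):
+//
+//	eval := nextBlock(t, l, true, nil)
+//	txn(t, l, eval, &payTxn)
+//	vb := endBlock(t, l, eval)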
+
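+// fillDefaults populates GenesisHash and FirstValid from the ledger and
+// evaluator before applying the txntest defaults.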
+func fillDefaults(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txn *txntest.Txn) {
+ if txn.GenesisHash.IsZero() {
+ txn.GenesisHash = ledger.GenesisHash()
+ }
+ if txn.FirstValid == 0 {
+ txn.FirstValid = eval.Round()
+ }
+
+ txn.FillDefaults(ledger.GenesisProto())
+}
+
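+// txns evaluates each transaction in order via txn, failing the test on any
+// unexpected error.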
+func txns(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txns ...*txntest.Txn) {
+ t.Helper()
+ for _, txn1 := range txns {
+ txn(t, ledger, eval, txn1)
+ }
+}
+
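+// txn fills defaults, signs, and evaluates a single transaction; if a problem
+// substring is supplied, evaluation must fail with an error containing it.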
+func txn(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txn *txntest.Txn, problem ...string) {
+ t.Helper()
+ fillDefaults(t, ledger, eval, txn)
+ stxn := txn.SignedTxn()
+ err := eval.TestTransactionGroup([]transactions.SignedTxn{stxn})
+ if err != nil {
+ if len(problem) == 1 {
+ require.Contains(t, err.Error(), problem[0])
+ } else {
+ require.NoError(t, err) // Will obviously fail
+ }
+ return
+ }
+ err = eval.Transaction(stxn, transactions.ApplyData{})
+ if err != nil {
+ if len(problem) == 1 {
+ require.Contains(t, err.Error(), problem[0])
+ } else {
+ require.NoError(t, err) // Will obviously fail
+ }
+ return
+ }
+ require.Len(t, problem, 0)
+}
+
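+// txgroup fills defaults, signs, and evaluates txns as one atomic group,
+// returning the evaluation error instead of asserting on it.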
+func txgroup(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txns ...*txntest.Txn) error {
+ t.Helper()
+ for _, txn := range txns {
+ fillDefaults(t, ledger, eval, txn)
+ }
+ txgroup := txntest.SignedTxns(txns...)
+
+ err := eval.TestTransactionGroup(txgroup)
+ if err != nil {
+ return err
+ }
+
+ err = eval.TransactionGroup(transactions.WrapSignedTxnsWithAD(txgroup))
+ return err
+}
+
+func testEvalAppPoolingGroup(t *testing.T, schema basics.StateSchema, approvalProgram string, consensusVersion protocol.ConsensusVersion) error {
+ genesisInitState, addrs, _ := ledgertesting.GenesisWithProto(10, consensusVersion)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ protoParams := config.Consensus[consensusVersion]
+ eval := nextBlock(t, l, false, &protoParams)
+
+ appcall1 := txntest.Txn{
+ Sender: addrs[0],
+ Type: protocol.ApplicationCallTx,
+ GlobalStateSchema: schema,
+ ApprovalProgram: approvalProgram,
+ }
+
+ appcall2 := txntest.Txn{
+ Sender: addrs[0],
+ Type: protocol.ApplicationCallTx,
+ ApplicationID: basics.AppIndex(1),
+ }
+
+ appcall3 := txntest.Txn{
+ Sender: addrs[1],
+ Type: protocol.ApplicationCallTx,
+ ApplicationID: basics.AppIndex(1),
+ }
+
+ return txgroup(t, l, eval, &appcall1, &appcall2, &appcall3)
+}
+
+// TestEvalAppPooledBudgetWithTxnGroup ensures 3 app call txns can successfully pool
+// budgets in a group txn, and that an error is returned if the pooled budget is exceeded
+func TestEvalAppPooledBudgetWithTxnGroup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ source := func(n int, m int) string {
+ return "#pragma version 4\nbyte 0x1337BEEF\n" + strings.Repeat("keccak256\n", n) +
+ strings.Repeat("substring 0 4\n", m) + "pop\nint 1\n"
+ }
+
+ params := []protocol.ConsensusVersion{
+ protocol.ConsensusV29,
+ protocol.ConsensusFuture,
+ }
+
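+	// Each case tunes n (keccak256 reps) and m (substring reps) so the program
+	// cost lands just under or just over the available budget: 700 per program
+	// before pooling (V29) vs 3*700=2100 pooled across the three app calls in
+	// the group (vFuture), matching the expected error strings.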
+ cases := []struct {
+ prog string
+ isSuccessV29 bool
+ isSuccessVFuture bool
+ expectedErrorV29 string
+ expectedErrorVFuture string
+ }{
+ {source(5, 47), true, true,
+ "",
+ ""},
+ {source(5, 48), false, true,
+ "pc=157 dynamic cost budget exceeded, executing pushint: remaining budget is 700 but program cost was 701",
+ ""},
+ {source(16, 17), false, true,
+ "pc= 12 dynamic cost budget exceeded, executing keccak256: remaining budget is 700 but program cost was 781",
+ ""},
+ {source(16, 18), false, false,
+ "pc= 12 dynamic cost budget exceeded, executing keccak256: remaining budget is 700 but program cost was 781",
+ "pc= 78 dynamic cost budget exceeded, executing pushint: remaining budget is 2100 but program cost was 2101"},
+ }
+
+ for i, param := range params {
+ for j, testCase := range cases {
+ t.Run(fmt.Sprintf("i=%d,j=%d", i, j), func(t *testing.T) {
+ err := testEvalAppPoolingGroup(t, basics.StateSchema{NumByteSlice: 3}, testCase.prog, param)
+ if !testCase.isSuccessV29 && reflect.DeepEqual(param, protocol.ConsensusV29) {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), testCase.expectedErrorV29)
+ } else if !testCase.isSuccessVFuture && reflect.DeepEqual(param, protocol.ConsensusFuture) {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), testCase.expectedErrorVFuture)
+ }
+ })
+ }
+ }
+}
+
+// endBlock completes the block being created, adds it to the ledger, and returns the ValidatedBlock for inspection
+func endBlock(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator) *ledgercore.ValidatedBlock {
+ validatedBlock, err := eval.GenerateBlock()
+ require.NoError(t, err)
+ err = ledger.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+ require.NoError(t, err)
+ return validatedBlock
+}
+
+func TestRewardsInAD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, _ := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ payTxn := txntest.Txn{Type: protocol.PaymentTx, Sender: addrs[0], Receiver: addrs[1]}
+
+ // Build up Residue in RewardsState so it's ready to pay
+ for i := 1; i < 10; i++ {
+ eval := nextBlock(t, l, true, nil)
+ endBlock(t, l, eval)
+ }
+
+ eval := nextBlock(t, l, true, nil)
+ txn(t, l, eval, &payTxn)
+ vb, err := eval.GenerateBlock()
+ require.NoError(t, err)
+ payInBlock := vb.Block().Payset[0]
+ require.Greater(t, payInBlock.ApplyData.SenderRewards.Raw, uint64(1000))
+ require.Greater(t, payInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
+ require.Equal(t, payInBlock.ApplyData.SenderRewards, payInBlock.ApplyData.ReceiverRewards)
+}
+
+func TestMinBalanceChanges(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, _ := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ createTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{
+ Total: 3,
+ Manager: addrs[1],
+ Reserve: addrs[2],
+ Freeze: addrs[3],
+ Clawback: addrs[4],
+ },
+ }
+
+ const expectedID basics.AssetIndex = 1
+ optInTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[5],
+ XferAsset: expectedID,
+ AssetReceiver: addrs[5],
+ }
+
+ ad0init, err := l.Lookup(l.Latest(), addrs[0])
+ require.NoError(t, err)
+ ad5init, err := l.Lookup(l.Latest(), addrs[5])
+ require.NoError(t, err)
+
+ eval := nextBlock(t, l, true, nil)
+ txns(t, l, eval, &createTxn, &optInTxn)
+ endBlock(t, l, eval)
+
+ ad0new, err := l.Lookup(l.Latest(), addrs[0])
+ require.NoError(t, err)
+ ad5new, err := l.Lookup(l.Latest(), addrs[5])
+ require.NoError(t, err)
+
+ proto := l.GenesisProto()
+ // Check balance and min balance requirement changes
+ require.Equal(t, ad0init.MicroAlgos.Raw, ad0new.MicroAlgos.Raw+1000) // fee
+ require.Equal(t, ad0init.MinBalance(&proto).Raw, ad0new.MinBalance(&proto).Raw-100000) // create
+ require.Equal(t, ad5init.MicroAlgos.Raw, ad5new.MicroAlgos.Raw+1000) // fee
+ require.Equal(t, ad5init.MinBalance(&proto).Raw, ad5new.MinBalance(&proto).Raw-100000) // optin
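+	// Each asset creation or opt-in reserves one asset slot, raising the min
+	// balance requirement by 100,000 microAlgos under the default params.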
+
+ optOutTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[5],
+ XferAsset: expectedID,
+ AssetReceiver: addrs[0],
+ AssetCloseTo: addrs[0],
+ }
+
+ closeTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[1], // The manager, not the creator
+ ConfigAsset: expectedID,
+ }
+
+ eval = nextBlock(t, l, true, nil)
+ txns(t, l, eval, &optOutTxn, &closeTxn)
+ endBlock(t, l, eval)
+
+ ad0final, err := l.Lookup(l.Latest(), addrs[0])
+ require.NoError(t, err)
+ ad5final, err := l.Lookup(l.Latest(), addrs[5])
+ require.NoError(t, err)
+ // Check we got our balance "back"
+ require.Equal(t, ad0final.MinBalance(&proto), ad0init.MinBalance(&proto))
+ require.Equal(t, ad5final.MinBalance(&proto), ad5init.MinBalance(&proto))
+}
+
+// Test that ModifiedAssetHoldings in StateDelta is set correctly.
+func TestModifiedAssetHoldings(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, _ := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ const assetid basics.AssetIndex = 1
+
+ createTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ Fee: 2000,
+ AssetParams: basics.AssetParams{
+ Total: 3,
+ Decimals: 0,
+ Manager: addrs[0],
+ Reserve: addrs[0],
+ Freeze: addrs[0],
+ Clawback: addrs[0],
+ },
+ }
+
+ optInTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[1],
+ Fee: 2000,
+ XferAsset: assetid,
+ AssetAmount: 0,
+ AssetReceiver: addrs[1],
+ }
+
+ eval := nextBlock(t, l, true, nil)
+ txns(t, l, eval, &createTxn, &optInTxn)
+ vb := endBlock(t, l, eval)
+
+ {
+ aa := ledgercore.AccountAsset{
+ Address: addrs[0],
+ Asset: assetid,
+ }
+ created, ok := vb.Delta().ModifiedAssetHoldings[aa]
+ require.True(t, ok)
+ assert.True(t, created)
+ }
+ {
+ aa := ledgercore.AccountAsset{
+ Address: addrs[1],
+ Asset: assetid,
+ }
+ created, ok := vb.Delta().ModifiedAssetHoldings[aa]
+ require.True(t, ok)
+ assert.True(t, created)
+ }
+
+ optOutTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[1],
+ Fee: 1000,
+ XferAsset: assetid,
+ AssetReceiver: addrs[0],
+ AssetCloseTo: addrs[0],
+ }
+
+ closeTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ Fee: 1000,
+ ConfigAsset: assetid,
+ }
+
+ eval = nextBlock(t, l, true, nil)
+ txns(t, l, eval, &optOutTxn, &closeTxn)
+ vb = endBlock(t, l, eval)
+
+ {
+ aa := ledgercore.AccountAsset{
+ Address: addrs[0],
+ Asset: assetid,
+ }
+ created, ok := vb.Delta().ModifiedAssetHoldings[aa]
+ require.True(t, ok)
+ assert.False(t, created)
+ }
+ {
+ aa := ledgercore.AccountAsset{
+ Address: addrs[1],
+ Asset: assetid,
+ }
+ created, ok := vb.Delta().ModifiedAssetHoldings[aa]
+ require.True(t, ok)
+ assert.False(t, created)
+ }
+}
+
+// Test that ModifiedAppLocalStates in StateDelta is set correctly.
+func TestModifiedAppLocalStates(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, _ := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ const appid basics.AppIndex = 1
+
+ createTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: "int 1",
+ }
+
+ optInTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appid,
+ OnCompletion: transactions.OptInOC,
+ }
+
+ eval := nextBlock(t, l, true, nil)
+ txns(t, l, eval, &createTxn, &optInTxn)
+ vb := endBlock(t, l, eval)
+
+ assert.Len(t, vb.Delta().ModifiedAppLocalStates, 1)
+ {
+ aa := ledgercore.AccountApp{
+ Address: addrs[1],
+ App: appid,
+ }
+ created, ok := vb.Delta().ModifiedAppLocalStates[aa]
+ require.True(t, ok)
+ assert.True(t, created)
+ }
+
+ optOutTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appid,
+ OnCompletion: transactions.CloseOutOC,
+ }
+
+ closeTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: appid,
+ OnCompletion: transactions.DeleteApplicationOC,
+ }
+
+ eval = nextBlock(t, l, true, nil)
+ txns(t, l, eval, &optOutTxn, &closeTxn)
+ vb = endBlock(t, l, eval)
+
+ assert.Len(t, vb.Delta().ModifiedAppLocalStates, 1)
+ {
+ aa := ledgercore.AccountApp{
+ Address: addrs[1],
+ App: appid,
+ }
+ created, ok := vb.Delta().ModifiedAppLocalStates[aa]
+ require.True(t, ok)
+ assert.False(t, created)
+ }
+}
+
+// TestAppInsMinBalance checks that accounts with MaxAppsOptedIn are accepted by the block
+// evaluator and do not cause any MaximumMinimumBalance problems
+func TestAppInsMinBalance(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, _ := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ const appid basics.AppIndex = 1
+
+ maxAppsOptedIn := config.Consensus[protocol.ConsensusFuture].MaxAppsOptedIn
+ require.Greater(t, maxAppsOptedIn, 0)
+ maxAppsCreated := config.Consensus[protocol.ConsensusFuture].MaxAppsCreated
+ require.Greater(t, maxAppsCreated, 0)
+ maxLocalSchemaEntries := config.Consensus[protocol.ConsensusFuture].MaxLocalSchemaEntries
+ require.Greater(t, maxLocalSchemaEntries, uint64(0))
+
+ txnsCreate := make([]*txntest.Txn, 0, maxAppsOptedIn)
+ txnsOptIn := make([]*txntest.Txn, 0, maxAppsOptedIn)
+ appsCreated := make(map[basics.Address]int, len(addrs)-1)
+
+ acctIdx := 0
+ for i := 0; i < maxAppsOptedIn; i++ {
+ creator := addrs[acctIdx]
+ createTxn := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: creator,
+ ApprovalProgram: "int 1",
+ LocalStateSchema: basics.StateSchema{NumByteSlice: maxLocalSchemaEntries},
+ Note: ledgertesting.RandomNote(),
+ }
+ txnsCreate = append(txnsCreate, &createTxn)
+ count := appsCreated[creator]
+ count++
+ appsCreated[creator] = count
+ if count == maxAppsCreated {
+ acctIdx++
+ }
+
+ optInTxn := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[9],
+ ApplicationID: appid + basics.AppIndex(i),
+ OnCompletion: transactions.OptInOC,
+ }
+ txnsOptIn = append(txnsOptIn, &optInTxn)
+ }
+
+ eval := nextBlock(t, l, true, nil)
+ txns1 := append(txnsCreate, txnsOptIn...)
+ txns(t, l, eval, txns1...)
+ vb := endBlock(t, l, eval)
+ require.Len(t, vb.Delta().ModifiedAppLocalStates, 50)
+}
+
+// TestGhostTransactions confirms that accounts that don't even exist
+// can be the Sender in some situations. If some other transaction
+// covers the fee, and the transaction itself does not require an
+// asset or a min balance, it's fine.
+func TestGhostTransactions(t *testing.T) {
+ t.Skip("Behavior should be changed so test passes.")
+
+ /*
+ I think we have a behavior we should fix. I’m going to call these
+ transactions where the Sender has no account and the fee=0 “ghost”
+ transactions. In a ghost transaction, we still call balances.Move to
+ “pay” the fee. Further, Move does not short-circuit a Move of 0 (for
+ good reason, allowing compounding). Therefore, in Move, we do rewards
+ processing on the “ghost” account. That causes us to want to write a
+ new accountdata for them. But if we do that, the minimum balance
+ checker will catch it, and kill the transaction because the ghost isn’t
+ allowed to have a balance of 0. I don’t think we can short-circuit
+ Move(0) because a zero pay is a known way to get your rewards
+ actualized. Instead, I advocate that we short-circuit the call to Move
+ for 0 fees.
+
+ // move fee to pool
+ if !tx.Fee.IsZero() {
+ err = balances.Move(tx.Sender, eval.specials.FeeSink, tx.Fee, &ad.SenderRewards, nil)
+ if err != nil {
+ return
+ }
+ }
+
+ I think this must be controlled by consensus upgrade, but I would love
+ to be told I’m wrong. The other option is to outlaw these
+ transactions, but even that requires changing code if we want to be
+ exactly correct, because they are currently allowed when there are no
+ rewards to get paid out (as would happen in a new network, or if we
+ stop participation rewards - notice that this test only fails on the
+ 4th attempt, once rewards have accumulated).
+
+	   Will suggested that we could treat Ghost accounts as non-participating.
+ Maybe that would allow the Move code to avoid trying to update
+ accountdata.
+ */
+
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, _ := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ asaIndex := basics.AssetIndex(1)
+
+ asa := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{
+ Total: 1000000,
+ Decimals: 3,
+ UnitName: "oz",
+ AssetName: "Gold",
+ URL: "https://gold.rush/",
+ Clawback: basics.Address{0x0c, 0x0b, 0x0a, 0x0c},
+ Freeze: basics.Address{0x0f, 0x0e, 0xe, 0xe},
+ Manager: basics.Address{0x0a, 0x0a, 0xe},
+ },
+ }
+
+ eval := nextBlock(t, l, true, nil)
+ txn(t, l, eval, &asa)
+ endBlock(t, l, eval)
+
+ benefactor := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: addrs[0],
+ Fee: 2000,
+ }
+
+ ghost := basics.Address{0x01}
+ ephemeral := []txntest.Txn{
+ {
+ Type: "pay",
+ Amount: 0,
+ Sender: ghost,
+ Receiver: ghost,
+ Fee: 0,
+ },
+ {
+ Type: "axfer",
+ AssetAmount: 0,
+ Sender: ghost,
+ AssetReceiver: basics.Address{0x02},
+ XferAsset: basics.AssetIndex(1),
+ Fee: 0,
+ },
+ {
+ Type: "axfer",
+ AssetAmount: 0,
+ Sender: basics.Address{0x0c, 0x0b, 0x0a, 0x0c},
+ AssetReceiver: addrs[0],
+ AssetSender: addrs[1],
+ XferAsset: asaIndex,
+ Fee: 0,
+ },
+ {
+ Type: "afrz",
+ Sender: basics.Address{0x0f, 0x0e, 0xe, 0xe},
+ FreezeAccount: addrs[0], // creator, therefore is opted in
+ FreezeAsset: asaIndex,
+ AssetFrozen: true,
+ Fee: 0,
+ },
+ {
+ Type: "afrz",
+ Sender: basics.Address{0x0f, 0x0e, 0xe, 0xe},
+ FreezeAccount: addrs[0], // creator, therefore is opted in
+ FreezeAsset: asaIndex,
+ AssetFrozen: false,
+ Fee: 0,
+ },
+ }
+
+ for i, e := range ephemeral {
+ eval = nextBlock(t, l, true, nil)
+ err := txgroup(t, l, eval, &benefactor, &e)
+ require.NoError(t, err, "i=%d %s", i, e.Type)
+ endBlock(t, l, eval)
+ }
+}
diff --git a/ledger/internal/eval_test.go b/ledger/internal/eval_test.go
new file mode 100644
index 000000000..c3bae4613
--- /dev/null
+++ b/ledger/internal/eval_test.go
@@ -0,0 +1,1030 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package internal
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "math/rand"
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/compactcert"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/verify"
+ "github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/execpool"
+)
+
+var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
+var minFee basics.MicroAlgos
+
+func init() {
+ params := config.Consensus[protocol.ConsensusCurrentVersion]
+ minFee = basics.MicroAlgos{Raw: params.MinTxnFee}
+}
+
+func TestBlockEvaluatorFeeSink(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, _, _ := ledgertesting.Genesis(10)
+
+ genesisBalances := bookkeeping.GenesisBalances{
+ Balances: genesisInitState.Accounts,
+ FeeSink: testSinkAddr,
+ RewardsPool: testPoolAddr,
+ Timestamp: 0,
+ }
+ l := newTestLedger(t, genesisBalances)
+
+	genesisBlockHeader, err := l.BlockHdr(basics.Round(0))
+	require.NoError(t, err)
+	newBlock := bookkeeping.MakeBlock(genesisBlockHeader)
+	eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+ require.Equal(t, eval.specials.FeeSink, testSinkAddr)
+}
+
+func TestPrepareEvalParams(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ eval := BlockEvaluator{
+ prevHeader: bookkeeping.BlockHeader{
+ TimeStamp: 1234,
+ Round: 2345,
+ },
+ }
+
+ params := []config.ConsensusParams{
+ {Application: true, MaxAppProgramCost: 700},
+ config.Consensus[protocol.ConsensusV29],
+ config.Consensus[protocol.ConsensusFuture],
+ }
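+	// Note: the first params element above is a minimal hand-rolled
+	// ConsensusParams; the other two are real protocol versions.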
+
+ // Create some sample transactions
+ payment := txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: basics.Address{1, 2, 3, 4},
+ Receiver: basics.Address{4, 3, 2, 1},
+ Amount: 100,
+ }.SignedTxnWithAD()
+
+ appcall1 := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: basics.Address{1, 2, 3, 4},
+ ApplicationID: basics.AppIndex(1),
+ }.SignedTxnWithAD()
+
+ appcall2 := appcall1
+ appcall2.SignedTxn.Txn.ApplicationCallTxnFields.ApplicationID = basics.AppIndex(2)
+
+ type evalTestCase struct {
+ group []transactions.SignedTxnWithAD
+
+		// indicates whether prepareEvalParams should return a non-nil
+		// EvalParams for the txn at index i
+ expected []bool
+
+ numAppCalls int
+ // Used for checking transitive pointer equality in app calls
+ // If there are no app calls in the group, it is set to -1
+ firstAppCallIndex int
+ }
+
+ // Create some groups with these transactions
+ cases := []evalTestCase{
+ {[]transactions.SignedTxnWithAD{payment}, []bool{false}, 0, -1},
+ {[]transactions.SignedTxnWithAD{appcall1}, []bool{true}, 1, 0},
+ {[]transactions.SignedTxnWithAD{payment, payment}, []bool{false, false}, 0, -1},
+ {[]transactions.SignedTxnWithAD{appcall1, payment}, []bool{true, false}, 1, 0},
+ {[]transactions.SignedTxnWithAD{payment, appcall1}, []bool{false, true}, 1, 1},
+ {[]transactions.SignedTxnWithAD{appcall1, appcall2}, []bool{true, true}, 2, 0},
+ {[]transactions.SignedTxnWithAD{appcall1, appcall2, appcall1}, []bool{true, true, true}, 3, 0},
+ {[]transactions.SignedTxnWithAD{payment, appcall1, payment}, []bool{false, true, false}, 1, 1},
+ {[]transactions.SignedTxnWithAD{appcall1, payment, appcall2}, []bool{true, false, true}, 2, 0},
+ }
+
+ for i, param := range params {
+ for j, testCase := range cases {
+ t.Run(fmt.Sprintf("i=%d,j=%d", i, j), func(t *testing.T) {
+ eval.proto = param
+ res := eval.prepareEvalParams(testCase.group)
+ require.Equal(t, len(res), len(testCase.group))
+
+ // Compute the expected transaction group without ApplyData for
+ // the test case
+ expGroupNoAD := make([]transactions.SignedTxn, len(testCase.group))
+ for k := range testCase.group {
+ expGroupNoAD[k] = testCase.group[k].SignedTxn
+ }
+
+ // Ensure non app calls have a nil evaluator, and that non-nil
+ // evaluators point to the right transactions and values
+ for k, present := range testCase.expected {
+ if present {
+ require.NotNil(t, res[k])
+ require.NotNil(t, res[k].PastSideEffects)
+ require.Equal(t, res[k].GroupIndex, uint64(k))
+ require.Equal(t, res[k].TxnGroup, expGroupNoAD)
+ require.Equal(t, *res[k].Proto, eval.proto)
+ require.Equal(t, *res[k].Txn, testCase.group[k].SignedTxn)
+ require.Equal(t, res[k].MinTealVersion, res[testCase.firstAppCallIndex].MinTealVersion)
+ require.Equal(t, res[k].PooledApplicationBudget, res[testCase.firstAppCallIndex].PooledApplicationBudget)
+ if reflect.DeepEqual(param, config.Consensus[protocol.ConsensusV29]) {
+ require.Equal(t, *res[k].PooledApplicationBudget, uint64(eval.proto.MaxAppProgramCost))
+ } else if reflect.DeepEqual(param, config.Consensus[protocol.ConsensusFuture]) {
+ require.Equal(t, *res[k].PooledApplicationBudget, uint64(eval.proto.MaxAppProgramCost*testCase.numAppCalls))
+ }
+ } else {
+ require.Nil(t, res[k])
+ }
+ }
+ })
+ }
+ }
+}
+
+func TestCowCompactCert(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var certRnd basics.Round
+ var certType protocol.CompactCertType
+ var cert compactcert.Cert
+ var atRound basics.Round
+ var validate bool
+ accts0 := ledgertesting.RandomAccounts(20, true)
+ blocks := make(map[basics.Round]bookkeeping.BlockHeader)
+ blockErr := make(map[basics.Round]error)
+ ml := mockLedger{balanceMap: accts0, blocks: blocks, blockErr: blockErr}
+ c0 := makeRoundCowState(
+ &ml, bookkeeping.BlockHeader{}, config.Consensus[protocol.ConsensusCurrentVersion],
+ 0, ledgercore.AccountTotals{}, 0)
+
+ certType = protocol.CompactCertType(1234) // bad cert type
+ err := c0.compactCert(certRnd, certType, cert, atRound, validate)
+ require.Error(t, err)
+
+ // no certRnd block
+ certType = protocol.CompactCertBasic
+ noBlockErr := errors.New("no block")
+ blockErr[3] = noBlockErr
+ certRnd = 3
+ err = c0.compactCert(certRnd, certType, cert, atRound, validate)
+ require.Error(t, err)
+
+ // no votersRnd block
+ // this is slightly a mess of things that don't quite line up with likely usage
+ validate = true
+ var certHdr bookkeeping.BlockHeader
+ certHdr.CurrentProtocol = "TestCowCompactCert"
+ certHdr.Round = 1
+ proto := config.Consensus[certHdr.CurrentProtocol]
+ proto.CompactCertRounds = 2
+ config.Consensus[certHdr.CurrentProtocol] = proto
+ blocks[certHdr.Round] = certHdr
+
+ certHdr.Round = 15
+ blocks[certHdr.Round] = certHdr
+ certRnd = certHdr.Round
+ blockErr[13] = noBlockErr
+ err = c0.compactCert(certRnd, certType, cert, atRound, validate)
+ require.Error(t, err)
+
+ // validate fail
+ certHdr.Round = 1
+ certRnd = certHdr.Round
+ err = c0.compactCert(certRnd, certType, cert, atRound, validate)
+ require.Error(t, err)
+
+ // fall through to no err
+ validate = false
+ err = c0.compactCert(certRnd, certType, cert, atRound, validate)
+ require.NoError(t, err)
+
+ // 100% coverage
+}
+
+// a couple trivial tests that don't need setup
+// see TestBlockEvaluator for more
+func TestTestTransactionGroup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var txgroup []transactions.SignedTxn
+ eval := BlockEvaluator{}
+ err := eval.TestTransactionGroup(txgroup)
+ require.NoError(t, err) // nothing to do, no problem
+
+ eval.proto = config.Consensus[protocol.ConsensusCurrentVersion]
+ txgroup = make([]transactions.SignedTxn, eval.proto.MaxTxGroupSize+1)
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err) // too many
+}
+
+// test BlockEvaluator.transactionGroup()
+// some trivial checks that require no setup
+func TestPrivateTransactionGroup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var txgroup []transactions.SignedTxnWithAD
+ eval := BlockEvaluator{}
+ err := eval.TransactionGroup(txgroup)
+ require.NoError(t, err) // nothing to do, no problem
+
+ eval.proto = config.Consensus[protocol.ConsensusCurrentVersion]
+ txgroup = make([]transactions.SignedTxnWithAD, eval.proto.MaxTxGroupSize+1)
+ err = eval.TransactionGroup(txgroup)
+ require.Error(t, err) // too many
+}
+
+// BlockEvaluator.workaroundOverspentRewards() fixed a couple issues on testnet.
+// This is now part of history and has to be re-created when running catchup on testnet. So, test to ensure it keeps happening.
+func TestTestnetFixup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ eval := &BlockEvaluator{}
+ var rewardPoolBalance basics.AccountData
+ rewardPoolBalance.MicroAlgos.Raw = 1234
+ var headerRound basics.Round
+ testnetGenesisHash, _ := crypto.DigestFromString("JBR3KGFEWPEE5SAQ6IWU6EEBZMHXD4CZU6WCBXWGF57XBZIJHIRA")
+
+ // not a fixup round, no change
+ headerRound = 1
+ poolOld, err := eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
+ require.Equal(t, rewardPoolBalance, poolOld)
+ require.NoError(t, err)
+
+ eval.genesisHash = testnetGenesisHash
+ eval.genesisHash[3]++
+
+ specialRounds := []basics.Round{1499995, 2926564}
+ for _, headerRound = range specialRounds {
+ poolOld, err = eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
+ require.Equal(t, rewardPoolBalance, poolOld)
+ require.NoError(t, err)
+ }
+
+ for _, headerRound = range specialRounds {
+ testnetFixupExecution(t, headerRound, 20000000000)
+ }
+ // do all the setup and do nothing for not a special round
+ testnetFixupExecution(t, specialRounds[0]+1, 0)
+}
+
+func testnetFixupExecution(t *testing.T, headerRound basics.Round, poolBonus uint64) {
+ testnetGenesisHash, _ := crypto.DigestFromString("JBR3KGFEWPEE5SAQ6IWU6EEBZMHXD4CZU6WCBXWGF57XBZIJHIRA")
+ // big setup so we can move some algos
+ // boilerplate like TestBlockEvaluator, but pretend to be testnet
+ genesisInitState, addrs, keys := ledgertesting.Genesis(10)
+ genesisInitState.Block.BlockHeader.GenesisHash = testnetGenesisHash
+ genesisInitState.Block.BlockHeader.GenesisID = "testnet"
+ genesisInitState.GenesisHash = testnetGenesisHash
+
+ rewardPoolBalance := genesisInitState.Accounts[testPoolAddr]
+ nextPoolBalance := rewardPoolBalance.MicroAlgos.Raw + poolBonus
+
+ l := newTestLedger(t, bookkeeping.GenesisBalances{
+ Balances: genesisInitState.Accounts,
+ FeeSink: testSinkAddr,
+ RewardsPool: testPoolAddr,
+ })
+ l.blocks[0] = genesisInitState.Block
+ l.genesisHash = genesisInitState.GenesisHash
+
+ newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+
+ // won't work before funding bank
+ if poolBonus > 0 {
+ _, err = eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
+ require.Error(t, err)
+ }
+
+ bankAddr, _ := basics.UnmarshalChecksumAddress("GD64YIY3TWGDMCNPP553DZPPR6LDUSFQOIJVFDPPXWEG3FVOJCCDBBHU5A")
+
+ // put some algos in the bank so that fixup can pull from this account
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addrs[0],
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round(),
+ GenesisHash: testnetGenesisHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: bankAddr,
+ Amount: basics.MicroAlgos{Raw: 20000000000 * 10},
+ },
+ }
+ st := txn.Sign(keys[0])
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ poolOld, err := eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
+ require.Equal(t, nextPoolBalance, poolOld.MicroAlgos.Raw)
+ require.NoError(t, err)
+}
+
+// newTestGenesis creates a bunch of accounts, splits up 10B algos
+// between them and the rewardspool and feesink, and gives out the
+// addresses and secrets it creates to enable tests. For special
+// scenarios, manipulate these return values before using newTestLedger.
+func newTestGenesis() (bookkeeping.GenesisBalances, []basics.Address, []*crypto.SignatureSecrets) {
+ // irrelevant, but deterministic
+ sink, err := basics.UnmarshalChecksumAddress("YTPRLJ2KK2JRFSZZNAF57F3K5Y2KCG36FZ5OSYLW776JJGAUW5JXJBBD7Q")
+ if err != nil {
+ panic(err)
+ }
+ rewards, err := basics.UnmarshalChecksumAddress("242H5OXHUEBYCGGWB3CQ6AZAMQB5TMCWJGHCGQOZPEIVQJKOO7NZXUXDQA")
+ if err != nil {
+ panic(err)
+ }
+
+ const count = 10
+ addrs := make([]basics.Address, count)
+ secrets := make([]*crypto.SignatureSecrets, count)
+ accts := make(map[basics.Address]basics.AccountData)
+
+ // 10 billion microalgos, across N accounts and pool and sink
+ amount := 10 * 1000000000 * 1000000 / uint64(count+2)
+
+ for i := 0; i < count; i++ {
+ // Create deterministic addresses, so that output stays the same, run to run.
+ var seed crypto.Seed
+ seed[0] = byte(i)
+ secrets[i] = crypto.GenerateSignatureSecrets(seed)
+ addrs[i] = basics.Address(secrets[i].SignatureVerifier)
+
+ adata := basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: amount},
+ }
+ accts[addrs[i]] = adata
+ }
+
+ accts[sink] = basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: amount},
+ Status: basics.NotParticipating,
+ }
+
+ accts[rewards] = basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: amount},
+ }
+
+ genBalances := bookkeeping.MakeGenesisBalances(accts, sink, rewards)
+
+ return genBalances, addrs, secrets
+}
+
+type evalTestLedger struct {
+ blocks map[basics.Round]bookkeeping.Block
+ roundBalances map[basics.Round]map[basics.Address]basics.AccountData
+ genesisHash crypto.Digest
+ feeSink basics.Address
+ rewardsPool basics.Address
+ latestTotals ledgercore.AccountTotals
+}
+
+// newTestLedger creates an in-memory Ledger that is as realistic as
+// possible. It has Rewards and FeeSink properly configured.
+func newTestLedger(t testing.TB, balances bookkeeping.GenesisBalances) *evalTestLedger {
+ l := &evalTestLedger{
+ blocks: make(map[basics.Round]bookkeeping.Block),
+ roundBalances: make(map[basics.Round]map[basics.Address]basics.AccountData),
+ feeSink: balances.FeeSink,
+ rewardsPool: balances.RewardsPool,
+ }
+
+ crypto.RandBytes(l.genesisHash[:])
+ genBlock, err := bookkeeping.MakeGenesisBlock(protocol.ConsensusFuture,
+ balances, "test", l.genesisHash)
+ require.NoError(t, err)
+ l.roundBalances[0] = balances.Balances
+ l.blocks[0] = genBlock
+
+ // calculate the accounts totals.
+ var ot basics.OverflowTracker
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ for _, acctData := range balances.Balances {
+ l.latestTotals.AddAccount(proto, acctData, &ot)
+ }
+
+ require.False(t, genBlock.FeeSink.IsZero())
+ require.False(t, genBlock.RewardsPool.IsZero())
+ return l
+}
+
+// Validate uses the ledger to validate block blk as a candidate next block.
+// It returns an error if blk is not the expected next block, or if blk is
+// not a valid block (e.g., it has duplicate transactions, overspends some
+// account, etc).
+func (ledger *evalTestLedger) Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledgercore.ValidatedBlock, error) {
+ verifiedTxnCache := verify.MakeVerifiedTransactionCache(config.GetDefaultLocal().VerifiedTranscationsCacheSize)
+
+ delta, err := Eval(ctx, ledger, blk, true, verifiedTxnCache, executionPool)
+ if err != nil {
+ return nil, err
+ }
+
+ vb := ledgercore.MakeValidatedBlock(blk, delta)
+ return &vb, nil
+}
+
+// StartEvaluator creates a BlockEvaluator, given a ledger and a block header
+// of the block that the caller is planning to evaluate. If the length of the
+// payset being evaluated is known in advance, a paysetHint >= 0 can be
+// passed, avoiding unnecessary payset slice growth.
+func (ledger *evalTestLedger) StartEvaluator(hdr bookkeeping.BlockHeader, paysetHint, maxTxnBytesPerBlock int) (*BlockEvaluator, error) {
+ return StartEvaluator(ledger, hdr,
+ EvaluatorOptions{
+ PaysetHint: paysetHint,
+ Validate: true,
+ Generate: true,
+ MaxTxnBytesPerBlock: maxTxnBytesPerBlock,
+ })
+}
+
+// GetCreatorForRound takes a CreatableIndex and a CreatableType and tries to
+// look up a creator address, setting ok to false if the query succeeded but no
+// creator was found.
+func (ledger *evalTestLedger) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) {
+ balances := ledger.roundBalances[rnd]
+ for addr, balance := range balances {
+ if _, has := balance.AssetParams[basics.AssetIndex(cidx)]; has {
+ return addr, true, nil
+ }
+ if _, has := balance.AppParams[basics.AppIndex(cidx)]; has {
+ return addr, true, nil
+ }
+ }
+ return basics.Address{}, false, nil
+}
+
+// LatestTotals returns the totals of all accounts for the most recent round, as well as the round number.
+func (ledger *evalTestLedger) LatestTotals() (basics.Round, ledgercore.AccountTotals, error) {
+ return basics.Round(len(ledger.blocks)).SubSaturate(1), ledger.latestTotals, nil
+}
+
+// LookupWithoutRewards is like Lookup but does not apply pending rewards up
+// to the requested round rnd.
+func (ledger *evalTestLedger) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (basics.AccountData, basics.Round, error) {
+ return ledger.roundBalances[rnd][addr], rnd, nil
+}
+
+// GenesisHash returns the genesis hash for this ledger.
+func (ledger *evalTestLedger) GenesisHash() crypto.Digest {
+ return ledger.genesisHash
+}
+
+// Latest returns the latest known block round added to the ledger.
+func (ledger *evalTestLedger) Latest() basics.Round {
+ return basics.Round(len(ledger.blocks)).SubSaturate(1)
+}
+
+// AddValidatedBlock adds a new block to the ledger, after the block has
+// been validated by calling Ledger.Validate(). This saves the cost of
+// having to re-compute the effect of the block on the ledger state, if
+// the block has previously been validated. Otherwise, AddValidatedBlock
+// behaves like AddBlock.
+func (ledger *evalTestLedger) AddValidatedBlock(vb ledgercore.ValidatedBlock, cert agreement.Certificate) error {
+ blk := vb.Block()
+ ledger.blocks[blk.Round()] = blk
+ newBalances := make(map[basics.Address]basics.AccountData)
+
+ // copy the previous balances.
+ for k, v := range ledger.roundBalances[vb.Block().Round()-1] {
+ newBalances[k] = v
+ }
+ // update
+ deltas := vb.Delta()
+ for _, addr := range deltas.Accts.ModifiedAccounts() {
+ accountData, _ := deltas.Accts.Get(addr)
+ newBalances[addr] = accountData
+ }
+ ledger.roundBalances[vb.Block().Round()] = newBalances
+ ledger.latestTotals = vb.Delta().Totals
+ return nil
+}
+
+// Lookup uses the accounts tracker to return the account state for a
+// given account in a particular round. The account values reflect
+// the changes of all blocks up to and including rnd.
+func (ledger *evalTestLedger) Lookup(rnd basics.Round, addr basics.Address) (basics.AccountData, error) {
+ balances, has := ledger.roundBalances[rnd]
+ if !has {
+ return basics.AccountData{}, errors.New("invalid round specified")
+ }
+
+ return balances[addr], nil
+}
+func (ledger *evalTestLedger) BlockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error) {
+ block, has := ledger.blocks[rnd]
+ if !has {
+ return bookkeeping.BlockHeader{}, errors.New("invalid round specified")
+ }
+ return block.BlockHeader, nil
+}
+
+func (ledger *evalTestLedger) CompactCertVoters(rnd basics.Round) (*ledgercore.VotersForRound, error) {
+ return nil, errors.New("untested code path")
+}
+
+// GetCreator is like GetCreatorForRound, but for the latest round and race-free
+// with respect to ledger.Latest()
+func (ledger *evalTestLedger) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
+ latestRound := ledger.Latest()
+ return ledger.GetCreatorForRound(latestRound, cidx, ctype)
+}
+
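+// CheckDup reports a TransactionInLedgerError if txid already appears in any
+// stored block; leases are not yet checked (see the todo below).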
+func (ledger *evalTestLedger) CheckDup(currentProto config.ConsensusParams, current basics.Round, firstValid basics.Round, lastValid basics.Round, txid transactions.Txid, txl ledgercore.Txlease) error {
+ for _, block := range ledger.blocks {
+ for _, txn := range block.Payset {
+ if lastValid != txn.Txn.LastValid {
+ continue
+ }
+ currentTxid := txn.Txn.ID()
+ if bytes.Equal(txid[:], currentTxid[:]) {
+ return &ledgercore.TransactionInLedgerError{Txid: txid}
+ }
+ }
+ }
+ // todo - support leases.
+ return nil
+}
+
+// nextBlock begins evaluation of a new block, after ledger creation or endBlock()
+func (ledger *evalTestLedger) nextBlock(t testing.TB) *BlockEvaluator {
+ rnd := ledger.Latest()
+ hdr, err := ledger.BlockHdr(rnd)
+ require.NoError(t, err)
+
+ nextHdr := bookkeeping.MakeBlock(hdr).BlockHeader
+ eval, err := ledger.StartEvaluator(nextHdr, 0, 0)
+ require.NoError(t, err)
+ return eval
+}
+
+// endBlock completes the block being created, adds it to the ledger, and returns the ValidatedBlock for inspection
+func (ledger *evalTestLedger) endBlock(t testing.TB, eval *BlockEvaluator) *ledgercore.ValidatedBlock {
+ validatedBlock, err := eval.GenerateBlock()
+ require.NoError(t, err)
+ err = ledger.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+ require.NoError(t, err)
+ return validatedBlock
+}
+
+// lookup gets the current accountdata for an address
+func (ledger *evalTestLedger) lookup(t testing.TB, addr basics.Address) basics.AccountData {
+ rnd := ledger.Latest()
+ ad, err := ledger.Lookup(rnd, addr)
+ require.NoError(t, err)
+ return ad
+}
+
+// micros gets the current microAlgo balance for an address
+func (ledger *evalTestLedger) micros(t testing.TB, addr basics.Address) uint64 {
+ return ledger.lookup(t, addr).MicroAlgos.Raw
+}
+
+// asa gets the current balance and optin status for some asa for an address
+func (ledger *evalTestLedger) asa(t testing.TB, addr basics.Address, asset basics.AssetIndex) (uint64, bool) {
+ if holding, ok := ledger.lookup(t, addr).Assets[asset]; ok {
+ return holding.Amount, true
+ }
+ return 0, false
+}
+
+// asaParams gets the asset params for a given asa index
+func (ledger *evalTestLedger) asaParams(t testing.TB, asset basics.AssetIndex) (basics.AssetParams, error) {
+ creator, ok, err := ledger.GetCreator(basics.CreatableIndex(asset), basics.AssetCreatable)
+ if err != nil {
+ return basics.AssetParams{}, err
+ }
+ if !ok {
+ return basics.AssetParams{}, fmt.Errorf("no asset (%d)", asset)
+ }
+ if params, ok := ledger.lookup(t, creator).AssetParams[asset]; ok {
+ return params, nil
+ }
+ return basics.AssetParams{}, fmt.Errorf("bad lookup (%d)", asset)
+}
+
+type getCreatorForRoundResult struct {
+ address basics.Address
+ exists bool
+}
+
+type testCowBaseLedger struct {
+ creators []getCreatorForRoundResult
+}
+
+func (l *testCowBaseLedger) BlockHdr(basics.Round) (bookkeeping.BlockHeader, error) {
+ return bookkeeping.BlockHeader{}, errors.New("not implemented")
+}
+
+func (l *testCowBaseLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
+ return errors.New("not implemented")
+}
+
+func (l *testCowBaseLedger) LookupWithoutRewards(basics.Round, basics.Address) (basics.AccountData, basics.Round, error) {
+ return basics.AccountData{}, basics.Round(0), errors.New("not implemented")
+}
+
+func (l *testCowBaseLedger) GetCreatorForRound(_ basics.Round, cindex basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
+ res := l.creators[0]
+ l.creators = l.creators[1:]
+ return res.address, res.exists, nil
+}
+
+func TestCowBaseCreatorsCache(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ addresses := make([]basics.Address, 3)
+ for i := 0; i < len(addresses); i++ {
+ _, err := rand.Read(addresses[i][:])
+ require.NoError(t, err)
+ }
+
+ creators := []getCreatorForRoundResult{
+ {address: addresses[0], exists: true},
+ {address: basics.Address{}, exists: false},
+ {address: addresses[1], exists: true},
+ {address: basics.Address{}, exists: false},
+ }
+ l := testCowBaseLedger{
+ creators: creators,
+ }
+
+ base := roundCowBase{
+ l: &l,
+ creators: map[creatable]foundAddress{},
+ }
+
+ cindex := []basics.CreatableIndex{9, 10, 9, 10}
+ ctype := []basics.CreatableType{
+ basics.AssetCreatable,
+ basics.AssetCreatable,
+ basics.AppCreatable,
+ basics.AppCreatable,
+ }
+ for i := 0; i < 2; i++ {
+ for j, expected := range creators {
+ address, exists, err := base.getCreator(cindex[j], ctype[j])
+ require.NoError(t, err)
+
+ assert.Equal(t, expected.address, address)
+ assert.Equal(t, expected.exists, exists)
+ }
+ }
+}
+
+// TestEvalFunctionForExpiredAccounts tests that the eval function will correctly mark accounts as offline
+func TestEvalFunctionForExpiredAccounts(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, keys := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture)
+
+ sendAddr := addrs[0]
+ recvAddr := addrs[1]
+
+ // the last round that the recvAddr is valid for
+ recvAddrLastValidRound := basics.Round(2)
+
+ // the target round we want to advance the evaluator to
+ targetRound := basics.Round(4)
+
+ // Set all to online except the sending address
+ for _, addr := range addrs {
+ if addr == sendAddr {
+ continue
+ }
+ tmp := genesisInitState.Accounts[addr]
+ tmp.Status = basics.Online
+ genesisInitState.Accounts[addr] = tmp
+ }
+
+	// Choose recvAddr to have a VoteLastValid earlier than the target round, so it will be expired
+ {
+ tmp := genesisInitState.Accounts[recvAddr]
+ tmp.VoteLastValid = recvAddrLastValidRound
+ genesisInitState.Accounts[recvAddr] = tmp
+ }
+
+ genesisBalances := bookkeeping.GenesisBalances{
+ Balances: genesisInitState.Accounts,
+ FeeSink: testSinkAddr,
+ RewardsPool: testPoolAddr,
+ Timestamp: 0,
+ }
+ l := newTestLedger(t, genesisBalances)
+
+ newBlock := bookkeeping.MakeBlock(l.blocks[0].BlockHeader)
+
+ blkEval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+
+ // Advance the evaluator a couple rounds...
+ for i := uint64(0); i < uint64(targetRound); i++ {
+ l.endBlock(t, blkEval)
+ blkEval = l.nextBlock(t)
+ }
+
+ require.Greater(t, uint64(blkEval.Round()), uint64(recvAddrLastValidRound))
+
+ genHash := l.GenesisHash()
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: sendAddr,
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: blkEval.Round(),
+ GenesisHash: genHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: recvAddr,
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+
+ st := txn.Sign(keys[0])
+ err = blkEval.Transaction(st, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ // Make sure we validate our block as well
+ blkEval.validate = true
+
+ validatedBlock, err := blkEval.GenerateBlock()
+ require.NoError(t, err)
+
+ _, err = Eval(context.Background(), l, validatedBlock.Block(), false, nil, nil)
+ require.NoError(t, err)
+
+ badBlock := *validatedBlock
+
+	// First validate that the bad block is fine if we don't touch it...
+ _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil)
+ require.NoError(t, err)
+
+ badBlock = *validatedBlock
+
+ // Introduce an unknown address to introduce an error
+ badBlockObj := badBlock.Block()
+ badBlockObj.ExpiredParticipationAccounts = append(badBlockObj.ExpiredParticipationAccounts, basics.Address{1})
+ badBlock = ledgercore.MakeValidatedBlock(badBlockObj, badBlock.Delta())
+
+ _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil)
+ require.Error(t, err)
+
+ badBlock = *validatedBlock
+
+ addressToCopy := badBlock.Block().ExpiredParticipationAccounts[0]
+
+ // Add more than the expected number of accounts
+ badBlockObj = badBlock.Block()
+ for i := 0; i < blkEval.proto.MaxProposedExpiredOnlineAccounts+1; i++ {
+ badBlockObj.ExpiredParticipationAccounts = append(badBlockObj.ExpiredParticipationAccounts, addressToCopy)
+ }
+ badBlock = ledgercore.MakeValidatedBlock(badBlockObj, badBlock.Delta())
+
+ _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil)
+ require.Error(t, err)
+
+ badBlock = *validatedBlock
+
+ // Duplicate an address
+ badBlockObj = badBlock.Block()
+ badBlockObj.ExpiredParticipationAccounts = append(badBlockObj.ExpiredParticipationAccounts, badBlockObj.ExpiredParticipationAccounts[0])
+ badBlock = ledgercore.MakeValidatedBlock(badBlockObj, badBlock.Delta())
+
+ _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil)
+ require.Error(t, err)
+
+ badBlock = *validatedBlock
+	// sanity check that the bad block is actually copied, not just the pointer
+ _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil)
+ require.NoError(t, err)
+}
+
+type failRoundCowParent struct {
+ roundCowBase
+}
+
+func (p *failRoundCowParent) lookup(basics.Address) (basics.AccountData, error) {
+ return basics.AccountData{}, fmt.Errorf("disk I/O fail (on purpose)")
+}
+
+// TestExpiredAccountGenerationWithDiskFailure tests edge cases where disk failures can lead to ledger lookup failures
+func TestExpiredAccountGenerationWithDiskFailure(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, keys := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture)
+
+ sendAddr := addrs[0]
+ recvAddr := addrs[1]
+
+ // the last round that the recvAddr is valid for
+ recvAddrLastValidRound := basics.Round(10)
+
+ // the target round we want to advance the evaluator to
+ targetRound := basics.Round(4)
+
+ // Set all to online except the sending address
+ for _, addr := range addrs {
+ if addr == sendAddr {
+ continue
+ }
+ tmp := genesisInitState.Accounts[addr]
+ tmp.Status = basics.Online
+ genesisInitState.Accounts[addr] = tmp
+ }
+
+	// Give recvAddr an explicit VoteLastValid
+ {
+ tmp := genesisInitState.Accounts[recvAddr]
+ tmp.VoteLastValid = recvAddrLastValidRound
+ genesisInitState.Accounts[recvAddr] = tmp
+ }
+
+ l := newTestLedger(t, bookkeeping.GenesisBalances{
+ Balances: genesisInitState.Accounts,
+ FeeSink: testSinkAddr,
+ RewardsPool: testPoolAddr,
+ Timestamp: 0,
+ })
+
+ newBlock := bookkeeping.MakeBlock(l.blocks[0].BlockHeader)
+
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+
+ // Advance the evaluator a couple rounds...
+ for i := uint64(0); i < uint64(targetRound); i++ {
+ l.endBlock(t, eval)
+ eval = l.nextBlock(t)
+ }
+
+ genHash := l.GenesisHash()
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: sendAddr,
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: eval.Round(),
+ GenesisHash: genHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: recvAddr,
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+
+ st := txn.Sign(keys[0])
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ eval.validate = true
+ eval.generate = false
+
+ eval.block.ExpiredParticipationAccounts = append(eval.block.ExpiredParticipationAccounts, recvAddr)
+
+ err = eval.endOfBlock()
+ require.Error(t, err)
+
+ eval.block.ExpiredParticipationAccounts = []basics.Address{
+ basics.Address{},
+ }
+ eval.state.mods.Accts = ledgercore.AccountDeltas{}
+ eval.state.lookupParent = &failRoundCowParent{}
+ err = eval.endOfBlock()
+ require.Error(t, err)
+
+ err = eval.resetExpiredOnlineAccountsParticipationKeys()
+ require.Error(t, err)
+}
+
+// TestExpiredAccountGeneration tests that expired accounts are added to the block header and validated
+func TestExpiredAccountGeneration(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, keys := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture)
+
+ sendAddr := addrs[0]
+ recvAddr := addrs[1]
+
+ // the last round that the recvAddr is valid for
+ recvAddrLastValidRound := basics.Round(2)
+
+ // the target round we want to advance the evaluator to
+ targetRound := basics.Round(4)
+
+ // Set all to online except the sending address
+ for _, addr := range addrs {
+ if addr == sendAddr {
+ continue
+ }
+ tmp := genesisInitState.Accounts[addr]
+ tmp.Status = basics.Online
+ genesisInitState.Accounts[addr] = tmp
+ }
+
+	// Choose recvAddr to have a VoteLastValid earlier than the target round, so it will be expired
+ {
+ tmp := genesisInitState.Accounts[recvAddr]
+ tmp.VoteLastValid = recvAddrLastValidRound
+ genesisInitState.Accounts[recvAddr] = tmp
+ }
+
+ l := newTestLedger(t, bookkeeping.GenesisBalances{
+ Balances: genesisInitState.Accounts,
+ FeeSink: testSinkAddr,
+ RewardsPool: testPoolAddr,
+ Timestamp: 0,
+ })
+
+ newBlock := bookkeeping.MakeBlock(l.blocks[0].BlockHeader)
+
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+
+ // Advance the evaluator to the target round
+ for i := uint64(0); i < uint64(targetRound); i++ {
+ l.endBlock(t, eval)
+ eval = l.nextBlock(t)
+ }
+
+ require.Greater(t, uint64(eval.Round()), uint64(recvAddrLastValidRound))
+
+ genHash := l.GenesisHash()
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: sendAddr,
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: eval.Round(),
+ GenesisHash: genHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: recvAddr,
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+
+ st := txn.Sign(keys[0])
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ // Make sure we validate our block as well
+ eval.validate = true
+
+ validatedBlock, err := eval.GenerateBlock()
+ require.NoError(t, err)
+
+ listOfExpiredAccounts := validatedBlock.Block().ParticipationUpdates.ExpiredParticipationAccounts
+
+ require.Equal(t, 1, len(listOfExpiredAccounts))
+ expiredAccount := listOfExpiredAccounts[0]
+ require.Equal(t, expiredAccount, recvAddr)
+
+ recvAcct, err := eval.state.lookup(recvAddr)
+ require.NoError(t, err)
+ require.Equal(t, recvAcct.Status, basics.Offline)
+ require.Equal(t, recvAcct.VoteFirstValid, basics.Round(0))
+ require.Equal(t, recvAcct.VoteLastValid, basics.Round(0))
+ require.Equal(t, recvAcct.VoteKeyDilution, uint64(0))
+ require.Equal(t, recvAcct.VoteID, crypto.OneTimeSignatureVerifier{})
+ require.Equal(t, recvAcct.SelectionID, crypto.VRFVerifier{})
+
+}
diff --git a/ledger/internal/evalindexer.go b/ledger/internal/evalindexer.go
new file mode 100644
index 000000000..454ae2d7a
--- /dev/null
+++ b/ledger/internal/evalindexer.go
@@ -0,0 +1,51 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package internal
+
+import (
+ "fmt"
+
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+)
+
+// ProcessBlockForIndexer evaluates a block's transaction groups and end-of-block state
+// changes, returning the resulting state delta and the block's payset for the indexer.
+func (eval *BlockEvaluator) ProcessBlockForIndexer(block *bookkeeping.Block) (ledgercore.StateDelta, []transactions.SignedTxnInBlock, error) {
+ paysetgroups, err := block.DecodePaysetGroups()
+ if err != nil {
+ return ledgercore.StateDelta{}, []transactions.SignedTxnInBlock{},
+ fmt.Errorf("ProcessBlockForIndexer() err: %w", err)
+ }
+
+ for _, group := range paysetgroups {
+ err = eval.TransactionGroup(group)
+ if err != nil {
+ return ledgercore.StateDelta{}, []transactions.SignedTxnInBlock{},
+ fmt.Errorf("ProcessBlockForIndexer() err: %w", err)
+ }
+ }
+
+ // Finally, process any pending end-of-block state changes.
+ err = eval.endOfBlock()
+ if err != nil {
+ return ledgercore.StateDelta{}, []transactions.SignedTxnInBlock{},
+ fmt.Errorf("ProcessBlockForIndexer() err: %w", err)
+ }
+
+ return eval.state.deltas(), eval.block.Payset, nil
+}
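
A sketch of how indexer-side code (within the ledger package tree, given Go's internal-package rule) would drive the new entry point; eval is assumed to be a *BlockEvaluator already positioned at the round of blk, and storeRound a hypothetical persistence helper:

	delta, payset, err := eval.ProcessBlockForIndexer(&blk)
	if err != nil {
		// the wrapped error names the failing stage: payset decode, a transaction group, or endOfBlock
		return err
	}
	// delta carries the round's state changes; payset, the transactions as committed in the block
	storeRound(delta, payset)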
diff --git a/ledger/ledger.go b/ledger/ledger.go
index 42c878834..4c0ececaa 100644
--- a/ledger/ledger.go
+++ b/ledger/ledger.go
@@ -28,13 +28,18 @@ import (
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/compactcert"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/verify"
+ "github.com/algorand/go-algorand/ledger/apply"
+ "github.com/algorand/go-algorand/ledger/internal"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
+ "github.com/algorand/go-algorand/util/execpool"
"github.com/algorand/go-algorand/util/metrics"
)
@@ -70,12 +75,12 @@ type Ledger struct {
genesisProto config.ConsensusParams
// State-machine trackers
- accts accountUpdates
- txTail txTail
- bulletin bulletin
- notifier blockNotifier
- time timeTracker
- metrics metricsTracker
+ accts accountUpdates
+ catchpoint catchpointTracker
+ txTail txTail
+ bulletin bulletin
+ notifier blockNotifier
+ metrics metricsTracker
trackers trackerRegistry
trackerMu deadlock.RWMutex
@@ -84,13 +89,8 @@ type Ledger struct {
// verifiedTxnCache holds all the verified transactions state
verifiedTxnCache verify.VerifiedTransactionCache
-}
-// InitState structure defines blockchain init params
-type InitState struct {
- Block bookkeeping.Block
- Accounts map[basics.Address]basics.AccountData
- GenesisHash crypto.Digest
+ cfg config.Local
}
// OpenLedger creates a Ledger object, using SQLite database filenames
@@ -98,7 +98,7 @@ type InitState struct {
// genesisInitState.Block and genesisInitState.Accounts specify the initial block and accounts to use if the
// database wasn't initialized before.
func OpenLedger(
- log logging.Logger, dbPathPrefix string, dbMem bool, genesisInitState InitState, cfg config.Local,
+ log logging.Logger, dbPathPrefix string, dbMem bool, genesisInitState ledgercore.InitState, cfg config.Local,
) (*Ledger, error) {
var err error
verifiedCacheSize := cfg.VerifiedTranscationsCacheSize
@@ -116,6 +116,7 @@ func OpenLedger(
synchronousMode: db.SynchronousMode(cfg.LedgerSynchronousMode),
accountsRebuildSynchronousMode: db.SynchronousMode(cfg.AccountsRebuildSynchronousMode),
verifiedTxnCache: verify.MakeVerifiedTransactionCache(verifiedCacheSize),
+ cfg: cfg,
}
l.headerCache.maxEntries = 10
@@ -153,7 +154,8 @@ func OpenLedger(
l.genesisAccounts = make(map[basics.Address]basics.AccountData)
}
- l.accts.initialize(cfg, dbPathPrefix, l.genesisProto, l.genesisAccounts)
+ l.accts.initialize(cfg)
+ l.catchpoint.initialize(cfg, dbPathPrefix)
err = l.reloadLedger()
if err != nil {
@@ -179,7 +181,7 @@ func (l *Ledger) reloadLedger() error {
// close the trackers.
l.trackers.close()
- // reload -
+ // init block queue
var err error
l.blockQ, err = bqInit(l)
if err != nil {
@@ -187,12 +189,26 @@ func (l *Ledger) reloadLedger() error {
return err
}
- l.trackers.register(&l.accts) // update the balances
- l.trackers.register(&l.time) // tracks the block timestamps
- l.trackers.register(&l.txTail) // update the transaction tail, tracking the recent 1000 txn
- l.trackers.register(&l.bulletin) // provide closed channel signaling support for completed rounds
- l.trackers.register(&l.notifier) // send OnNewBlocks to subscribers
- l.trackers.register(&l.metrics) // provides metrics reporting support
+ // init tracker db
+ trackerDBInitParams, err := trackerDBInitialize(l, l.catchpoint.catchpointEnabled(), l.catchpoint.dbDirectory)
+ if err != nil {
+ return err
+ }
+
+ // set account updates tracker as a driver to calculate tracker db round and committing offsets
+ trackers := []ledgerTracker{
+ &l.accts, // update the balances
+ &l.catchpoint, // catchpoints tracker : update catchpoint labels, create catchpoint files
+ &l.txTail, // update the transaction tail, tracking the recent 1000 txn
+ &l.bulletin, // provide closed channel signaling support for completed rounds
+ &l.notifier, // send OnNewBlocks to subscribers
+ &l.metrics, // provides metrics reporting support
+ }
+
+ err = l.trackers.initialize(l, trackers, l.cfg)
+ if err != nil {
+ return err
+ }
err = l.trackers.loadFromDisk(l)
if err != nil {
@@ -200,6 +216,14 @@ func (l *Ledger) reloadLedger() error {
return err
}
+ // post-init actions
+ if trackerDBInitParams.vacuumOnStartup || l.cfg.OptimizeAccountsDatabaseOnStartup {
+ err = l.accts.vacuumDatabase(context.Background())
+ if err != nil {
+ return err
+ }
+ }
+
// Check that the genesis hash, if present, matches.
err = l.verifyMatchingGenesisHash()
if err != nil {
@@ -386,7 +410,7 @@ func (l *Ledger) notifyCommit(r basics.Round) basics.Round {
func (l *Ledger) GetLastCatchpointLabel() string {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- return l.accts.GetLastCatchpointLabel()
+ return l.catchpoint.GetLastCatchpointLabel()
}
// GetCreatorForRound takes a CreatableIndex and a CreatableType and tries to
@@ -409,7 +433,7 @@ func (l *Ledger) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableTy
// CompactCertVoters returns the top online accounts at round rnd.
// The result might be nil, even with err=nil, if there are no voters
// for that round because compact certs were not enabled.
-func (l *Ledger) CompactCertVoters(rnd basics.Round) (voters *VotersForRound, err error) {
+func (l *Ledger) CompactCertVoters(rnd basics.Round) (*ledgercore.VotersForRound, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
return l.accts.voters.getVoters(rnd)
@@ -449,6 +473,20 @@ func (l *Ledger) Lookup(rnd basics.Round, addr basics.Address) (basics.AccountDa
return data, nil
}
+// LookupAgreement returns account data used by agreement.
+func (l *Ledger) LookupAgreement(rnd basics.Round, addr basics.Address) (basics.OnlineAccountData, error) {
+ l.trackerMu.RLock()
+ defer l.trackerMu.RUnlock()
+
+ // Intentionally apply (pending) rewards up to rnd.
+ data, err := l.accts.LookupWithRewards(rnd, addr)
+ if err != nil {
+ return basics.OnlineAccountData{}, err
+ }
+
+ return data.OnlineAccountData(), nil
+}
+
// LookupWithoutRewards is like Lookup but does not apply pending rewards up
// to the requested round rnd.
func (l *Ledger) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (basics.AccountData, basics.Round, error) {
@@ -463,18 +501,29 @@ func (l *Ledger) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (ba
return data, validThrough, nil
}
-// Totals returns the totals of all accounts at the end of round rnd.
-func (l *Ledger) Totals(rnd basics.Round) (ledgercore.AccountTotals, error) {
+// LatestTotals returns the totals of all accounts for the most recent round, as well as the round number.
+func (l *Ledger) LatestTotals() (basics.Round, ledgercore.AccountTotals, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- return l.accts.Totals(rnd)
+ return l.accts.LatestTotals()
+}
+
+// OnlineTotals returns the online totals of all accounts at the end of round rnd.
+func (l *Ledger) OnlineTotals(rnd basics.Round) (basics.MicroAlgos, error) {
+ l.trackerMu.RLock()
+ defer l.trackerMu.RUnlock()
+ totals, err := l.accts.Totals(rnd)
+ if err != nil {
+ return basics.MicroAlgos{}, err
+ }
+ return totals.Online.Money, nil
}
// CheckDup return whether a transaction is a duplicate one.
-func (l *Ledger) CheckDup(currentProto config.ConsensusParams, current basics.Round, firstValid basics.Round, lastValid basics.Round, txid transactions.Txid, txl TxLease) error {
+func (l *Ledger) CheckDup(currentProto config.ConsensusParams, current basics.Round, firstValid basics.Round, lastValid basics.Round, txid transactions.Txid, txl ledgercore.Txlease) error {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- return l.txTail.checkDup(currentProto, current, firstValid, lastValid, txid, txl.Txlease)
+ return l.txTail.checkDup(currentProto, current, firstValid, lastValid, txid, txl)
}
// Latest returns the latest known block round added to the ledger.
@@ -527,15 +576,11 @@ func (l *Ledger) BlockCert(rnd basics.Round) (blk bookkeeping.Block, cert agreem
func (l *Ledger) AddBlock(blk bookkeeping.Block, cert agreement.Certificate) error {
// passing nil as the executionPool is ok since we're asking the evaluator to skip verification.
- updates, err := eval(context.Background(), l, blk, false, l.verifiedTxnCache, nil)
+ updates, err := internal.Eval(context.Background(), l, blk, false, l.verifiedTxnCache, nil)
if err != nil {
return err
}
-
- vb := ValidatedBlock{
- blk: blk,
- delta: updates,
- }
+ vb := ledgercore.MakeValidatedBlock(blk, updates)
return l.AddValidatedBlock(vb, cert)
}
@@ -545,18 +590,19 @@ func (l *Ledger) AddBlock(blk bookkeeping.Block, cert agreement.Certificate) err
// having to re-compute the effect of the block on the ledger state, if
// the block has previously been validated. Otherwise, AddValidatedBlock
// behaves like AddBlock.
-func (l *Ledger) AddValidatedBlock(vb ValidatedBlock, cert agreement.Certificate) error {
+func (l *Ledger) AddValidatedBlock(vb ledgercore.ValidatedBlock, cert agreement.Certificate) error {
// Grab the tracker lock first, to ensure newBlock() is notified before committedUpTo().
l.trackerMu.Lock()
defer l.trackerMu.Unlock()
- err := l.blockQ.putBlock(vb.blk, cert)
+ blk := vb.Block()
+ err := l.blockQ.putBlock(blk, cert)
if err != nil {
return err
}
- l.headerCache.Put(vb.blk.Round(), vb.blk.BlockHeader)
- l.trackers.newBlock(vb.blk, vb.delta)
- l.log.Debugf("added blk %d", vb.blk.Round())
+ l.headerCache.Put(blk.Round(), blk.BlockHeader)
+ l.trackers.newBlock(blk, vb.Delta())
+ l.log.Debugf("added blk %d", blk.Round())
return nil
}
@@ -577,14 +623,6 @@ func (l *Ledger) Wait(r basics.Round) chan struct{} {
return l.bulletin.Wait(r)
}
-// Timestamp uses the timestamp tracker to return the timestamp
-// from block r.
-func (l *Ledger) Timestamp(r basics.Round) (int64, error) {
- l.trackerMu.RLock()
- defer l.trackerMu.RUnlock()
- return l.time.timestamp(r)
-}
-
// GenesisHash returns the genesis hash for this ledger.
func (l *Ledger) GenesisHash() crypto.Digest {
return l.genesisHash
@@ -595,6 +633,11 @@ func (l *Ledger) GenesisProto() config.ConsensusParams {
return l.genesisProto
}
+// GenesisAccounts returns initial accounts for this ledger.
+func (l *Ledger) GenesisAccounts() map[basics.Address]basics.AccountData {
+ return l.genesisAccounts
+}
+
// GetCatchpointCatchupState returns the current state of the catchpoint catchup.
func (l *Ledger) GetCatchpointCatchupState(ctx context.Context) (state CatchpointCatchupState, err error) {
return MakeCatchpointCatchupAccessor(l, l.log).GetState(ctx)
@@ -608,7 +651,7 @@ func (l *Ledger) GetCatchpointCatchupState(ctx context.Context) (state Catchpoin
func (l *Ledger) GetCatchpointStream(round basics.Round) (ReadCloseSizer, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- return l.accts.GetCatchpointStream(round)
+ return l.catchpoint.GetCatchpointStream(round)
}
// ledgerForTracker methods
@@ -628,9 +671,9 @@ func (l *Ledger) trackerLog() logging.Logger {
// trackerEvalVerified is used by the accountUpdates to reconstruct the ledgercore.StateDelta from a given block during its loadFromDisk execution.
// when this function is called, the trackers mutex is expected to already be held. The provided accUpdatesLedger allows the
// evaluator to shortcut the "main" ledger (i.e. this struct) and avoid taking the trackers lock a second time.
-func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger ledgerForEvaluator) (ledgercore.StateDelta, error) {
+func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger internal.LedgerForEvaluator) (ledgercore.StateDelta, error) {
// passing nil as the executionPool is ok since we're asking the evaluator to skip verification.
- return eval(context.Background(), accUpdatesLedger, blk, false, l.verifiedTxnCache, nil)
+ return internal.Eval(context.Background(), accUpdatesLedger, blk, false, l.verifiedTxnCache, nil)
}
// IsWritingCatchpointFile returns true when a catchpoint file is being generated. The function is used by the catchup service
@@ -638,7 +681,7 @@ func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger led
func (l *Ledger) IsWritingCatchpointFile() bool {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- return l.accts.IsWritingCatchpointFile()
+ return l.catchpoint.IsWritingCatchpointFile()
}
// VerifiedTransactionCache returns the verify.VerifiedTransactionCache
@@ -646,9 +689,61 @@ func (l *Ledger) VerifiedTransactionCache() verify.VerifiedTransactionCache {
return l.verifiedTxnCache
}
-// TxLease is an exported version of txlease
-type TxLease struct {
- ledgercore.Txlease
+// StartEvaluator creates a BlockEvaluator, given a ledger and a block header
+// of the block that the caller is planning to evaluate. If the length of the
+// payset being evaluated is known in advance, a paysetHint >= 0 can be
+// passed, avoiding unnecessary payset slice growth. The optional maxTxnBytesPerBlock parameter
+// caps the total transaction bytes of a single generated block when a positive value is passed.
+// If a value of zero or less is passed, the consensus MaxTxnBytesPerBlock is used instead.
+func (l *Ledger) StartEvaluator(hdr bookkeeping.BlockHeader, paysetHint, maxTxnBytesPerBlock int) (*internal.BlockEvaluator, error) {
+ return internal.StartEvaluator(l, hdr,
+ internal.EvaluatorOptions{
+ PaysetHint: paysetHint,
+ Generate: true,
+ Validate: true,
+ MaxTxnBytesPerBlock: maxTxnBytesPerBlock,
+ })
+}
+
+// Validate uses the ledger to validate block blk as a candidate next block.
+// It returns an error if blk is not the expected next block, or if blk is
+// not a valid block (e.g., it has duplicate transactions, overspends some
+// account, etc).
+func (l *Ledger) Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledgercore.ValidatedBlock, error) {
+ delta, err := internal.Eval(ctx, l, blk, true, l.verifiedTxnCache, executionPool)
+ if err != nil {
+ return nil, err
+ }
+
+ vb := ledgercore.MakeValidatedBlock(blk, delta)
+ return &vb, nil
+}
+
+// CompactCertParams computes the parameters for building or verifying
+// a compact cert for block hdr, using voters from block votersHdr.
+func CompactCertParams(votersHdr bookkeeping.BlockHeader, hdr bookkeeping.BlockHeader) (res compactcert.Params, err error) {
+ return internal.CompactCertParams(votersHdr, hdr)
+}
+
+// AcceptableCompactCertWeight computes the acceptable signed weight
+// of a compact cert if it were to appear in a transaction with a
+// particular firstValid round. Earlier rounds require a smaller cert.
+// votersHdr specifies the block that contains the Merkle commitment of
+// the voters for this compact cert (and thus the compact cert is for
+// votersHdr.Round() + CompactCertRounds).
+//
+// logger must not be nil; use at least logging.Base()
+func AcceptableCompactCertWeight(votersHdr bookkeeping.BlockHeader, firstValid basics.Round, logger logging.Logger) uint64 {
+ return internal.AcceptableCompactCertWeight(votersHdr, firstValid, logger)
+}
+
+// DebuggerLedger defines the minimal set of methods required for creating debug balances.
+type DebuggerLedger = internal.LedgerForCowBase
+
+// MakeDebugBalances creates a ledger suitable for dryrun and debugger
+func MakeDebugBalances(l DebuggerLedger, round basics.Round, proto protocol.ConsensusVersion, prevTimestamp int64) apply.Balances {
+ return internal.MakeDebugBalances(l, round, proto, prevTimestamp)
}
var ledgerInitblocksdbCount = metrics.NewCounter("ledger_initblocksdb_count", "calls")
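
Taken together, these wrappers give callers a complete block-building flow without importing ledger/internal. A minimal sketch, assuming l is an open *Ledger and stxn a signed transaction; the certificate would normally come from agreement:

	prev, err := l.BlockHdr(l.Latest())
	if err != nil {
		return err
	}
	blk := bookkeeping.MakeBlock(prev)
	eval, err := l.StartEvaluator(blk.BlockHeader, 0, 0) // no payset hint, consensus byte cap
	if err != nil {
		return err
	}
	if err = eval.Transaction(stxn, transactions.ApplyData{}); err != nil {
		return err
	}
	vb, err := eval.GenerateBlock()
	if err != nil {
		return err
	}
	return l.AddValidatedBlock(*vb, agreement.Certificate{})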
diff --git a/ledger/ledger_perf_test.go b/ledger/ledger_perf_test.go
index b1b2244b4..e3f36c4f7 100644
--- a/ledger/ledger_perf_test.go
+++ b/ledger/ledger_perf_test.go
@@ -37,6 +37,8 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/verify"
+ "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
)
@@ -206,7 +208,7 @@ func benchmarkFullBlocks(params testParams, b *testing.B) {
prev, err := l0.BlockHdr(basics.Round(i))
require.NoError(b, err)
newBlk := bookkeeping.MakeBlock(prev)
- eval, err := l0.StartEvaluator(newBlk.BlockHeader, 5000)
+ eval, err := l0.StartEvaluator(newBlk.BlockHeader, 5000, 0)
require.NoError(b, err)
// build a payset
@@ -261,8 +263,8 @@ func benchmarkFullBlocks(params testParams, b *testing.B) {
}
// check if block is full
- if err == ErrNoSpace {
- txPerBlock = len(eval.block.Payset)
+ if err == ledgercore.ErrNoSpace {
+ txPerBlock = eval.PaySetSize()
break
} else {
require.NoError(b, err)
@@ -271,7 +273,7 @@ func benchmarkFullBlocks(params testParams, b *testing.B) {
// First block just creates app + opts in accts if asa test
if i == 1 {
onCompletion = transactions.NoOpOC
- createdAppIdx = eval.state.txnCounter()
+ createdAppIdx = eval.TestingTxnCounter()
// On first block, opt in all accts to asa (accts is empty if not asa test)
k := 0
@@ -298,19 +300,19 @@ func benchmarkFullBlocks(params testParams, b *testing.B) {
// If this is the app creation block, add to both ledgers
if i == 1 {
- err = l0.AddBlock(lvb.blk, cert)
+ err = l0.AddBlock(lvb.Block(), cert)
require.NoError(b, err)
- err = l1.AddBlock(lvb.blk, cert)
+ err = l1.AddBlock(lvb.Block(), cert)
require.NoError(b, err)
continue
}
// For all other blocks, add just to the first ledger, and stash
// away to be replayed in the second ledger while running timer
- err = l0.AddBlock(lvb.blk, cert)
+ err = l0.AddBlock(lvb.Block(), cert)
require.NoError(b, err)
- blocks = append(blocks, lvb.blk)
+ blocks = append(blocks, lvb.Block())
}
b.Logf("built %d blocks, each with %d txns", numBlocks, txPerBlock)
@@ -319,7 +321,7 @@ func benchmarkFullBlocks(params testParams, b *testing.B) {
vc := verify.GetMockedCache(true)
b.ResetTimer()
for _, blk := range blocks {
- _, err = eval(context.Background(), l1, blk, true, vc, nil)
+ _, err = internal.Eval(context.Background(), l1, blk, true, vc, nil)
require.NoError(b, err)
err = l1.AddBlock(blk, cert)
require.NoError(b, err)
diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index ba2ac5a39..9fa3c08bd 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -36,26 +36,13 @@ import (
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/verify"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-algorand/util/execpool"
)
-var poolSecret, sinkSecret *crypto.SignatureSecrets
-
-func init() {
- var seed crypto.Seed
-
- incentivePoolName := []byte("incentive pool")
- copy(seed[:], incentivePoolName)
- poolSecret = crypto.GenerateSignatureSecrets(seed)
-
- feeSinkName := []byte("fee sink")
- copy(seed[:], feeSinkName)
- sinkSecret = crypto.GenerateSignatureSecrets(seed)
-}
-
func sign(secrets map[basics.Address]*crypto.SignatureSecrets, t transactions.Transaction) transactions.SignedTxn {
var sig crypto.Signature
_, ok := secrets[t.Sender]
@@ -68,72 +55,6 @@ func sign(secrets map[basics.Address]*crypto.SignatureSecrets, t transactions.Tr
}
}
-func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion, baseAlgoPerAccount int) (genesisInitState InitState, initKeys map[basics.Address]*crypto.SignatureSecrets) {
- params := config.Consensus[proto]
- poolAddr := testPoolAddr
- sinkAddr := testSinkAddr
-
- var zeroSeed crypto.Seed
- var genaddrs [10]basics.Address
- var gensecrets [10]*crypto.SignatureSecrets
- for i := range genaddrs {
- seed := zeroSeed
- seed[0] = byte(i)
- x := crypto.GenerateSignatureSecrets(seed)
- genaddrs[i] = basics.Address(x.SignatureVerifier)
- gensecrets[i] = x
- }
-
- initKeys = make(map[basics.Address]*crypto.SignatureSecrets)
- initAccounts := make(map[basics.Address]basics.AccountData)
- for i := range genaddrs {
- initKeys[genaddrs[i]] = gensecrets[i]
- // Give each account quite a bit more balance than MinFee or MinBalance
- initAccounts[genaddrs[i]] = basics.MakeAccountData(basics.Online, basics.MicroAlgos{Raw: uint64((i + baseAlgoPerAccount) * 100000)})
- }
- initKeys[poolAddr] = poolSecret
- initAccounts[poolAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 1234567})
- initKeys[sinkAddr] = sinkSecret
- initAccounts[sinkAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 7654321})
-
- incentivePoolBalanceAtGenesis := initAccounts[poolAddr].MicroAlgos
- var initialRewardsPerRound uint64
- if params.InitialRewardsRateCalculation {
- initialRewardsPerRound = basics.SubSaturate(incentivePoolBalanceAtGenesis.Raw, params.MinBalance) / uint64(params.RewardsRateRefreshInterval)
- } else {
- initialRewardsPerRound = incentivePoolBalanceAtGenesis.Raw / uint64(params.RewardsRateRefreshInterval)
- }
-
- initBlock := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- GenesisID: tb.Name(),
- Round: 0,
- RewardsState: bookkeeping.RewardsState{
- RewardsRate: initialRewardsPerRound,
- RewardsPool: poolAddr,
- FeeSink: sinkAddr,
- },
- UpgradeState: bookkeeping.UpgradeState{
- CurrentProtocol: proto,
- },
- },
- }
-
- var err error
- initBlock.TxnRoot, err = initBlock.PaysetCommit()
- require.NoError(tb, err)
-
- if params.SupportGenesisHash {
- initBlock.BlockHeader.GenesisHash = crypto.Hash([]byte(tb.Name()))
- }
-
- genesisInitState.Block = initBlock
- genesisInitState.Accounts = initAccounts
- genesisInitState.GenesisHash = crypto.Hash([]byte(tb.Name()))
-
- return
-}
-
func (l *Ledger) appendUnvalidated(blk bookkeeping.Block) error {
backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
defer backlogPool.Shutdown()
@@ -186,8 +107,9 @@ func makeNewEmptyBlock(t *testing.T, l *Ledger, GenesisID string, initAccounts m
}
}
} else {
- totals, err := l.Totals(l.Latest())
+ latestRound, totals, err := l.LatestTotals()
require.NoError(t, err)
+ require.Equal(t, l.Latest(), latestRound)
totalRewardUnits = totals.RewardUnits()
}
poolBal, err := l.Lookup(l.Latest(), poolAddr)
@@ -257,7 +179,7 @@ func (l *Ledger) addBlockTxns(t *testing.T, accounts map[basics.Address]basics.A
func TestLedgerBasic(t *testing.T) {
partitiontest.PartitionTest(t)
- genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = true
@@ -272,7 +194,7 @@ func TestLedgerBlockHeaders(t *testing.T) {
a := require.New(t)
- genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = true
@@ -418,7 +340,7 @@ func TestLedgerSingleTx(t *testing.T) {
// V15 is the earliest protocol version in active use.
// The genesis for betanet and testnet is at V15
// The genesis for mainnet is at V17
- genesisInitState, initSecrets := testGenerateInitState(t, protocol.ConsensusV15, 100)
+ genesisInitState, initSecrets := ledgertesting.GenerateInitState(t, protocol.ConsensusV15, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -621,7 +543,7 @@ func TestLedgerSingleTxV24(t *testing.T) {
a := require.New(t)
protoName := protocol.ConsensusV24
- genesisInitState, initSecrets := testGenerateInitState(t, protoName, 100)
+ genesisInitState, initSecrets := ledgertesting.GenerateInitState(t, protoName, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -792,7 +714,7 @@ func TestLedgerAppCrossRoundWrites(t *testing.T) {
a := require.New(t)
protoName := protocol.ConsensusV24
- genesisInitState, initSecrets := testGenerateInitState(t, protoName, 100)
+ genesisInitState, initSecrets := ledgertesting.GenerateInitState(t, protoName, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -930,7 +852,7 @@ func TestLedgerAppMultiTxnWrites(t *testing.T) {
a := require.New(t)
protoName := protocol.ConsensusV24
- genesisInitState, initSecrets := testGenerateInitState(t, protoName, 100)
+ genesisInitState, initSecrets := ledgertesting.GenerateInitState(t, protoName, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -1095,7 +1017,7 @@ func testLedgerSingleTxApplyData(t *testing.T, version protocol.ConsensusVersion
backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
defer backlogPool.Shutdown()
- genesisInitState, initSecrets := testGenerateInitState(t, version, 100)
+ genesisInitState, initSecrets := ledgertesting.GenerateInitState(t, version, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -1367,7 +1289,7 @@ func TestLedgerRegressionFaultyLeaseFirstValidCheckFuture(t *testing.T) {
func testLedgerRegressionFaultyLeaseFirstValidCheck2f3880f7(t *testing.T, version protocol.ConsensusVersion) {
a := require.New(t)
- genesisInitState, initSecrets := testGenerateInitState(t, version, 100)
+ genesisInitState, initSecrets := ledgertesting.GenerateInitState(t, version, 100)
const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = true
@@ -1493,7 +1415,7 @@ func TestGetLastCatchpointLabel(t *testing.T) {
partitiontest.PartitionTest(t)
//initLedger
- genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -1504,7 +1426,7 @@ func TestGetLastCatchpointLabel(t *testing.T) {
// set some value
lastCatchpointLabel := "someCatchpointLabel"
- ledger.accts.lastCatchpointLabel = lastCatchpointLabel
+ ledger.catchpoint.lastCatchpointLabel = lastCatchpointLabel
// verify the value is returned
require.Equal(t, lastCatchpointLabel, ledger.GetLastCatchpointLabel())
@@ -1572,7 +1494,7 @@ func TestListAssetsAndApplications(t *testing.T) {
numElementsPerSegement := 10 // This is multiplied by 10. see randomCreatables
//initLedger
- genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -1631,7 +1553,7 @@ func TestLedgerMemoryLeak(t *testing.T) {
t.Skip() // for manual runs only
dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 10000000000)
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 10000000000)
const inMem = false
cfg := config.GetDefaultLocal()
cfg.Archival = true
@@ -1755,7 +1677,7 @@ func BenchmarkLedgerStartup(b *testing.B) {
log := logging.TestingLog(b)
tmpDir, err := ioutil.TempDir(os.TempDir(), "BenchmarkLedgerStartup")
require.NoError(b, err)
- genesisInitState, _ := testGenerateInitState(b, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ := ledgertesting.GenerateInitState(b, protocol.ConsensusCurrentVersion, 100)
cfg := config.GetDefaultLocal()
cfg.Archival = false
diff --git a/ledger/ledgercore/error.go b/ledger/ledgercore/error.go
index 65cbcff02..68185a573 100644
--- a/ledger/ledgercore/error.go
+++ b/ledger/ledgercore/error.go
@@ -17,12 +17,16 @@
package ledgercore
import (
+ "errors"
"fmt"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
)
+// ErrNoSpace indicates insufficient space for a transaction in the block
+var ErrNoSpace = errors.New("block does not have space for transaction")
+
// TransactionInLedgerError is returned when a transaction cannot be added because it has already been done
type TransactionInLedgerError struct {
Txid transactions.Txid
@@ -79,10 +83,27 @@ func (err ErrNoEntry) Error() string {
// LogicEvalError indicates TEAL evaluation failure
type LogicEvalError struct {
- Err error
+ Err error
+ Details string
}
// Error satisfies builtin interface `error`
func (err LogicEvalError) Error() string {
- return fmt.Sprintf("logic eval error: %v", err.Err)
+ msg := fmt.Sprintf("logic eval error: %v", err.Err)
+ if len(err.Details) > 0 {
+ msg = fmt.Sprintf("%s. Details: %s", msg, err.Details)
+ }
+ return msg
+}
+
+// ErrNonSequentialBlockEval provides feedback when the evaluator cannot be created for
+// stale/future rounds.
+type ErrNonSequentialBlockEval struct {
+ EvaluatorRound basics.Round // EvaluatorRound is the round the evaluator was created for
+ LatestRound basics.Round // LatestRound is the latest round available on disk
+}
+
+// Error satisfies builtin interface `error`
+func (err ErrNonSequentialBlockEval) Error() string {
+ return fmt.Sprintf("block evaluation for round %d requires sequential evaluation while the latest round is %d", err.EvaluatorRound, err.LatestRound)
}
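
A sketch of how callers can discriminate the two new error kinds; err is assumed to come from a block evaluator:

	if errors.Is(err, ledgercore.ErrNoSpace) {
		// the block is full: finalize it and retry the transaction in the next block
	}
	var nonSeq ledgercore.ErrNonSequentialBlockEval
	if errors.As(err, &nonSeq) {
		fmt.Printf("need round %d, ledger is at round %d\n", nonSeq.EvaluatorRound, nonSeq.LatestRound)
	}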
diff --git a/ledger/ledgercore/misc.go b/ledger/ledgercore/misc.go
new file mode 100644
index 000000000..f4fd21f50
--- /dev/null
+++ b/ledger/ledgercore/misc.go
@@ -0,0 +1,51 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledgercore
+
+import (
+ "fmt"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+)
+
+// ParticipantsArray implements merklearray.Array and is used to commit
+// to a Merkle tree of online accounts.
+//msgp:ignore ParticipantsArray
+type ParticipantsArray []basics.Participant
+
+// Length returns the length of the array.
+func (a ParticipantsArray) Length() uint64 {
+ return uint64(len(a))
+}
+
+// GetHash returns the hash for the given position.
+func (a ParticipantsArray) GetHash(pos uint64) (crypto.Digest, error) {
+ if pos >= uint64(len(a)) {
+ return crypto.Digest{}, fmt.Errorf("array ParticipantsArray.Get(%d) out of bounds %d", pos, len(a))
+ }
+
+ return crypto.HashObj(a[pos]), nil
+}
+
+// InitState structure defines blockchain init params
+type InitState struct {
+ Block bookkeeping.Block
+ Accounts map[basics.Address]basics.AccountData
+ GenesisHash crypto.Digest
+}
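
ParticipantsArray exists so that a plain slice of basics.Participant satisfies merklearray.Array. A minimal sketch, with voteID assumed to be a crypto.OneTimeSignatureVerifier:

	parts := ledgercore.ParticipantsArray{
		{PK: voteID, Weight: 1000000, KeyDilution: 10000},
	}
	tree, err := merklearray.Build(parts)
	if err != nil {
		return err
	}
	_ = tree // the tree's root is the online-accounts commitment used by compact certs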
diff --git a/agreement/fuzzer/keyManager_test.go b/ledger/ledgercore/onlineacct.go
index c888b4955..de786e9be 100644
--- a/agreement/fuzzer/keyManager_test.go
+++ b/ledger/ledgercore/onlineacct.go
@@ -14,21 +14,25 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package fuzzer
+package ledgercore
import (
- "github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
)
-type simpleKeyManager []account.Participation
-
-func (m simpleKeyManager) VotingKeys(votingRound, _ basics.Round) []account.Participation {
- var km []account.Participation
- for _, acc := range m {
- if acc.OverlapsInterval(votingRound, votingRound) {
- km = append(km, acc)
- }
- }
- return km
+// An OnlineAccount corresponds to an account whose AccountData.Status
+// is Online. This is used for a Merkle tree commitment of online
+// accounts, which is subsequently used to validate participants for
+// a compact certificate.
+type OnlineAccount struct {
+ // These are a subset of the fields from the corresponding AccountData.
+ Address basics.Address
+ MicroAlgos basics.MicroAlgos
+ RewardsBase uint64
+ NormalizedOnlineBalance uint64
+ VoteID crypto.OneTimeSignatureVerifier
+ VoteFirstValid basics.Round
+ VoteLastValid basics.Round
+ VoteKeyDilution uint64
}
diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go
index 1edb4e8c0..60336d939 100644
--- a/ledger/ledgercore/statedelta.go
+++ b/ledger/ledgercore/statedelta.go
@@ -97,6 +97,9 @@ type StateDelta struct {
// initial hint for allocating data structures for StateDelta
initialTransactionsCount int
+
+ // The account totals reflecting the changes in this StateDelta object.
+ Totals AccountTotals
}
// AccountDeltas stores ordered accounts and allows fast lookup by address
diff --git a/ledger/ledgercore/validatedBlock.go b/ledger/ledgercore/validatedBlock.go
new file mode 100644
index 000000000..ef6c8f250
--- /dev/null
+++ b/ledger/ledgercore/validatedBlock.go
@@ -0,0 +1,59 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledgercore
+
+import (
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/committee"
+)
+
+// ValidatedBlock represents the result of a block validation. It can
+// be used to efficiently add the block to the ledger, without repeating
+// the work of applying the block's changes to the ledger state.
+type ValidatedBlock struct {
+ blk bookkeeping.Block
+ delta StateDelta
+}
+
+// Block returns the underlying Block for a ValidatedBlock.
+func (vb ValidatedBlock) Block() bookkeeping.Block {
+ return vb.blk
+}
+
+// Delta returns the underlying Delta for a ValidatedBlock.
+func (vb ValidatedBlock) Delta() StateDelta {
+ return vb.delta
+}
+
+// WithSeed returns a copy of the ValidatedBlock with a modified seed.
+func (vb ValidatedBlock) WithSeed(s committee.Seed) ValidatedBlock {
+ newblock := vb.blk
+ newblock.BlockHeader.Seed = s
+
+ return ValidatedBlock{
+ blk: newblock,
+ delta: vb.delta,
+ }
+}
+
+// MakeValidatedBlock creates a validated block.
+func MakeValidatedBlock(blk bookkeeping.Block, delta StateDelta) ValidatedBlock {
+ return ValidatedBlock{
+ blk: blk,
+ delta: delta,
+ }
+}
diff --git a/ledger/ledgercore/votersForRound.go b/ledger/ledgercore/votersForRound.go
new file mode 100644
index 000000000..de76d2d29
--- /dev/null
+++ b/ledger/ledgercore/votersForRound.go
@@ -0,0 +1,164 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledgercore
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/algorand/go-deadlock"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto/merklearray"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+)
+
+// VotersForRound tracks the top online voting accounts as of a particular
+// round, along with a Merkle tree commitment to those voting accounts.
+type VotersForRound struct {
+ // Because it can take some time to compute the top participants and the
+ // corresponding Merkle tree, the VotersForRound is constructed in
+ // the background. This means that the fields (Participants, AddrToPos,
+ // Tree, and TotalWeight) could be nil/zero while a background thread
+ // is computing them. Once the fields are set, however, they are
+ // immutable, and it is no longer necessary to acquire the lock.
+ //
+ // If an error occurs while computing the tree in the background,
+ // loadTreeError might be set to non-nil instead. That also finalizes
+ // the state of this VotersForRound.
+ mu deadlock.Mutex
+ cond *sync.Cond
+ loadTreeError error
+
+ // Proto is the ConsensusParams for the round whose balances are reflected
+ // in participants.
+ Proto config.ConsensusParams
+
+ // Participants is the array of top #CompactCertVoters online accounts
+ // in this round, sorted by normalized balance (to make sure heavyweight
+ // accounts are biased to the front).
+ Participants ParticipantsArray
+
+ // AddrToPos specifies the position of a given account address (if present)
+ // in the Participants array. This allows adding a vote from a given account
+ // to the certificate builder.
+ AddrToPos map[basics.Address]uint64
+
+ // Tree is a constructed Merkle tree of the Participants array.
+ Tree *merklearray.Tree
+
+ // TotalWeight is the sum of the weights from the Participants array.
+ TotalWeight basics.MicroAlgos
+}
+
+// TopOnlineAccounts is the signature of a function that returns the top online accounts for a given round.
+type TopOnlineAccounts func(rnd basics.Round, voteRnd basics.Round, n uint64) ([]*OnlineAccount, error)
+
+// MakeVotersForRound creates a new VotersForRound object and initializes its cond.
+func MakeVotersForRound() *VotersForRound {
+ vr := &VotersForRound{}
+ vr.cond = sync.NewCond(&vr.mu)
+ return vr
+}
+
+// LoadTree computes the top online participants for the round of hdr, applies pending
+// rewards to their weights, builds the Merkle tree over them, and publishes the result
+// to any Wait callers.
+func (tr *VotersForRound) LoadTree(onlineTop TopOnlineAccounts, hdr bookkeeping.BlockHeader) error {
+ r := hdr.Round
+
+ // certRound is the block that we expect to form a compact certificate for,
+ // using the balances from round r.
+ certRound := r + basics.Round(tr.Proto.CompactCertVotersLookback+tr.Proto.CompactCertRounds)
+
+ // sigKeyRound is the ephemeral key ID that we expect to be used for signing
+ // the block from certRound. It is one higher because the keys for certRound
+ // might be deleted by the time consensus is reached on the block and we try
+ // to sign the compact cert for block certRound.
+ sigKeyRound := certRound + 1
+
+ top, err := onlineTop(r, sigKeyRound, tr.Proto.CompactCertTopVoters)
+ if err != nil {
+ return err
+ }
+
+ participants := make(ParticipantsArray, len(top))
+ addrToPos := make(map[basics.Address]uint64)
+ var totalWeight basics.MicroAlgos
+
+ for i, acct := range top {
+ var ot basics.OverflowTracker
+ rewards := basics.PendingRewards(&ot, tr.Proto, acct.MicroAlgos, acct.RewardsBase, hdr.RewardsLevel)
+ money := ot.AddA(acct.MicroAlgos, rewards)
+ if ot.Overflowed {
+ return fmt.Errorf("votersTracker.LoadTree: overflow adding rewards %d + %d", acct.MicroAlgos, rewards)
+ }
+
+ totalWeight = ot.AddA(totalWeight, money)
+ if ot.Overflowed {
+ return fmt.Errorf("votersTracker.LoadTree: overflow computing totalWeight %d + %d", totalWeight.ToUint64(), money.ToUint64())
+ }
+
+ keyDilution := acct.VoteKeyDilution
+ if keyDilution == 0 {
+ keyDilution = tr.Proto.DefaultKeyDilution
+ }
+
+ participants[i] = basics.Participant{
+ PK: acct.VoteID,
+ Weight: money.ToUint64(),
+ KeyDilution: keyDilution,
+ }
+ addrToPos[acct.Address] = uint64(i)
+ }
+
+ tree, err := merklearray.Build(participants)
+ if err != nil {
+ return err
+ }
+
+ tr.mu.Lock()
+ tr.AddrToPos = addrToPos
+ tr.Participants = participants
+ tr.TotalWeight = totalWeight
+ tr.Tree = tree
+ tr.cond.Broadcast()
+ tr.mu.Unlock()
+
+ return nil
+}
+
+// BroadcastError publishes err as the tree-loading failure and wakes all goroutines blocked in Wait.
+func (tr *VotersForRound) BroadcastError(err error) {
+ tr.mu.Lock()
+ tr.loadTreeError = err
+ tr.cond.Broadcast()
+ tr.mu.Unlock()
+}
+
+// Wait blocks until the Merkle tree has been constructed, or returns the error published by BroadcastError.
+func (tr *VotersForRound) Wait() error {
+ tr.mu.Lock()
+ defer tr.mu.Unlock()
+ for tr.Tree == nil {
+ if tr.loadTreeError != nil {
+ return tr.loadTreeError
+ }
+
+ tr.cond.Wait()
+ }
+ return nil
+}
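
The mutex/cond pair implements a one-shot publish: LoadTree (or BroadcastError) completes once in the background, after which the fields are immutable and lock-free to read. A sketch, with proto, onlineTop, and hdr assumed in scope:

	vr := ledgercore.MakeVotersForRound()
	vr.Proto = proto
	go func() {
		if err := vr.LoadTree(onlineTop, hdr); err != nil {
			vr.BroadcastError(err)
		}
	}()
	// e.g. in the compact cert builder:
	if err := vr.Wait(); err != nil {
		return err
	}
	_ = vr.Tree // safe to read without the lock once Wait returns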
diff --git a/ledger/metrics.go b/ledger/metrics.go
index a2276c006..55a84d563 100644
--- a/ledger/metrics.go
+++ b/ledger/metrics.go
@@ -17,6 +17,9 @@
package ledger
import (
+ "context"
+ "database/sql"
+
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger/ledgercore"
@@ -29,7 +32,7 @@ type metricsTracker struct {
ledgerRound *metrics.Gauge
}
-func (mt *metricsTracker) loadFromDisk(l ledgerForTracker) error {
+func (mt *metricsTracker) loadFromDisk(l ledgerForTracker, _ basics.Round) error {
mt.ledgerTransactionsTotal = metrics.MakeCounter(metrics.LedgerTransactionsTotal)
mt.ledgerRewardClaimsTotal = metrics.MakeCounter(metrics.LedgerRewardClaimsTotal)
mt.ledgerRound = metrics.MakeGauge(metrics.LedgerRound)
@@ -47,6 +50,23 @@ func (mt *metricsTracker) newBlock(blk bookkeeping.Block, delta ledgercore.State
mt.ledgerRewardClaimsTotal.Add(float64(1), map[string]string{})
}
-func (mt *metricsTracker) committedUpTo(committedRnd basics.Round) basics.Round {
- return committedRnd
+func (mt *metricsTracker) committedUpTo(committedRnd basics.Round) (retRound, lookback basics.Round) {
+ return committedRnd, basics.Round(0)
+}
+
+func (mt *metricsTracker) prepareCommit(dcc *deferredCommitContext) error {
+ return nil
+}
+
+func (mt *metricsTracker) commitRound(context.Context, *sql.Tx, *deferredCommitContext) error {
+ return nil
+}
+
+func (mt *metricsTracker) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+}
+
+func (mt *metricsTracker) handleUnorderedCommit(uint64, basics.Round, basics.Round) {
+}
+func (mt *metricsTracker) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
+ return dcr
}
diff --git a/ledger/msgp_gen.go b/ledger/msgp_gen.go
index b3eb7d89d..f35709100 100644
--- a/ledger/msgp_gen.go
+++ b/ledger/msgp_gen.go
@@ -47,14 +47,6 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
-// storageAction
-// |-----> MarshalMsg
-// |-----> CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> Msgsize
-// |-----> MsgIsZero
-//
// MarshalMsg implements msgp.Marshaler
func (z CatchpointCatchupState) MarshalMsg(b []byte) (o []byte) {
@@ -851,49 +843,3 @@ func (z *encodedBalanceRecord) Msgsize() (s int) {
func (z *encodedBalanceRecord) MsgIsZero() bool {
return ((*z).Address.MsgIsZero()) && ((*z).AccountData.MsgIsZero())
}
-
-// MarshalMsg implements msgp.Marshaler
-func (z storageAction) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- o = msgp.AppendUint64(o, uint64(z))
- return
-}
-
-func (_ storageAction) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(storageAction)
- if !ok {
- _, ok = (z).(*storageAction)
- }
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *storageAction) UnmarshalMsg(bts []byte) (o []byte, err error) {
- {
- var zb0001 uint64
- zb0001, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- (*z) = storageAction(zb0001)
- }
- o = bts
- return
-}
-
-func (_ *storageAction) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*storageAction)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z storageAction) Msgsize() (s int) {
- s = msgp.Uint64Size
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z storageAction) MsgIsZero() bool {
- return z == 0
-}
diff --git a/ledger/notifier.go b/ledger/notifier.go
index d5a0d4886..e922c73e1 100644
--- a/ledger/notifier.go
+++ b/ledger/notifier.go
@@ -17,6 +17,8 @@
package ledger
import (
+ "context"
+ "database/sql"
"sync"
"github.com/algorand/go-deadlock"
@@ -85,7 +87,7 @@ func (bn *blockNotifier) close() {
bn.closing.Wait()
}
-func (bn *blockNotifier) loadFromDisk(l ledgerForTracker) error {
+func (bn *blockNotifier) loadFromDisk(l ledgerForTracker, _ basics.Round) error {
bn.cond = sync.NewCond(&bn.mu)
bn.running = true
bn.pendingBlocks = nil
@@ -108,6 +110,24 @@ func (bn *blockNotifier) newBlock(blk bookkeeping.Block, delta ledgercore.StateD
bn.cond.Broadcast()
}
-func (bn *blockNotifier) committedUpTo(rnd basics.Round) basics.Round {
- return rnd
+func (bn *blockNotifier) committedUpTo(rnd basics.Round) (retRound, lookback basics.Round) {
+ return rnd, basics.Round(0)
+}
+
+func (bn *blockNotifier) prepareCommit(dcc *deferredCommitContext) error {
+ return nil
+}
+
+func (bn *blockNotifier) commitRound(context.Context, *sql.Tx, *deferredCommitContext) error {
+ return nil
+}
+
+func (bn *blockNotifier) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+}
+
+func (bn *blockNotifier) handleUnorderedCommit(uint64, basics.Round, basics.Round) {
+}
+
+func (bn *blockNotifier) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
+ return dcr
}
diff --git a/ledger/onlineacct.go b/ledger/onlinetopheap.go
index 687f0c595..72a81d88a 100644
--- a/ledger/onlineacct.go
+++ b/ledger/onlinetopheap.go
@@ -19,29 +19,12 @@ package ledger
import (
"bytes"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
)
-// An onlineAccount corresponds to an account whose AccountData.Status
-// is Online. This is used for a Merkle tree commitment of online
-// accounts, which is subsequently used to validate participants for
-// a compact certificate.
-type onlineAccount struct {
- // These are a subset of the fields from the corresponding AccountData.
- Address basics.Address
- MicroAlgos basics.MicroAlgos
- RewardsBase uint64
- NormalizedOnlineBalance uint64
- VoteID crypto.OneTimeSignatureVerifier
- VoteFirstValid basics.Round
- VoteLastValid basics.Round
- VoteKeyDilution uint64
-}
-
// onlineTopHeap implements heap.Interface for tracking top N online accounts.
type onlineTopHeap struct {
- accts []*onlineAccount
+ accts []*ledgercore.OnlineAccount
}
// Len implements sort.Interface
@@ -78,7 +61,7 @@ func (h *onlineTopHeap) Swap(i, j int) {
// Push implements heap.Interface
func (h *onlineTopHeap) Push(x interface{}) {
- h.accts = append(h.accts, x.(*onlineAccount))
+ h.accts = append(h.accts, x.(*ledgercore.OnlineAccount))
}
// Pop implements heap.Interface
diff --git a/ledger/onlineacct_test.go b/ledger/onlinetopheap_test.go
index c1d05fa15..11c85d599 100644
--- a/ledger/onlineacct_test.go
+++ b/ledger/onlinetopheap_test.go
@@ -23,6 +23,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -30,7 +31,7 @@ func TestOnlineTopHeap_Less(t *testing.T) {
partitiontest.PartitionTest(t)
h := onlineTopHeap{
- accts: []*onlineAccount{
+ accts: []*ledgercore.OnlineAccount{
{
Address: basics.Address{},
NormalizedOnlineBalance: 0,
@@ -59,7 +60,7 @@ func TestOnlineTopHeap_Swap(t *testing.T) {
partitiontest.PartitionTest(t)
h := onlineTopHeap{
- accts: []*onlineAccount{
+ accts: []*ledgercore.OnlineAccount{
{
Address: basics.Address{},
NormalizedOnlineBalance: 0,
@@ -88,7 +89,7 @@ func TestOnlineTopHeap_Push(t *testing.T) {
partitiontest.PartitionTest(t)
h := onlineTopHeap{
- accts: []*onlineAccount{
+ accts: []*ledgercore.OnlineAccount{
{
Address: basics.Address{},
NormalizedOnlineBalance: 0,
@@ -102,7 +103,7 @@ func TestOnlineTopHeap_Push(t *testing.T) {
acct0 := h.accts[0]
acct1 := h.accts[1]
- acct2 := &onlineAccount{
+ acct2 := &ledgercore.OnlineAccount{
Address: basics.Address(crypto.Hash([]byte("address"))),
NormalizedOnlineBalance: 0,
}
@@ -119,7 +120,7 @@ func TestOnlineTopHeap_Pop(t *testing.T) {
partitiontest.PartitionTest(t)
h := onlineTopHeap{
- accts: []*onlineAccount{
+ accts: []*ledgercore.OnlineAccount{
{
Address: basics.Address{},
NormalizedOnlineBalance: 0,
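
For reference, the heap is only ever driven through container/heap, which maintains the ordering defined by Less; a sketch inside package ledger:

	h := &onlineTopHeap{}
	heap.Push(h, &ledgercore.OnlineAccount{NormalizedOnlineBalance: 5})
	heap.Push(h, &ledgercore.OnlineAccount{NormalizedOnlineBalance: 9})
	acct := heap.Pop(h).(*ledgercore.OnlineAccount) // the root per onlineTopHeap's ordering
	_ = acct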
diff --git a/ledger/perf_test.go b/ledger/perf_test.go
index c1e520fef..15aca599f 100644
--- a/ledger/perf_test.go
+++ b/ledger/perf_test.go
@@ -30,64 +30,18 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/execpool"
)
-func genesis(naccts int) (InitState, []basics.Address, []*crypto.SignatureSecrets) {
- return genesisWithProto(naccts, protocol.ConsensusCurrentVersion)
-}
-func genesisWithProto(naccts int, proto protocol.ConsensusVersion) (InitState, []basics.Address, []*crypto.SignatureSecrets) {
- blk := bookkeeping.Block{}
- blk.CurrentProtocol = proto
- blk.BlockHeader.GenesisID = "test"
- blk.FeeSink = testSinkAddr
- blk.RewardsPool = testPoolAddr
- crypto.RandBytes(blk.BlockHeader.GenesisHash[:])
-
- addrs := []basics.Address{}
- keys := []*crypto.SignatureSecrets{}
- accts := make(map[basics.Address]basics.AccountData)
-
- // 10 billion microalgos, across N accounts and pool and sink
- amount := 10 * 1000000000 * 1000000 / uint64(naccts+2)
-
- for i := 0; i < naccts; i++ {
- var seed crypto.Seed
- crypto.RandBytes(seed[:])
- key := crypto.GenerateSignatureSecrets(seed)
- addr := basics.Address(key.SignatureVerifier)
-
- keys = append(keys, key)
- addrs = append(addrs, addr)
-
- adata := basics.AccountData{}
- adata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000 / uint64(naccts)
- accts[addr] = adata
- }
-
- pooldata := basics.AccountData{}
- pooldata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000
- pooldata.Status = basics.NotParticipating
- accts[testPoolAddr] = pooldata
-
- sinkdata := basics.AccountData{}
- sinkdata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000
- sinkdata.Status = basics.NotParticipating
- accts[testSinkAddr] = sinkdata
-
- genesisHash := blk.BlockHeader.GenesisHash
-
- return InitState{blk, accts, genesisHash}, addrs, keys
-}
-
func BenchmarkManyAccounts(b *testing.B) {
deadlock.Opts.Disable = true
b.StopTimer()
- genesisInitState, addrs, _ := genesis(1)
+ genesisInitState, addrs, _ := ledgertesting.Genesis(1)
addr := addrs[0]
dbName := fmt.Sprintf("%s.%d", b.Name(), crypto.RandUint64())
@@ -138,7 +92,7 @@ func BenchmarkManyAccounts(b *testing.B) {
func BenchmarkValidate(b *testing.B) {
b.StopTimer()
- genesisInitState, addrs, keys := genesis(10000)
+ genesisInitState, addrs, keys := ledgertesting.Genesis(10000)
backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
defer backlogPool.Shutdown()
diff --git a/ledger/testing/accountsTotals.go b/ledger/testing/accountsTotals.go
new file mode 100644
index 000000000..b646a6829
--- /dev/null
+++ b/ledger/testing/accountsTotals.go
@@ -0,0 +1,41 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package testing
+
+import (
+ gotesting "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+)
+
+// CalculateNewRoundAccountTotals calculates the accounts totals for a given round
+func CalculateNewRoundAccountTotals(t *gotesting.T, newRoundDeltas ledgercore.AccountDeltas, newRoundRewardLevel uint64, newRoundConsensusParams config.ConsensusParams, prevRoundBalances map[basics.Address]basics.AccountData, prevRoundTotals ledgercore.AccountTotals) (newTotals ledgercore.AccountTotals) {
+ newTotals = prevRoundTotals
+ var ot basics.OverflowTracker
+ newTotals.ApplyRewards(newRoundRewardLevel, &ot)
+ for i := 0; i < newRoundDeltas.Len(); i++ {
+ addr, ad := newRoundDeltas.GetByIdx(i)
+ newTotals.DelAccount(newRoundConsensusParams, prevRoundBalances[addr], &ot)
+ newTotals.AddAccount(newRoundConsensusParams, ad, &ot)
+ }
+ require.False(t, ot.Overflowed)
+ return
+}
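// Usage sketch (not part of this commit): how a test might drive
// CalculateNewRoundAccountTotals. The single-account delta and the balances
// are illustrative placeholders; the methods on ledgercore.AccountTotals and
// ledgercore.AccountDeltas are the same ones the helper itself uses above.

package testing_test // hypothetical location for the sketch

import (
	gotesting "testing"

	"github.com/algorand/go-algorand/config"
	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/ledger/ledgercore"
	ledgertesting "github.com/algorand/go-algorand/ledger/testing"
	"github.com/algorand/go-algorand/protocol"
)

func TestTotalsSketch(t *gotesting.T) {
	proto := config.Consensus[protocol.ConsensusCurrentVersion]
	addr := ledgertesting.RandomAddress()

	// previous round: a single account holding 1000 microalgos
	prev := map[basics.Address]basics.AccountData{
		addr: {MicroAlgos: basics.MicroAlgos{Raw: 1000}},
	}
	var ot basics.OverflowTracker
	var prevTotals ledgercore.AccountTotals
	prevTotals.AddAccount(proto, prev[addr], &ot)

	// new round: the same account now holds 1500 microalgos
	var deltas ledgercore.AccountDeltas
	deltas.Upsert(addr, basics.AccountData{MicroAlgos: basics.MicroAlgos{Raw: 1500}})

	// rewards level 0 keeps the arithmetic trivial for the sketch
	newTotals := ledgertesting.CalculateNewRoundAccountTotals(t, deltas, 0, proto, prev, prevTotals)
	_ = newTotals
}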
diff --git a/ledger/testing/initState.go b/ledger/testing/initState.go
new file mode 100644
index 000000000..ad96e1f76
--- /dev/null
+++ b/ledger/testing/initState.go
@@ -0,0 +1,111 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package testing
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+var poolSecret, sinkSecret *crypto.SignatureSecrets
+
+func init() {
+ var seed crypto.Seed
+
+ incentivePoolName := []byte("incentive pool")
+ copy(seed[:], incentivePoolName)
+ poolSecret = crypto.GenerateSignatureSecrets(seed)
+
+ feeSinkName := []byte("fee sink")
+ copy(seed[:], feeSinkName)
+ sinkSecret = crypto.GenerateSignatureSecrets(seed)
+}
+
+// GenerateInitState generates testing init state
+func GenerateInitState(tb testing.TB, proto protocol.ConsensusVersion, baseAlgoPerAccount int) (genesisInitState ledgercore.InitState, initKeys map[basics.Address]*crypto.SignatureSecrets) {
+ params := config.Consensus[proto]
+ poolAddr := testPoolAddr
+ sinkAddr := testSinkAddr
+
+ var zeroSeed crypto.Seed
+ var genaddrs [10]basics.Address
+ var gensecrets [10]*crypto.SignatureSecrets
+ for i := range genaddrs {
+ seed := zeroSeed
+ seed[0] = byte(i)
+ x := crypto.GenerateSignatureSecrets(seed)
+ genaddrs[i] = basics.Address(x.SignatureVerifier)
+ gensecrets[i] = x
+ }
+
+ initKeys = make(map[basics.Address]*crypto.SignatureSecrets)
+ initAccounts := make(map[basics.Address]basics.AccountData)
+ for i := range genaddrs {
+ initKeys[genaddrs[i]] = gensecrets[i]
+ // Give each account quite a bit more balance than MinFee or MinBalance
+ initAccounts[genaddrs[i]] = basics.MakeAccountData(basics.Online, basics.MicroAlgos{Raw: uint64((i + baseAlgoPerAccount) * 100000)})
+ }
+ initKeys[poolAddr] = poolSecret
+ initAccounts[poolAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 1234567})
+ initKeys[sinkAddr] = sinkSecret
+ initAccounts[sinkAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 7654321})
+
+ incentivePoolBalanceAtGenesis := initAccounts[poolAddr].MicroAlgos
+ var initialRewardsPerRound uint64
+ if params.InitialRewardsRateCalculation {
+ initialRewardsPerRound = basics.SubSaturate(incentivePoolBalanceAtGenesis.Raw, params.MinBalance) / uint64(params.RewardsRateRefreshInterval)
+ } else {
+ initialRewardsPerRound = incentivePoolBalanceAtGenesis.Raw / uint64(params.RewardsRateRefreshInterval)
+ }
+
+ initBlock := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ GenesisID: tb.Name(),
+ Round: 0,
+ RewardsState: bookkeeping.RewardsState{
+ RewardsRate: initialRewardsPerRound,
+ RewardsPool: poolAddr,
+ FeeSink: sinkAddr,
+ },
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: proto,
+ },
+ },
+ }
+
+ var err error
+ initBlock.TxnRoot, err = initBlock.PaysetCommit()
+ require.NoError(tb, err)
+
+ if params.SupportGenesisHash {
+ initBlock.BlockHeader.GenesisHash = crypto.Hash([]byte(tb.Name()))
+ }
+
+ genesisInitState.Block = initBlock
+ genesisInitState.Accounts = initAccounts
+ genesisInitState.GenesisHash = crypto.Hash([]byte(tb.Name()))
+
+ return
+}
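// Usage sketch (not part of this commit): GenerateInitState seeds ten
// deterministic accounts plus the pool and sink, so a consuming test can
// count on twelve entries. The baseAlgoPerAccount value of 100 is arbitrary.

package testing_test // hypothetical location for the sketch

import (
	gotesting "testing"

	"github.com/stretchr/testify/require"

	ledgertesting "github.com/algorand/go-algorand/ledger/testing"
	"github.com/algorand/go-algorand/protocol"
)

func TestGenerateInitStateSketch(t *gotesting.T) {
	genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)

	// ten generated accounts plus the rewards pool and the fee sink
	require.Len(t, genesisInitState.Accounts, 12)
	require.Len(t, initKeys, 12)
	require.Equal(t, protocol.ConsensusCurrentVersion, genesisInitState.Block.CurrentProtocol)
}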
diff --git a/ledger/testing/randomAccounts.go b/ledger/testing/randomAccounts.go
new file mode 100644
index 000000000..86ec6a60c
--- /dev/null
+++ b/ledger/testing/randomAccounts.go
@@ -0,0 +1,344 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package testing
+
+import (
+ "fmt"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/protocol"
+ //"github.com/algorand/go-algorand/data/bookkeeping"
+
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+)
+
+var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
+
+// RandomAddress generates a random address
+func RandomAddress() basics.Address {
+ var addr basics.Address
+ crypto.RandBytes(addr[:])
+ return addr
+}
+
+// RandomNote generates random note data
+func RandomNote() []byte {
+ var note [16]byte
+ crypto.RandBytes(note[:])
+ return note[:]
+}
+
+// RandomAccountData generates a random AccountData
+func RandomAccountData(rewardsLevel uint64) basics.AccountData {
+ var data basics.AccountData
+
+ // Avoid overflowing totals
+ data.MicroAlgos.Raw = crypto.RandUint64() % (1 << 32)
+
+ switch crypto.RandUint64() % 3 {
+ case 0:
+ data.Status = basics.Online
+ case 1:
+ data.Status = basics.Offline
+ default:
+ data.Status = basics.NotParticipating
+ }
+
+ data.RewardsBase = rewardsLevel
+ data.VoteFirstValid = 0
+ data.VoteLastValid = 1000
+ return data
+}
+
+// RandomFullAccountData generates a random AccountData, including random asset and application data
+func RandomFullAccountData(rewardsLevel, lastCreatableID uint64) (basics.AccountData, uint64) {
+ data := RandomAccountData(rewardsLevel)
+
+ crypto.RandBytes(data.VoteID[:])
+ crypto.RandBytes(data.SelectionID[:])
+ data.VoteFirstValid = basics.Round(crypto.RandUint64())
+ data.VoteLastValid = basics.Round(crypto.RandUint64())
+ data.VoteKeyDilution = crypto.RandUint64()
+ if 1 == (crypto.RandUint64() % 2) {
+ // if account has created assets, have these defined.
+ data.AssetParams = make(map[basics.AssetIndex]basics.AssetParams)
+ createdAssetsCount := crypto.RandUint64()%20 + 1
+ for i := uint64(0); i < createdAssetsCount; i++ {
+ ap := basics.AssetParams{
+ Total: crypto.RandUint64(),
+ Decimals: uint32(crypto.RandUint64() % 20),
+ DefaultFrozen: (crypto.RandUint64()%2 == 0),
+ UnitName: fmt.Sprintf("un%x", uint32(crypto.RandUint64()%0x7fffffff)),
+ AssetName: fmt.Sprintf("an%x", uint32(crypto.RandUint64()%0x7fffffff)),
+ URL: fmt.Sprintf("url%x", uint32(crypto.RandUint64()%0x7fffffff)),
+ }
+ crypto.RandBytes(ap.MetadataHash[:])
+ crypto.RandBytes(ap.Manager[:])
+ crypto.RandBytes(ap.Reserve[:])
+ crypto.RandBytes(ap.Freeze[:])
+ crypto.RandBytes(ap.Clawback[:])
+ lastCreatableID++
+ data.AssetParams[basics.AssetIndex(lastCreatableID)] = ap
+ }
+ }
+ if 1 == (crypto.RandUint64()%2) && lastCreatableID > 0 {
+ // if account owns assets
+ data.Assets = make(map[basics.AssetIndex]basics.AssetHolding)
+ ownedAssetsCount := crypto.RandUint64()%20 + 1
+ for i := uint64(0); i < ownedAssetsCount; i++ {
+ ah := basics.AssetHolding{
+ Amount: crypto.RandUint64(),
+ Frozen: (crypto.RandUint64()%2 == 0),
+ }
+ data.Assets[basics.AssetIndex(crypto.RandUint64()%lastCreatableID)] = ah
+ }
+ }
+ if 1 == (crypto.RandUint64() % 5) {
+ crypto.RandBytes(data.AuthAddr[:])
+ }
+
+ if 1 == (crypto.RandUint64()%3) && lastCreatableID > 0 {
+ data.AppLocalStates = make(map[basics.AppIndex]basics.AppLocalState)
+ appStatesCount := crypto.RandUint64()%20 + 1
+ for i := uint64(0); i < appStatesCount; i++ {
+ ap := basics.AppLocalState{
+ Schema: basics.StateSchema{
+ NumUint: crypto.RandUint64()%5 + 1,
+ NumByteSlice: crypto.RandUint64() % 5,
+ },
+ KeyValue: make(map[string]basics.TealValue),
+ }
+
+ for i := uint64(0); i < ap.Schema.NumUint; i++ {
+ appName := fmt.Sprintf("lapp%x-%x", crypto.RandUint64(), i)
+ ap.KeyValue[appName] = basics.TealValue{
+ Type: basics.TealUintType,
+ Uint: crypto.RandUint64(),
+ }
+ }
+ for i := uint64(0); i < ap.Schema.NumByteSlice; i++ {
+ appName := fmt.Sprintf("lapp%x-%x", crypto.RandUint64(), i)
+ tv := basics.TealValue{
+ Type: basics.TealBytesType,
+ }
+ bytes := make([]byte, crypto.RandUint64()%uint64(config.MaxBytesKeyValueLen-len(appName)))
+ crypto.RandBytes(bytes[:])
+ tv.Bytes = string(bytes)
+ ap.KeyValue[appName] = tv
+ }
+ if len(ap.KeyValue) == 0 {
+ ap.KeyValue = nil
+ }
+ data.AppLocalStates[basics.AppIndex(crypto.RandUint64()%lastCreatableID)] = ap
+ }
+ }
+
+ if 1 == (crypto.RandUint64() % 3) {
+ data.TotalAppSchema = basics.StateSchema{
+ NumUint: crypto.RandUint64() % 50,
+ NumByteSlice: crypto.RandUint64() % 50,
+ }
+ }
+ if 1 == (crypto.RandUint64() % 3) {
+ data.AppParams = make(map[basics.AppIndex]basics.AppParams)
+ appParamsCount := crypto.RandUint64()%5 + 1
+ for i := uint64(0); i < appParamsCount; i++ {
+ ap := basics.AppParams{
+ ApprovalProgram: make([]byte, int(crypto.RandUint63())%config.MaxAppProgramLen),
+ ClearStateProgram: make([]byte, int(crypto.RandUint63())%config.MaxAppProgramLen),
+ GlobalState: make(basics.TealKeyValue),
+ StateSchemas: basics.StateSchemas{
+ LocalStateSchema: basics.StateSchema{
+ NumUint: crypto.RandUint64()%5 + 1,
+ NumByteSlice: crypto.RandUint64() % 5,
+ },
+ GlobalStateSchema: basics.StateSchema{
+ NumUint: crypto.RandUint64()%5 + 1,
+ NumByteSlice: crypto.RandUint64() % 5,
+ },
+ },
+ }
+ if len(ap.ApprovalProgram) > 0 {
+ crypto.RandBytes(ap.ApprovalProgram[:])
+ } else {
+ ap.ApprovalProgram = nil
+ }
+ if len(ap.ClearStateProgram) > 0 {
+ crypto.RandBytes(ap.ClearStateProgram[:])
+ } else {
+ ap.ClearStateProgram = nil
+ }
+
+ for i := uint64(0); i < ap.StateSchemas.LocalStateSchema.NumUint+ap.StateSchemas.GlobalStateSchema.NumUint; i++ {
+ appName := fmt.Sprintf("tapp%x-%x", crypto.RandUint64(), i)
+ ap.GlobalState[appName] = basics.TealValue{
+ Type: basics.TealUintType,
+ Uint: crypto.RandUint64(),
+ }
+ }
+ for i := uint64(0); i < ap.StateSchemas.LocalStateSchema.NumByteSlice+ap.StateSchemas.GlobalStateSchema.NumByteSlice; i++ {
+ appName := fmt.Sprintf("tapp%x-%x", crypto.RandUint64(), i)
+ tv := basics.TealValue{
+ Type: basics.TealBytesType,
+ }
+ bytes := make([]byte, crypto.RandUint64()%uint64(config.MaxBytesKeyValueLen))
+ crypto.RandBytes(bytes[:])
+ tv.Bytes = string(bytes)
+ ap.GlobalState[appName] = tv
+ }
+ if len(ap.GlobalState) == 0 {
+ ap.GlobalState = nil
+ }
+ lastCreatableID++
+ data.AppParams[basics.AppIndex(lastCreatableID)] = ap
+ }
+
+ }
+ return data, lastCreatableID
+}
+
+// RandomAccounts generates a random map of accounts
+func RandomAccounts(niter int, simpleAccounts bool) map[basics.Address]basics.AccountData {
+ res := make(map[basics.Address]basics.AccountData)
+ if simpleAccounts {
+ for i := 0; i < niter; i++ {
+ res[RandomAddress()] = RandomAccountData(0)
+ }
+ } else {
+ lastCreatableID := crypto.RandUint64() % 512
+ for i := 0; i < niter; i++ {
+ res[RandomAddress()], lastCreatableID = RandomFullAccountData(0, lastCreatableID)
+ }
+ }
+ return res
+}
+
+// RandomDeltas generates a random set of account deltas
+func RandomDeltas(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, imbalance int64) {
+ updates, totals, imbalance, _ = RandomDeltasImpl(niter, base, rewardsLevel, true, 0)
+ return
+}
+
+// RandomDeltasFull generates a random set of account deltas
+func RandomDeltasFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, imbalance int64, lastCreatableID uint64) {
+ updates, totals, imbalance, lastCreatableID = RandomDeltasImpl(niter, base, rewardsLevel, false, lastCreatableIDIn)
+ return
+}
+
+// RandomDeltasImpl generates a random set of account deltas
+func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, imbalance int64, lastCreatableID uint64) {
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ totals = make(map[basics.Address]basics.AccountData)
+
+ // copy base -> totals
+ for addr, data := range base {
+ totals[addr] = data
+ }
+
+ // if making a full delta then we need to determine the max asset/app id to avoid conflicts
+ lastCreatableID = lastCreatableIDIn
+ if !simple {
+ for _, ad := range base {
+ for aid := range ad.AssetParams {
+ if uint64(aid) > lastCreatableID {
+ lastCreatableID = uint64(aid)
+ }
+ }
+ for aid := range ad.AppParams {
+ if uint64(aid) > lastCreatableID {
+ lastCreatableID = uint64(aid)
+ }
+ }
+ }
+ }
+
+ // Change some existing accounts
+ {
+ i := 0
+ for addr, old := range base {
+ if i >= len(base)/2 || i >= niter {
+ break
+ }
+
+ if addr == testPoolAddr {
+ continue
+ }
+ i++
+
+ var new basics.AccountData
+ if simple {
+ new = RandomAccountData(rewardsLevel)
+ } else {
+ new, lastCreatableID = RandomFullAccountData(rewardsLevel, lastCreatableID)
+ }
+ updates.Upsert(addr, new)
+ imbalance += int64(old.WithUpdatedRewards(proto, rewardsLevel).MicroAlgos.Raw - new.MicroAlgos.Raw)
+ totals[addr] = new
+ }
+ }
+
+ // Change some new accounts
+ for i := 0; i < niter; i++ {
+ addr := RandomAddress()
+ old := totals[addr]
+ var new basics.AccountData
+ if simple {
+ new = RandomAccountData(rewardsLevel)
+ } else {
+ new, lastCreatableID = RandomFullAccountData(rewardsLevel, lastCreatableID)
+ }
+ updates.Upsert(addr, new)
+ imbalance += int64(old.WithUpdatedRewards(proto, rewardsLevel).MicroAlgos.Raw - new.MicroAlgos.Raw)
+ totals[addr] = new
+ }
+
+ return
+}
+
+// RandomDeltasBalanced generates a random set of balanced account deltas
+func RandomDeltasBalanced(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData) {
+ updates, totals, _ = RandomDeltasBalancedImpl(niter, base, rewardsLevel, true, 0)
+ return
+}
+
+// RandomDeltasBalancedFull generates a random set of balanced account deltas
+func RandomDeltasBalancedFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, lastCreatableID uint64) {
+ updates, totals, lastCreatableID = RandomDeltasBalancedImpl(niter, base, rewardsLevel, false, lastCreatableIDIn)
+ return
+}
+
+// RandomDeltasBalancedImpl generates a random set of balanced account deltas
+func RandomDeltasBalancedImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, lastCreatableID uint64) {
+ var imbalance int64
+ if simple {
+ updates, totals, imbalance = RandomDeltas(niter, base, rewardsLevel)
+ } else {
+ updates, totals, imbalance, lastCreatableID = RandomDeltasFull(niter, base, rewardsLevel, lastCreatableIDIn)
+ }
+
+ oldPool := base[testPoolAddr]
+ newPool := oldPool
+ newPool.MicroAlgos.Raw += uint64(imbalance)
+
+ updates.Upsert(testPoolAddr, newPool)
+ totals[testPoolAddr] = newPool
+
+ return updates, totals, lastCreatableID
+}
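// Usage sketch (not part of this commit): the intended call pattern for the
// helpers above; the account and iteration counts are arbitrary. With a
// rewards level of 0, the returned imbalance is exactly the microalgo
// difference that the balanced variants would push onto the pool account.

package testing_test // hypothetical location for the sketch

import (
	gotesting "testing"

	"github.com/stretchr/testify/require"

	ledgertesting "github.com/algorand/go-algorand/ledger/testing"
)

func TestRandomDeltasSketch(t *gotesting.T) {
	// 20 base accounts with simple (no asset/app) data
	base := ledgertesting.RandomAccounts(20, true)

	// mutate a few existing accounts and introduce a few new ones
	updates, totals, imbalance := ledgertesting.RandomDeltas(5, base, 0)
	require.NotZero(t, updates.Len())
	require.GreaterOrEqual(t, len(totals), len(base))
	t.Logf("microalgo imbalance across the deltas: %d", imbalance)
}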
diff --git a/ledger/testing/testGenesis.go b/ledger/testing/testGenesis.go
new file mode 100644
index 000000000..a24c46c57
--- /dev/null
+++ b/ledger/testing/testGenesis.go
@@ -0,0 +1,137 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package testing
+
+import (
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+// NewTestGenesis creates a bunch of accounts, splits up 10B algos
+// between them and the rewardspool and feesink, and gives out the
+// addresses and secrets it creates to enable tests. For special
+// scenarios, manipulate these return values before using newTestLedger.
+func NewTestGenesis() (bookkeeping.GenesisBalances, []basics.Address, []*crypto.SignatureSecrets) {
+ // irrelevant, but deterministic
+ sink, err := basics.UnmarshalChecksumAddress("YTPRLJ2KK2JRFSZZNAF57F3K5Y2KCG36FZ5OSYLW776JJGAUW5JXJBBD7Q")
+ if err != nil {
+ panic(err)
+ }
+ rewards, err := basics.UnmarshalChecksumAddress("242H5OXHUEBYCGGWB3CQ6AZAMQB5TMCWJGHCGQOZPEIVQJKOO7NZXUXDQA")
+ if err != nil {
+ panic(err)
+ }
+
+ const count = 10
+ addrs := make([]basics.Address, count)
+ secrets := make([]*crypto.SignatureSecrets, count)
+ accts := make(map[basics.Address]basics.AccountData)
+
+ // 10 billion microalgos, across N accounts and pool and sink
+ amount := 10 * 1000000000 * 1000000 / uint64(count+2)
+
+ for i := 0; i < count; i++ {
+ // Create deterministic addresses, so that output stays the same, run to run.
+ var seed crypto.Seed
+ seed[0] = byte(i)
+ secrets[i] = crypto.GenerateSignatureSecrets(seed)
+ addrs[i] = basics.Address(secrets[i].SignatureVerifier)
+
+ adata := basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: amount},
+ }
+ accts[addrs[i]] = adata
+ }
+
+ accts[sink] = basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: amount},
+ Status: basics.NotParticipating,
+ }
+
+ accts[rewards] = basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: amount},
+ }
+
+ genBalances := bookkeeping.MakeGenesisBalances(accts, sink, rewards)
+
+ return genBalances, addrs, secrets
+}
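// Usage sketch (not part of this commit): the "manipulate these return
// values" pattern described above. It assumes GenesisBalances exposes its
// Balances map; the doubling is an arbitrary example scenario.
//
//	genBalances, addrs, _ := ledgertesting.NewTestGenesis()
//	acct := genBalances.Balances[addrs[0]]
//	acct.MicroAlgos.Raw *= 2 // e.g. fund a large-transfer scenario
//	genBalances.Balances[addrs[0]] = acct
//	// ... then hand genBalances to the test ledger constructor.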
+
+// Genesis creates a genesis state for naccts accounts using the ConsensusCurrentVersion
+func Genesis(naccts int) (ledgercore.InitState, []basics.Address, []*crypto.SignatureSecrets) {
+ return GenesisWithProto(naccts, protocol.ConsensusCurrentVersion)
+}
+
+// GenesisWithProto creates a genesis state for naccts accounts using the given consensus protocol proto
+func GenesisWithProto(naccts int, proto protocol.ConsensusVersion) (ledgercore.InitState, []basics.Address, []*crypto.SignatureSecrets) {
+ blk := bookkeeping.Block{}
+ blk.CurrentProtocol = proto
+ blk.BlockHeader.GenesisID = "test"
+ blk.FeeSink = testSinkAddr
+ blk.RewardsPool = testPoolAddr
+
+ crypto.RandBytes(blk.BlockHeader.GenesisHash[:])
+
+ addrs := []basics.Address{}
+ keys := []*crypto.SignatureSecrets{}
+ accts := make(map[basics.Address]basics.AccountData)
+
+ // 10 billion microalgos, across N accounts and pool and sink
+ amount := 10 * 1000000000 * 1000000 / uint64(naccts+2)
+
+ for i := 0; i < naccts; i++ {
+ var seed crypto.Seed
+ crypto.RandBytes(seed[:])
+ key := crypto.GenerateSignatureSecrets(seed)
+ addr := basics.Address(key.SignatureVerifier)
+
+ keys = append(keys, key)
+ addrs = append(addrs, addr)
+
+ adata := basics.AccountData{}
+ adata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000 / uint64(naccts)
+ accts[addr] = adata
+ }
+
+ pooldata := basics.AccountData{}
+ pooldata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000
+ pooldata.Status = basics.NotParticipating
+ accts[testPoolAddr] = pooldata
+
+ sinkdata := basics.AccountData{}
+ sinkdata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000
+ sinkdata.Status = basics.NotParticipating
+ accts[testSinkAddr] = sinkdata
+
+ genesisHash := blk.BlockHeader.GenesisHash
+
+ incentivePoolBalanceAtGenesis := pooldata.MicroAlgos
+ var initialRewardsPerRound uint64
+ params := config.Consensus[proto]
+ if params.InitialRewardsRateCalculation {
+ initialRewardsPerRound = basics.SubSaturate(incentivePoolBalanceAtGenesis.Raw, params.MinBalance) / uint64(params.RewardsRateRefreshInterval)
+ } else {
+ initialRewardsPerRound = incentivePoolBalanceAtGenesis.Raw / uint64(params.RewardsRateRefreshInterval)
+ }
+ blk.RewardsRate = initialRewardsPerRound
+
+ return ledgercore.InitState{Block: blk, Accounts: accts, GenesisHash: genesisHash}, addrs, keys
+}
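// Usage sketch (not part of this commit): Genesis in use. The equal-split
// assertion follows directly from the amount computation above (10B algos
// divided across naccts+2 accounts).

package testing_test // hypothetical location for the sketch

import (
	gotesting "testing"

	"github.com/stretchr/testify/require"

	ledgertesting "github.com/algorand/go-algorand/ledger/testing"
)

func TestGenesisSketch(t *gotesting.T) {
	initState, addrs, keys := ledgertesting.Genesis(10)

	// ten user accounts, plus the pool and the sink
	require.Len(t, initState.Accounts, 12)
	require.Len(t, addrs, 10)
	require.Len(t, keys, 10)

	// every account receives an equal share of the 10 billion algos
	expected := uint64(10 * 1000000000 * 1000000 / 12)
	for _, data := range initState.Accounts {
		require.Equal(t, expected, data.MicroAlgos.Raw)
	}
}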
diff --git a/ledger/time.go b/ledger/time.go
deleted file mode 100644
index 979234eb6..000000000
--- a/ledger/time.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package ledger
-
-import (
- "fmt"
-
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/ledger/ledgercore"
-)
-
-type timeTracker struct {
- timestamps map[basics.Round]int64
-}
-
-func (tt *timeTracker) loadFromDisk(l ledgerForTracker) error {
- latest := l.Latest()
- blkhdr, err := l.BlockHdr(latest)
- if err != nil {
- return err
- }
-
- tt.timestamps = make(map[basics.Round]int64)
- tt.timestamps[latest] = blkhdr.TimeStamp
- return nil
-}
-
-func (tt *timeTracker) close() {
-}
-
-func (tt *timeTracker) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
- rnd := blk.Round()
- tt.timestamps[rnd] = delta.Hdr.TimeStamp
-}
-
-func (tt *timeTracker) committedUpTo(committedRnd basics.Round) basics.Round {
- for rnd := range tt.timestamps {
- if rnd < committedRnd {
- delete(tt.timestamps, rnd)
- }
- }
- return committedRnd
-}
-
-func (tt *timeTracker) timestamp(r basics.Round) (int64, error) {
- ts, ok := tt.timestamps[r]
- if ok {
- return ts, nil
- }
-
- return 0, fmt.Errorf("no record of timestamp for round %d", r)
-}
diff --git a/ledger/tracker.go b/ledger/tracker.go
index 40dd725a5..855995665 100644
--- a/ledger/tracker.go
+++ b/ledger/tracker.go
@@ -17,16 +17,24 @@
package ledger
import (
+ "context"
+ "database/sql"
+ "errors"
"fmt"
"reflect"
+ "sync"
+ "time"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/internal"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/util/db"
+ "github.com/algorand/go-deadlock"
)
// ledgerTracker defines part of the API for any state machine that
@@ -55,26 +63,56 @@ type ledgerTracker interface {
// blocks from the database, or access its own state. The
// ledgerForTracker interface abstracts away the details of
// ledger internals so that individual trackers can be tested
- // in isolation.
- loadFromDisk(ledgerForTracker) error
+ // in isolation. The provided round number represents the
+ // current accounts storage round number.
+ loadFromDisk(ledgerForTracker, basics.Round) error
- // newBlock informs the tracker of a new block from round
- // rnd and a given ledgercore.StateDelta as produced by BlockEvaluator.
+ // newBlock informs the tracker of a new block along with
+ // a given ledgercore.StateDelta as produced by BlockEvaluator.
newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta)
- // committedUpTo informs the tracker that the database has
+ // committedUpTo informs the tracker that the block database has
// committed all blocks up to and including rnd to persistent
- // storage (the SQL database). This can allow the tracker
+ // storage. This can allow the tracker
// to garbage-collect state that will not be needed.
//
// committedUpTo() returns the round number of the earliest
- // block that this tracker needs to be stored in the ledger
- // for subsequent calls to loadFromDisk(). All blocks with
- // round numbers before that may be deleted to save space,
- // and the tracker is expected to still function after a
- // restart and a call to loadFromDisk(). For example,
- // returning 0 means that no blocks can be deleted.
- committedUpTo(basics.Round) basics.Round
+ // block that this tracker needs to be stored in the block
+ // database for subsequent calls to loadFromDisk().
+ // All blocks with round numbers before that may be deleted to
+ // save space, and the tracker is expected to still function
+ // after a restart and a call to loadFromDisk().
+ // For example, returning 0 means that no blocks can be deleted.
+ // Separately, the method returns the lookback that is being
+ // maintained by the tracker.
+ committedUpTo(basics.Round) (minRound, lookback basics.Round)
+
+ // produceCommittingTask prepares a deferredCommitRange; preparing a deferredCommitRange is a joint
+ // effort to which all the trackers contribute. Each tracker is handed a
+ // pointer to the deferredCommitRange, and has the ability to either modify it or return
+ // nil. If nil is returned, the commit is skipped.
+ produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange
+
+ // prepareCommit, commitRound and postCommit are called when it is time to commit the tracker's data.
+ // If an error is returned, the process is aborted.
+
+ // prepareCommit aligns the data structures stored in the deferredCommitContext with the current
+ // state of the tracker. It allows the tracker to decide what data is going to be persisted
+ // on the coming commitRound.
+ prepareCommit(*deferredCommitContext) error
+ // commitRound is called for each of the trackers after a deferredCommitContext was agreed upon
+ // by all the prepareCommit calls. commitRound is executed within a single transactional
+ // context, so if any of the trackers' commitRound calls fails, the transaction is rolled back.
+ commitRound(context.Context, *sql.Tx, *deferredCommitContext) error
+ // postCommit is called only on a successful commitRound. In that case, each of the trackers has
+ // the chance to update its internal data structures, knowing that the given deferredCommitContext
+ // has completed. An optional context is provided for long-running operations.
+ postCommit(context.Context, *deferredCommitContext)
+
+ // handleUnorderedCommit is a special method for handling deferred commits that are out of order.
+ // A tracker might update its own state in this case. For example, the account updates tracker cancels
+ // the catchpoint writing scheduled for that deferred commit.
+ handleUnorderedCommit(uint64, basics.Round, basics.Round)
// close terminates the tracker, reclaiming any resources
// like open database connections or goroutines. close may
@@ -89,26 +127,142 @@ type ledgerForTracker interface {
trackerDB() db.Pair
blockDB() db.Pair
trackerLog() logging.Logger
- trackerEvalVerified(bookkeeping.Block, ledgerForEvaluator) (ledgercore.StateDelta, error)
+ trackerEvalVerified(bookkeeping.Block, internal.LedgerForEvaluator) (ledgercore.StateDelta, error)
Latest() basics.Round
Block(basics.Round) (bookkeeping.Block, error)
BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
GenesisHash() crypto.Digest
GenesisProto() config.ConsensusParams
+ GenesisAccounts() map[basics.Address]basics.AccountData
}
type trackerRegistry struct {
trackers []ledgerTracker
+ // the accts tracker has some exceptional usages in the tracker registry.
+ accts *accountUpdates
+
+ // ctx is the context for the committing go-routine.
+ ctx context.Context
+ // ctxCancel is the cancel function for the committing go-routine ( i.e. signaling the committing go-routine that it's time to abort )
+ ctxCancel context.CancelFunc
+
+ // deferredCommits is the channel of pending deferred commits
+ deferredCommits chan *deferredCommitContext
+
+ // commitSyncerClosed is the blocking channel for synchronizing closing the commitSyncer goroutine. Once it's closed, the
+ // commitSyncer can be assumed to have aborted.
+ commitSyncerClosed chan struct{}
+
+ // accountsWriting provides synchronization around the background writing of account balances.
+ accountsWriting sync.WaitGroup
+
+ // dbRound is always exactly accountsRound(),
+ // cached to avoid SQL queries.
+ dbRound basics.Round
+
+ dbs db.Pair
+ log logging.Logger
+
+ // the synchronous mode that would be used for the account database.
+ synchronousMode db.SynchronousMode
+
+ // the synchronous mode that would be used while the accounts database is being rebuilt.
+ accountsRebuildSynchronousMode db.SynchronousMode
+
+ mu deadlock.RWMutex
+
+ // lastFlushTime is the time we last flushed updates to
+ // the accounts DB (bumping dbRound).
+ lastFlushTime time.Time
+}
+
+// deferredCommitRange is used during the calls to produceCommittingTask, and serves as a data structure
+// to synchronize the various trackers and create agreement on which rounds need to be persisted
+// next.
+type deferredCommitRange struct {
+ offset uint64
+ oldBase basics.Round
+ lookback basics.Round
+
+ // pendingDeltas is the number of accounts that were modified within this commit context.
+ // note that in this number we might have the same account being modified several times.
+ pendingDeltas int
+
+ isCatchpointRound bool
+
+ // catchpointWriting is a pointer to a variable with the same name in the catchpointTracker.
+ // it's used in order to reset the catchpointWriting flag from the acctupdates'
+ // prepareCommit/commitRound ( which is called before the corresponding catchpoint tracker method )
+ catchpointWriting *int32
}
-func (tr *trackerRegistry) register(lt ledgerTracker) {
- tr.trackers = append(tr.trackers, lt)
+// deferredCommitContext is used in order to synchronize the persistence of a given deferredCommitRange.
+// prepareCommit, commitRound and postCommit all use it to exchange data.
+type deferredCommitContext struct {
+ deferredCommitRange
+
+ newBase basics.Round
+ flushTime time.Time
+
+ genesisProto config.ConsensusParams
+
+ deltas []ledgercore.AccountDeltas
+ roundTotals ledgercore.AccountTotals
+ compactAccountDeltas compactAccountDeltas
+ compactCreatableDeltas map[basics.CreatableIndex]ledgercore.ModifiedCreatable
+
+ updatedPersistedAccounts []persistedAccountData
+
+ committedRoundDigest crypto.Digest
+ trieBalancesHash crypto.Digest
+ updatingBalancesDuration time.Duration
+ catchpointLabel string
+
+ stats telemetryspec.AccountsUpdateMetrics
+ updateStats bool
+}
+
+var errMissingAccountUpdateTracker = errors.New("initializeTrackerCaches : called without a valid accounts update tracker")
+
+func (tr *trackerRegistry) initialize(l ledgerForTracker, trackers []ledgerTracker, cfg config.Local) (err error) {
+ tr.dbs = l.trackerDB()
+ tr.log = l.trackerLog()
+
+ err = tr.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ tr.dbRound, err = accountsRound(tx)
+ return err
+ })
+
+ if err != nil {
+ return err
+ }
+
+ tr.ctx, tr.ctxCancel = context.WithCancel(context.Background())
+ tr.deferredCommits = make(chan *deferredCommitContext, 1)
+ tr.commitSyncerClosed = make(chan struct{})
+ tr.synchronousMode = db.SynchronousMode(cfg.LedgerSynchronousMode)
+ tr.accountsRebuildSynchronousMode = db.SynchronousMode(cfg.AccountsRebuildSynchronousMode)
+ go tr.commitSyncer(tr.deferredCommits)
+
+ tr.trackers = append([]ledgerTracker{}, trackers...)
+
+ for _, tracker := range tr.trackers {
+ if accts, ok := tracker.(*accountUpdates); ok {
+ tr.accts = accts
+ break
+ }
+ }
+ return
}
func (tr *trackerRegistry) loadFromDisk(l ledgerForTracker) error {
+ tr.mu.RLock()
+ dbRound := tr.dbRound
+ tr.mu.RUnlock()
+
for _, lt := range tr.trackers {
- err := lt.loadFromDisk(l)
+ err := lt.loadFromDisk(l, dbRound)
if err != nil {
// find the tracker name.
trackerName := reflect.TypeOf(lt).String()
@@ -116,34 +270,382 @@ func (tr *trackerRegistry) loadFromDisk(l ledgerForTracker) error {
}
}
- return nil
+ err := tr.initializeTrackerCaches(l)
+ if err != nil {
+ return err
+ }
+ // the voters tracker has a special dependency on the account updates, so we need to initialize it separately.
+ tr.accts.voters = &votersTracker{}
+ err = tr.accts.voters.loadFromDisk(l, tr.accts)
+ return err
}
func (tr *trackerRegistry) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
for _, lt := range tr.trackers {
lt.newBlock(blk, delta)
}
- if len(tr.trackers) == 0 {
- fmt.Printf("trackerRegistry::newBlock - no trackers (%d)\n", blk.Round())
- }
}
func (tr *trackerRegistry) committedUpTo(rnd basics.Round) basics.Round {
minBlock := rnd
-
+ maxLookback := basics.Round(0)
for _, lt := range tr.trackers {
- retain := lt.committedUpTo(rnd)
- if retain < minBlock {
- minBlock = retain
+ retainRound, lookback := lt.committedUpTo(rnd)
+ if retainRound < minBlock {
+ minBlock = retainRound
+ }
+ if lookback > maxLookback {
+ maxLookback = lookback
}
}
+ tr.scheduleCommit(rnd, maxLookback)
+
return minBlock
}
+func (tr *trackerRegistry) scheduleCommit(blockqRound, maxLookback basics.Round) {
+ tr.mu.RLock()
+ dbRound := tr.dbRound
+ tr.mu.RUnlock()
+
+ dcc := &deferredCommitContext{
+ deferredCommitRange: deferredCommitRange{
+ lookback: maxLookback,
+ },
+ }
+ cdr := &dcc.deferredCommitRange
+ for _, lt := range tr.trackers {
+ cdr = lt.produceCommittingTask(blockqRound, dbRound, cdr)
+ if cdr == nil {
+ break
+ }
+ }
+ if cdr != nil {
+ dcc.deferredCommitRange = *cdr
+ }
+
+ tr.mu.RLock()
+ // If we recently flushed, wait to aggregate some more blocks.
+ // ( unless we're creating a catchpoint, in which case we want to flush it right away
+ // so that all the instances of the catchpoint would contain exactly the same data )
+ flushTime := time.Now()
+ if dcc != nil && !flushTime.After(tr.lastFlushTime.Add(balancesFlushInterval)) && !dcc.isCatchpointRound && dcc.pendingDeltas < pendingDeltasFlushThreshold {
+ dcc = nil
+ }
+ tr.mu.RUnlock()
+
+ if dcc != nil {
+ tr.accountsWriting.Add(1)
+ tr.deferredCommits <- dcc
+ }
+}
+
+// waitAccountsWriting waits for all the pending ( or current ) account writing to be completed.
+func (tr *trackerRegistry) waitAccountsWriting() {
+ tr.accountsWriting.Wait()
+}
+
func (tr *trackerRegistry) close() {
+ if tr.ctxCancel != nil {
+ tr.ctxCancel()
+ }
+
+ // close() is called from reloadLedger() even when the trackerRegistry is not initialized yet
+ if tr.commitSyncerClosed != nil {
+ tr.waitAccountsWriting()
+ // this would block until the commitSyncerClosed channel gets closed.
+ <-tr.commitSyncerClosed
+ }
+
for _, lt := range tr.trackers {
lt.close()
}
tr.trackers = nil
+ tr.accts = nil
+}
+
+// commitSyncer is the syncer go-routine function which performs the database updates. Internally, it dequeues deferredCommits and
+// sends the tasks to commitRound to complete the operation.
+func (tr *trackerRegistry) commitSyncer(deferredCommits chan *deferredCommitContext) {
+ defer close(tr.commitSyncerClosed)
+ for {
+ select {
+ case commit, ok := <-deferredCommits:
+ if !ok {
+ return
+ }
+ tr.commitRound(commit)
+ case <-tr.ctx.Done():
+ // drain the pending commits queue:
+ drained := false
+ for !drained {
+ select {
+ case <-deferredCommits:
+ tr.accountsWriting.Done()
+ default:
+ drained = true
+ }
+ }
+ return
+ }
+ }
+}
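// Pattern sketch (not part of this commit): commitSyncer above is an instance
// of a general "consume until canceled, then drain" worker. A reduced,
// self-contained version of the same shape; the names are illustrative:
//
//	func syncer(ctx context.Context, tasks <-chan int, pending *sync.WaitGroup, closed chan<- struct{}) {
//		defer close(closed)
//		for {
//			select {
//			case task, ok := <-tasks:
//				if !ok {
//					return
//				}
//				_ = task // stands in for commitRound(task)
//				pending.Done()
//			case <-ctx.Done():
//				// drain queued tasks so producers blocked on the
//				// WaitGroup (cf. waitAccountsWriting) are released
//				for {
//					select {
//					case <-tasks:
//						pending.Done()
//					default:
//						return
//					}
//				}
//			}
//		}
//	}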
+
+// commitRound commits the given deferredCommitContext via the trackers.
+func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) {
+ defer tr.accountsWriting.Done()
+ tr.mu.RLock()
+
+ offset := dcc.offset
+ dbRound := dcc.oldBase
+ lookback := dcc.lookback
+
+ // we can exit right away, as this is the result of a mis-ordered call to committedUpTo.
+ if tr.dbRound < dbRound || offset < uint64(tr.dbRound-dbRound) {
+ tr.log.Warnf("out of order deferred commit: offset %d, dbRound %d but current tracker DB round is %d", offset, dbRound, tr.dbRound)
+ for _, lt := range tr.trackers {
+ lt.handleUnorderedCommit(offset, dbRound, lookback)
+ }
+ tr.mu.RUnlock()
+ return
+ }
+
+ // adjust the offset according to what happened meanwhile.
+ offset -= uint64(tr.dbRound - dbRound)
+
+ // if this iteration needs to flush out zero rounds, just return right away.
+ // this case can happen when two subsequent calls to committedUpTo conclude that the same rounds range needs to be
+ // flushed, without commitRound having had a chance to commit these rounds.
+ if offset == 0 {
+ tr.mu.RUnlock()
+ return
+ }
+
+ dbRound = tr.dbRound
+ newBase := basics.Round(offset) + dbRound
+
+ dcc.offset = offset
+ dcc.oldBase = dbRound
+ dcc.newBase = newBase
+ dcc.flushTime = time.Now()
+
+ for _, lt := range tr.trackers {
+ err := lt.prepareCommit(dcc)
+ if err != nil {
+ tr.log.Errorf(err.Error())
+ tr.mu.RUnlock()
+ return
+ }
+ }
+ tr.mu.RUnlock()
+
+ start := time.Now()
+ ledgerCommitroundCount.Inc(nil)
+ err := tr.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ for _, lt := range tr.trackers {
+ err0 := lt.commitRound(ctx, tx, dcc)
+ if err0 != nil {
+ return err0
+ }
+ }
+
+ err = updateAccountsRound(tx, dbRound+basics.Round(offset))
+ if err != nil {
+ return err
+ }
+
+ return nil
+ })
+ ledgerCommitroundMicros.AddMicrosecondsSince(start, nil)
+
+ if err != nil {
+ tr.log.Warnf("unable to advance tracker db snapshot (%d-%d): %v", dbRound, dbRound+basics.Round(offset), err)
+ return
+ }
+
+ tr.mu.Lock()
+ tr.dbRound = newBase
+ for _, lt := range tr.trackers {
+ lt.postCommit(tr.ctx, dcc)
+ }
+ tr.lastFlushTime = dcc.flushTime
+ tr.mu.Unlock()
+
+}
+
+// initializeTrackerCaches fills up the accountUpdates cache with the most recent ~320 blocks ( on normal execution ).
+// the method also supports balances recovery in cases where the difference between the lastBalancesRound and the lastestBlockRound
+// is far greater than 320; in these cases, it flushes to disk periodically in order to avoid high memory consumption.
+func (tr *trackerRegistry) initializeTrackerCaches(l ledgerForTracker) (err error) {
+ lastestBlockRound := l.Latest()
+ lastBalancesRound := tr.dbRound
+
+ var blk bookkeeping.Block
+ var delta ledgercore.StateDelta
+
+ if tr.accts == nil {
+ return errMissingAccountUpdateTracker
+ }
+
+ accLedgerEval := accountUpdatesLedgerEvaluator{
+ au: tr.accts,
+ }
+
+ if lastBalancesRound < lastestBlockRound {
+ accLedgerEval.prevHeader, err = l.BlockHdr(lastBalancesRound)
+ if err != nil {
+ return err
+ }
+ }
+
+ skipAccountCacheMessage := make(chan struct{})
+ writeAccountCacheMessageCompleted := make(chan struct{})
+ defer func() {
+ close(skipAccountCacheMessage)
+ select {
+ case <-writeAccountCacheMessageCompleted:
+ if err == nil {
+ tr.log.Infof("initializeTrackerCaches completed initializing account data caches")
+ }
+ default:
+ }
+ }()
+
+ catchpointInterval := uint64(0)
+ for _, tracker := range tr.trackers {
+ if catchpointTracker, ok := tracker.(*catchpointTracker); ok {
+ catchpointInterval = catchpointTracker.catchpointInterval
+ break
+ }
+ }
+
+ // this goroutine logs a message once if the parent function has not completed within initializingAccountCachesMessageTimeout.
+ // the message is important, since we're blocking on the ledger block database here, and we want to make sure that we log a message
+ // within the above timeout.
+ go func() {
+ select {
+ case <-time.After(initializingAccountCachesMessageTimeout):
+ tr.log.Infof("initializeTrackerCaches is initializing account data caches")
+ close(writeAccountCacheMessageCompleted)
+ case <-skipAccountCacheMessage:
+ }
+ }()
+
+ blocksStream := make(chan bookkeeping.Block, initializeCachesReadaheadBlocksStream)
+ blockEvalFailed := make(chan struct{}, 1)
+ var blockRetrievalError error
+ go func() {
+ defer close(blocksStream)
+ for roundNumber := lastBalancesRound + 1; roundNumber <= lastestBlockRound; roundNumber++ {
+ blk, blockRetrievalError = l.Block(roundNumber)
+ if blockRetrievalError != nil {
+ return
+ }
+ select {
+ case blocksStream <- blk:
+ case <-blockEvalFailed:
+ return
+ }
+ }
+ }()
+
+ lastFlushedRound := lastBalancesRound
+ const accountsCacheLoadingMessageInterval = 5 * time.Second
+ lastProgressMessage := time.Now().Add(-accountsCacheLoadingMessageInterval / 2)
+
+ // rollbackSynchronousMode ensures that we switch to "fast writing mode" when we start flushing out rounds to disk, and that
+ // we exit this mode when we're done.
+ rollbackSynchronousMode := false
+ defer func() {
+ if rollbackSynchronousMode {
+ // restore default synchronous mode
+ err0 := tr.dbs.Wdb.SetSynchronousMode(context.Background(), tr.synchronousMode, tr.synchronousMode >= db.SynchronousModeFull)
+ // override the returned error only in case there is no error - since this
+ // operation has a lower criticality.
+ if err == nil {
+ err = err0
+ }
+ }
+ }()
+
+ for blk := range blocksStream {
+ delta, err = l.trackerEvalVerified(blk, &accLedgerEval)
+ if err != nil {
+ close(blockEvalFailed)
+ return
+ }
+ tr.newBlock(blk, delta)
+
+ // flush to disk if any of the following applies:
+ // 1. if we have loaded up more than initializeCachesRoundFlushInterval rounds since the last time we flushed the data to disk
+ // 2. if we completed the loading and we loaded up more than 320 rounds.
+ flushIntervalExceed := blk.Round()-lastFlushedRound > initializeCachesRoundFlushInterval
+ loadCompleted := (lastestBlockRound == blk.Round() && lastBalancesRound+basics.Round(blk.ConsensusProtocol().MaxBalLookback) < lastestBlockRound)
+ if flushIntervalExceed || loadCompleted {
+ // adjust the last flush time, so that we would not hold off the flushing due to "working too fast"
+ tr.lastFlushTime = time.Now().Add(-balancesFlushInterval)
+
+ if !rollbackSynchronousMode {
+ // switch to rebuild synchronous mode to improve performance
+ err0 := tr.dbs.Wdb.SetSynchronousMode(context.Background(), tr.accountsRebuildSynchronousMode, tr.accountsRebuildSynchronousMode >= db.SynchronousModeFull)
+ if err0 != nil {
+ tr.log.Warnf("initializeTrackerCaches was unable to switch to rbuild synchronous mode : %v", err0)
+ } else {
+ // flip the switch to rollback the synchronous mode once we're done.
+ rollbackSynchronousMode = true
+ }
+ }
+
+ var roundsBehind basics.Round
+
+ // flush the account data
+ tr.scheduleCommit(blk.Round(), basics.Round(config.Consensus[blk.BlockHeader.CurrentProtocol].MaxBalLookback))
+ // wait for the writing to complete.
+ tr.waitAccountsWriting()
+
+ func() {
+ tr.mu.RLock()
+ defer tr.mu.RUnlock()
+
+ // The tr.dbRound after writing should be ~320 behind the block round.
+ roundsBehind = blk.Round() - tr.dbRound
+ }()
+
+ // are we too far behind ? ( taking into consideration the catchpoint writing, which can stall the writing for quite a bit )
+ if roundsBehind > initializeCachesRoundFlushInterval+basics.Round(catchpointInterval) {
+ // we're unable to persist changes. This is unexpected, but there is no point in continuing to batch additional changes since any further changes
+ // would just accumulate in memory.
+ close(blockEvalFailed)
+ tr.log.Errorf("initializeTrackerCaches was unable to fill up the account caches accounts round = %d, block round = %d. See above error for more details.", blk.Round()-roundsBehind, blk.Round())
+ err = fmt.Errorf("initializeTrackerCaches failed to initialize the account data caches")
+ return
+ }
+
+ // and once we flushed it to disk, update the lastFlushedRound
+ lastFlushedRound = blk.Round()
+ }
+
+ // if enough time has passed since the last time we wrote a message to the log file then give the user an update about the progress.
+ if time.Since(lastProgressMessage) > accountsCacheLoadingMessageInterval {
+ // drop the initial message if we've got to this point, since a message saying "still initializing" that comes after "is initializing" doesn't seem right.
+ select {
+ case skipAccountCacheMessage <- struct{}{}:
+ // if we got to this point, we should be able to close the writeAccountCacheMessageCompleted channel to have the "completed initializing" message written.
+ close(writeAccountCacheMessageCompleted)
+ default:
+ }
+ tr.log.Infof("initializeTrackerCaches is still initializing account data caches, %d rounds loaded out of %d rounds", blk.Round()-lastBalancesRound, lastestBlockRound-lastBalancesRound)
+ lastProgressMessage = time.Now()
+ }
+
+ // prepare for the next iteration.
+ accLedgerEval.prevHeader = *delta.Hdr
+ }
+
+ if blockRetrievalError != nil {
+ err = blockRetrievalError
+ }
+ return
+
}
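// Lifecycle sketch (not part of this commit): the registry drives every
// tracker through a fixed commit sequence: produceCommittingTask narrows or
// vetoes the range, prepareCommit stages data, commitRound runs for all
// trackers inside one transaction, and postCommit updates in-memory state.
// Toy types stand in for deferredCommitRange/deferredCommitContext below.

package sketch

import "database/sql"

type commitRange struct{ offset uint64 }
type commitCtx struct{ commitRange }

type tracker interface {
	produceCommittingTask(dcr *commitRange) *commitRange
	prepareCommit(*commitCtx) error
	commitRound(*sql.Tx, *commitCtx) error
	postCommit(*commitCtx)
}

func commit(db *sql.DB, trackers []tracker) error {
	dcr := &commitRange{}
	for _, t := range trackers {
		if dcr = t.produceCommittingTask(dcr); dcr == nil {
			return nil // any tracker may veto the whole commit
		}
	}
	dcc := &commitCtx{commitRange: *dcr}
	for _, t := range trackers {
		if err := t.prepareCommit(dcc); err != nil {
			return err // abort before touching the database
		}
	}
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	for _, t := range trackers {
		if err := t.commitRound(tx, dcc); err != nil {
			_ = tx.Rollback() // one failure rolls back every tracker's writes
			return err
		}
	}
	if err := tx.Commit(); err != nil {
		return err
	}
	for _, t := range trackers {
		t.postCommit(dcc) // only after a successful database commit
	}
	return nil
}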
diff --git a/ledger/trackerdb.go b/ledger/trackerdb.go
new file mode 100644
index 000000000..dd73f1a8a
--- /dev/null
+++ b/ledger/trackerdb.go
@@ -0,0 +1,365 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "context"
+ "database/sql"
+ "encoding/hex"
+ "fmt"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto/merkletrie"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/util/db"
+)
+
+type trackerDBParams struct {
+ initAccounts map[basics.Address]basics.AccountData
+ initProto config.ConsensusParams
+ catchpointEnabled bool
+ dbPathPrefix string
+}
+
+type trackerDBSchemaInitializer struct {
+ trackerDBParams
+
+ // schemaVersion contains current db version
+ schemaVersion int32
+ // vacuumOnStartup controls whether the accounts database would get vacuumed on startup.
+ vacuumOnStartup bool
+ // newDatabase indicates if the db is newly created
+ newDatabase bool
+
+ log logging.Logger
+}
+
+type trackerDBInitParams struct {
+ schemaVersion int32
+ vacuumOnStartup bool
+}
+
+// trackerDBInitialize initializes the accounts DB if needed and returns the current account round.
+// as part of the initialization, it tests the current database schema version, and performs upgrade
+// procedures to bring it up to the database schema supported by the binary.
+func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefix string) (mgr trackerDBInitParams, err error) {
+ dbs := l.trackerDB()
+ log := l.trackerLog()
+
+ lastestBlockRound := l.Latest()
+
+ if l.GenesisAccounts() == nil {
+ err = fmt.Errorf("trackerDBInitialize: initAccounts not set")
+ return
+ }
+
+ err = dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ tp := trackerDBParams{l.GenesisAccounts(), l.GenesisProto(), catchpointEnabled, dbPathPrefix}
+ var err0 error
+ mgr, err0 = trackerDBInitializeImpl(ctx, tx, tp, log)
+ if err0 != nil {
+ return err0
+ }
+ lastBalancesRound, err := accountsRound(tx)
+ if err != nil {
+ return err
+ }
+ // Check for the blocks DB and tracker DB being out of sync
+ if lastBalancesRound > lastestBlockRound {
+ log.Warnf("trackerDBInitialize: resetting accounts DB (on round %v, but blocks DB's latest is %v)", lastBalancesRound, lastestBlockRound)
+ err0 = accountsReset(tx)
+ if err0 != nil {
+ return err0
+ }
+ mgr, err0 = trackerDBInitializeImpl(ctx, tx, tp, log)
+ if err0 != nil {
+ return err0
+ }
+ }
+ return nil
+ })
+
+ return
+}
+
+// trackerDBInitializeImpl initializes the accounts DB if needed and returns the current account round.
+// as part of the initialization, it tests the current database schema version, and performs upgrade
+// procedures to bring it up to the database schema supported by the binary.
+func trackerDBInitializeImpl(ctx context.Context, tx *sql.Tx, params trackerDBParams, log logging.Logger) (mgr trackerDBInitParams, err error) {
+ // check current database version.
+ dbVersion, err := db.GetUserVersion(ctx, tx)
+ if err != nil {
+ return trackerDBInitParams{}, fmt.Errorf("trackerDBInitialize unable to read database schema version : %v", err)
+ }
+
+ tu := trackerDBSchemaInitializer{
+ trackerDBParams: params,
+ schemaVersion: dbVersion,
+ log: log,
+ }
+
+ // if the database version is greater than what the current binary supports, write a warning. This keeps the existing
+ // fallback behavior where we could use an older binary if the schema happens to be backward compatible.
+ if tu.version() > accountDBVersion {
+ tu.log.Warnf("trackerDBInitialize database schema version is %d, but algod supports only %d", tu.version(), accountDBVersion)
+ }
+
+ if tu.version() < accountDBVersion {
+ tu.log.Infof("trackerDBInitialize upgrading database schema from version %d to version %d", tu.version(), accountDBVersion)
+ // newDatabase is determined during the tables creation. If we're filling the database with accounts,
+ // then we set this variable to true, allowing some of the upgrades to be skipped.
+ for tu.version() < accountDBVersion {
+ tu.log.Infof("trackerDBInitialize performing upgrade from version %d", tu.version())
+ // perform the initialization/upgrade
+ switch tu.version() {
+ case 0:
+ err = tu.upgradeDatabaseSchema0(ctx, tx)
+ if err != nil {
+ tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 0 : %v", err)
+ return
+ }
+ case 1:
+ err = tu.upgradeDatabaseSchema1(ctx, tx)
+ if err != nil {
+ tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 1 : %v", err)
+ return
+ }
+ case 2:
+ err = tu.upgradeDatabaseSchema2(ctx, tx)
+ if err != nil {
+ tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 2 : %v", err)
+ return
+ }
+ case 3:
+ err = tu.upgradeDatabaseSchema3(ctx, tx)
+ if err != nil {
+ tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 3 : %v", err)
+ return
+ }
+ case 4:
+ err = tu.upgradeDatabaseSchema4(ctx, tx)
+ if err != nil {
+ tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 4 : %v", err)
+ return
+ }
+ default:
+ return trackerDBInitParams{}, fmt.Errorf("trackerDBInitialize unable to upgrade database from schema version %d", tu.schemaVersion)
+ }
+ }
+ tu.log.Infof("trackerDBInitialize database schema upgrade complete")
+ }
+
+ return trackerDBInitParams{tu.schemaVersion, tu.vacuumOnStartup}, nil
+}
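// Pattern sketch (not part of this commit): the version-stepping loop above,
// reduced to plain database/sql over SQLite. The PRAGMA-based versioning
// mirrors what db.GetUserVersion / db.SetUserVersion provide; the migrations
// slice is a hypothetical stand-in for the upgradeDatabaseSchemaN methods:
//
//	func upgradeSchema(tx *sql.Tx, migrations []func(*sql.Tx) error) error {
//		var version int
//		if err := tx.QueryRow("PRAGMA user_version").Scan(&version); err != nil {
//			return err
//		}
//		for version < len(migrations) {
//			if err := migrations[version](tx); err != nil {
//				return fmt.Errorf("upgrade from schema %d failed: %v", version, err)
//			}
//			version++
//			// PRAGMA statements cannot take bound parameters, hence Sprintf
//			if _, err := tx.Exec(fmt.Sprintf("PRAGMA user_version = %d", version)); err != nil {
//				return err
//			}
//		}
//		return nil
//	}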
+
+func (tu *trackerDBSchemaInitializer) setVersion(ctx context.Context, tx *sql.Tx, version int32) (err error) {
+ oldVersion := tu.schemaVersion
+ tu.schemaVersion = version
+ _, err = db.SetUserVersion(ctx, tx, tu.schemaVersion)
+ if err != nil {
+ return fmt.Errorf("trackerDBInitialize unable to update database schema version from %d to %d: %v", oldVersion, version, err)
+ }
+ return nil
+}
+
+func (tu trackerDBSchemaInitializer) version() int32 {
+ return tu.schemaVersion
+}
+
+// upgradeDatabaseSchema0 upgrades the database schema from version 0 to version 1
+//
+// Schema of version 0 is expected to be aligned with the schema used on version 2.0.8 or before.
+// Any database of version 2.0.8 would be of version 0. At this point, the database might
+// have the following tables : ( i.e. a newly created database would not have these )
+// * acctrounds
+// * accounttotals
+// * accountbase
+// * assetcreators
+// * storedcatchpoints
+// * accounthashes
+// * catchpointstate
+//
+// As the first step of the upgrade, the above tables are created if they do not already exist.
+// Following that, the assetcreators table is altered by adding a new column to it (ctype).
+// Last, in case the database was just created, it would get initialized with the following:
+// The accountbase would get initialized with the au.initAccounts
+// The accounttotals would get initialized to align with the initialization account added to accountbase
+// The acctrounds would get updated to indicate that the balance matches round 0
+//
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema0(ctx context.Context, tx *sql.Tx) (err error) {
+ tu.log.Infof("upgradeDatabaseSchema0 initializing schema")
+ tu.newDatabase, err = accountsInit(tx, tu.initAccounts, tu.initProto)
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema0 unable to initialize schema : %v", err)
+ }
+ return tu.setVersion(ctx, tx, 1)
+}
+
+// upgradeDatabaseSchema1 upgrades the database schema from version 1 to version 2
+//
+// The schema update to version 2 is intended to ensure that the encoding of all the accounts data is
+// both canonical and identical across the entire network. On release 2.0.5 we released an upgrade to the
+// messagepack encoder. The upgraded messagepack was decoding the account data correctly, but would produce
+// a different encoding compared to its predecessor. As a result, some of the account data that was
+// previously stored on disk would have a different encoded representation than the one the current
+// encoder generates.
+// To address this, this startup procedure scans all the accounts data. For each account data entry, we
+// check whether its encoding aligns with the current messagepack encoder; if it doesn't, we update its
+// encoding. Then, if we found any such account data, we reset the merkle trie and stored catchpoints.
+// once the upgrade is complete, the trackerDBInitialize would (if needed) rebuild the merkle trie using the new
+// encoded accounts.
+//
+// This upgrade doesn't change any of the actual database schema ( i.e. tables, indexes ) but rather just performs
+// a functional update to its content.
+//
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema1(ctx context.Context, tx *sql.Tx) (err error) {
+ var modifiedAccounts uint
+ if tu.newDatabase {
+ goto schemaUpdateComplete
+ }
+
+ // update accounts encoding.
+ tu.log.Infof("upgradeDatabaseSchema1 verifying accounts data encoding")
+ modifiedAccounts, err = reencodeAccounts(ctx, tx)
+ if err != nil {
+ return err
+ }
+
+ if modifiedAccounts > 0 {
+ tu.log.Infof("upgradeDatabaseSchema1 reencoded %d accounts", modifiedAccounts)
+
+ tu.log.Infof("upgradeDatabaseSchema1 resetting account hashes")
+ // reset the merkle trie
+ err = resetAccountHashes(tx)
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema1 unable to reset account hashes : %v", err)
+ }
+
+ tu.log.Infof("upgradeDatabaseSchema1 preparing queries")
+ // initialize a new accountsq with the incoming transaction.
+ accountsq, err := accountsInitDbQueries(tx, tx)
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema1 unable to prepare queries : %v", err)
+ }
+
+ // close the prepared statements when we're done with them.
+ defer accountsq.close()
+
+ tu.log.Infof("upgradeDatabaseSchema1 resetting prior catchpoints")
+ // delete the last catchpoint label if we have any.
+ _, err = accountsq.writeCatchpointStateString(ctx, catchpointStateLastCatchpoint, "")
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema1 unable to clear prior catchpoint : %v", err)
+ }
+
+ tu.log.Infof("upgradeDatabaseSchema1 deleting stored catchpoints")
+ // delete catchpoints.
+ err = deleteStoredCatchpoints(ctx, accountsq, tu.dbPathPrefix)
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema1 unable to delete stored catchpoints : %v", err)
+ }
+ } else {
+ tu.log.Infof("upgradeDatabaseSchema1 found that no accounts needed to be reencoded")
+ }
+
+schemaUpdateComplete:
+ return tu.setVersion(ctx, tx, 2)
+}
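reencodeAccounts itself sits outside this hunk; conceptually it decodes each stored account blob, re-encodes it with the current codec, and rewrites it only when the bytes differ. A hedged sketch of that per-account check, using stand-in codec functions (hypothetical helpers, not the actual protocol API):

package example

import "bytes"

// reencodeOne reports whether a stored blob needs rewriting, returning the
// canonical bytes. decode/encode stand in for the msgpack codec.
func reencodeOne(stored []byte, decode func([]byte) (interface{}, error),
	encode func(interface{}) []byte) (canonical []byte, changed bool, err error) {
	v, err := decode(stored)
	if err != nil {
		return nil, false, err
	}
	canonical = encode(v)
	changed = !bytes.Equal(canonical, stored)
	return canonical, changed, nil
}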
+
+// upgradeDatabaseSchema2 upgrades the database schema from version 2 to version 3
+//
+// This upgrade only enables database vacuuming, which takes place once the upgrade process is complete.
+// If the user has already specified the OptimizeAccountsDatabaseOnStartup flag in the configuration file,
+// this step becomes a no-op.
+//
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema2(ctx context.Context, tx *sql.Tx) (err error) {
+ if !tu.newDatabase {
+ tu.vacuumOnStartup = true
+ }
+
+ // update version
+ return tu.setVersion(ctx, tx, 3)
+}
+
+// upgradeDatabaseSchema3 upgrades the database schema from version 3 to version 4,
+// adding the normalizedonlinebalance column to the accountbase table.
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema3(ctx context.Context, tx *sql.Tx) (err error) {
+ err = accountsAddNormalizedBalance(tx, tu.initProto)
+ if err != nil {
+ return err
+ }
+
+ // update version
+ return tu.setVersion(ctx, tx, 4)
+}
+
+// upgradeDatabaseSchema4 does not change the schema but migrates data:
+// it removes empty AccountData entries from the accountbase table.
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema4(ctx context.Context, tx *sql.Tx) (err error) {
+ var numDeleted int64
+ var addresses []basics.Address
+
+ if tu.newDatabase {
+ goto done
+ }
+
+ numDeleted, addresses, err = removeEmptyAccountData(tx, tu.catchpointEnabled)
+ if err != nil {
+ return err
+ }
+
+ if tu.catchpointEnabled && len(addresses) > 0 {
+ mc, err := MakeMerkleCommitter(tx, false)
+ if err != nil {
+			// at this point the records are deleted and the DB is pruned of empty account data;
+			// if hash deletion fails, just log it and do not abort startup
+ tu.log.Errorf("upgradeDatabaseSchema4: failed to create merkle committer: %v", err)
+ goto done
+ }
+ trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig)
+ if err != nil {
+ tu.log.Errorf("upgradeDatabaseSchema4: failed to create merkle trie: %v", err)
+ goto done
+ }
+
+ var totalHashesDeleted int
+ for _, addr := range addresses {
+ hash := accountHashBuilder(addr, basics.AccountData{}, []byte{0x80})
+ deleted, err := trie.Delete(hash)
+ if err != nil {
+ tu.log.Errorf("upgradeDatabaseSchema4: failed to delete hash '%s' from merkle trie for account %v: %v", hex.EncodeToString(hash), addr, err)
+ } else {
+ if !deleted {
+ tu.log.Warnf("upgradeDatabaseSchema4: failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(hash), addr)
+ } else {
+ totalHashesDeleted++
+ }
+ }
+ }
+
+ if _, err = trie.Commit(); err != nil {
+ tu.log.Errorf("upgradeDatabaseSchema4: failed to commit changes to merkle trie: %v", err)
+ }
+
+ tu.log.Infof("upgradeDatabaseSchema4: deleted %d hashes", totalHashesDeleted)
+ }
+
+done:
+ tu.log.Infof("upgradeDatabaseSchema4: deleted %d rows", numDeleted)
+
+ return tu.setVersion(ctx, tx, 5)
+}
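The hash-deletion loop above is deliberately tolerant: a failed or missing trie deletion is logged but never aborts startup, since the row deletion has already happened. A generic, self-contained sketch of that tolerant loop (Trie here is a stand-in interface, an assumption rather than merkletrie's actual API):

package example

// Trie is a stand-in for a trie-like store supporting hash deletion.
type Trie interface {
	Delete(hash []byte) (bool, error)
}

// deleteAll removes each hash, counting confirmed deletions and logging
// (rather than failing on) everything else.
func deleteAll(t Trie, hashes [][]byte, logf func(string, ...interface{})) int {
	deleted := 0
	for _, h := range hashes {
		ok, err := t.Delete(h)
		switch {
		case err != nil:
			logf("delete %x: %v", h, err)
		case !ok:
			logf("hash %x was not present", h)
		default:
			deleted++
		}
	}
	return deleted
}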
diff --git a/ledger/txtail.go b/ledger/txtail.go
index 68cc0f5ed..a5d77e49c 100644
--- a/ledger/txtail.go
+++ b/ledger/txtail.go
@@ -17,6 +17,8 @@
package ledger
import (
+ "context"
+ "database/sql"
"fmt"
"github.com/algorand/go-algorand/config"
@@ -43,7 +45,7 @@ type txTail struct {
lowWaterMark basics.Round // the last round known to be committed to disk
}
-func (t *txTail) loadFromDisk(l ledgerForTracker) error {
+func (t *txTail) loadFromDisk(l ledgerForTracker, _ basics.Round) error {
latest := l.Latest()
hdr, err := l.BlockHdr(latest)
if err != nil {
@@ -141,7 +143,7 @@ func (t *txTail) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
}
}
-func (t *txTail) committedUpTo(rnd basics.Round) basics.Round {
+func (t *txTail) committedUpTo(rnd basics.Round) (retRound, lookback basics.Round) {
maxlife := basics.Round(t.recent[rnd].proto.MaxTxnLife)
for r := range t.recent {
if r+maxlife < rnd {
@@ -152,7 +154,25 @@ func (t *txTail) committedUpTo(rnd basics.Round) basics.Round {
delete(t.lastValid, t.lowWaterMark)
}
- return (rnd + 1).SubSaturate(maxlife)
+ return (rnd + 1).SubSaturate(maxlife), basics.Round(0)
+}
+
+func (t *txTail) prepareCommit(*deferredCommitContext) error {
+ return nil
+}
+
+func (t *txTail) commitRound(context.Context, *sql.Tx, *deferredCommitContext) error {
+ return nil
+}
+
+func (t *txTail) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+}
+
+func (t *txTail) handleUnorderedCommit(uint64, basics.Round, basics.Round) {
+}
+
+func (t *txTail) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
+ return dcr
}
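The five new no-op methods suggest that txTail now satisfies the ledger's tracker commit lifecycle. The interface shape below is inferred from this method set and is an assumption, not the package's actual declaration:

// Inferred shape only; the real declaration lives elsewhere in the ledger package.
type trackerCommitLifecycle interface {
	prepareCommit(*deferredCommitContext) error
	commitRound(context.Context, *sql.Tx, *deferredCommitContext) error
	postCommit(context.Context, *deferredCommitContext)
	handleUnorderedCommit(uint64, basics.Round, basics.Round)
	produceCommittingTask(basics.Round, basics.Round, *deferredCommitRange) *deferredCommitRange
}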
// txtailMissingRound is returned by checkDup when requested for a round number below the low watermark
diff --git a/ledger/txtail_test.go b/ledger/txtail_test.go
index 9d5d1c2bf..eaaf34a06 100644
--- a/ledger/txtail_test.go
+++ b/ledger/txtail_test.go
@@ -28,6 +28,7 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -35,10 +36,11 @@ import (
func TestTxTailCheckdup(t *testing.T) {
partitiontest.PartitionTest(t)
- ledger := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion)
+ accts := ledgertesting.RandomAccounts(10, false)
+ ledger := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, []map[basics.Address]basics.AccountData{accts})
proto := config.Consensus[protocol.ConsensusCurrentVersion]
tail := txTail{}
- require.NoError(t, tail.loadFromDisk(ledger))
+ require.NoError(t, tail.loadFromDisk(ledger, 0))
lastRound := basics.Round(proto.MaxTxnLife)
lookback := basics.Round(100)
@@ -151,7 +153,7 @@ func TestTxTailLoadFromDisk(t *testing.T) {
var ledger txTailTestLedger
txtail := txTail{}
- err := txtail.loadFromDisk(&ledger)
+ err := txtail.loadFromDisk(&ledger, 0)
require.NoError(t, err)
require.Equal(t, int(config.Consensus[protocol.ConsensusCurrentVersion].MaxTxnLife), len(txtail.recent))
require.Equal(t, testTxTailValidityRange, len(txtail.lastValid))
diff --git a/ledger/voters.go b/ledger/voters.go
index 7f1749175..898604072 100644
--- a/ledger/voters.go
+++ b/ledger/voters.go
@@ -20,14 +20,10 @@ import (
"fmt"
"sync"
- "github.com/algorand/go-deadlock"
-
"github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/compactcert"
- "github.com/algorand/go-algorand/crypto/merklearray"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
)
@@ -63,7 +59,7 @@ type votersTracker struct {
// the Merkle commitment to online accounts from the previous such block.
// Thus, we maintain X in the round map until we form a compact certificate
// for round X+CompactCertVotersLookback+CompactCertRounds.
- round map[basics.Round]*VotersForRound
+ round map[basics.Round]*ledgercore.VotersForRound
l ledgerForTracker
au *accountUpdates
@@ -73,44 +69,6 @@ type votersTracker struct {
loadWaitGroup sync.WaitGroup
}
-// VotersForRound tracks the top online voting accounts as of a particular
-// round, along with a Merkle tree commitment to those voting accounts.
-type VotersForRound struct {
- // Because it can take some time to compute the top participants and the
- // corresponding Merkle tree, the votersForRound is constructed in
- // the background. This means that fields (participants, adddToPos,
- // tree, and totalWeight) could be nil/zero while a background thread
- // is computing them. Once the fields are set, however, they are
- // immutable, and it is no longer necessary to acquire the lock.
- //
- // If an error occurs while computing the tree in the background,
- // loadTreeError might be set to non-nil instead. That also finalizes
- // the state of this VotersForRound.
- mu deadlock.Mutex
- cond *sync.Cond
- loadTreeError error
-
- // Proto is the ConsensusParams for the round whose balances are reflected
- // in participants.
- Proto config.ConsensusParams
-
- // Participants is the array of top #CompactCertVoters online accounts
- // in this round, sorted by normalized balance (to make sure heavyweight
- // accounts are biased to the front).
- Participants participantsArray
-
- // AddrToPos specifies the position of a given account address (if present)
- // in the Participants array. This allows adding a vote from a given account
- // to the certificate builder.
- AddrToPos map[basics.Address]uint64
-
- // Tree is a constructed Merkle tree of the Participants array.
- Tree *merklearray.Tree
-
- // TotalWeight is the sum of the weights from the Participants array.
- TotalWeight basics.MicroAlgos
-}
-
// votersRoundForCertRound computes the round number whose voting participants
// will be used to sign the compact cert for certRnd.
func votersRoundForCertRound(certRnd basics.Round, proto config.ConsensusParams) basics.Round {
@@ -124,7 +82,7 @@ func votersRoundForCertRound(certRnd basics.Round, proto config.ConsensusParams)
func (vt *votersTracker) loadFromDisk(l ledgerForTracker, au *accountUpdates) error {
vt.l = l
vt.au = au
- vt.round = make(map[basics.Round]*VotersForRound)
+ vt.round = make(map[basics.Round]*ledgercore.VotersForRound)
latest := l.Latest()
hdr, err := l.BlockHdr(latest)
@@ -173,23 +131,20 @@ func (vt *votersTracker) loadTree(hdr bookkeeping.BlockHeader) {
return
}
- tr := &VotersForRound{
- Proto: proto,
- }
- tr.cond = sync.NewCond(&tr.mu)
+ tr := ledgercore.MakeVotersForRound()
+ tr.Proto = proto
+
vt.round[r] = tr
vt.loadWaitGroup.Add(1)
go func() {
defer vt.loadWaitGroup.Done()
- err := tr.loadTree(vt.l, vt.au, hdr)
+ onlineAccounts := ledgercore.TopOnlineAccounts(vt.au.onlineTop)
+ err := tr.LoadTree(onlineAccounts, hdr)
if err != nil {
- vt.au.log.Warnf("votersTracker.loadTree(%d): %v", hdr.Round, err)
+ vt.l.trackerLog().Warnf("votersTracker.loadTree(%d): %v", hdr.Round, err)
- tr.mu.Lock()
- tr.loadTreeError = err
- tr.cond.Broadcast()
- tr.mu.Unlock()
+ tr.BroadcastError(err)
}
}()
return
@@ -201,70 +156,6 @@ func (vt *votersTracker) close() {
vt.loadWaitGroup.Wait()
}
-func (tr *VotersForRound) loadTree(l ledgerForTracker, au *accountUpdates, hdr bookkeeping.BlockHeader) error {
- r := hdr.Round
-
- // certRound is the block that we expect to form a compact certificate for,
- // using the balances from round r.
- certRound := r + basics.Round(tr.Proto.CompactCertVotersLookback+tr.Proto.CompactCertRounds)
-
- // sigKeyRound is the ephemeral key ID that we expect to be used for signing
- // the block from certRound. It is one higher because the keys for certRound
- // might be deleted by the time consensus is reached on the block and we try
- // to sign the compact cert for block certRound.
- sigKeyRound := certRound + 1
-
- top, err := au.onlineTop(r, sigKeyRound, tr.Proto.CompactCertTopVoters)
- if err != nil {
- return err
- }
-
- participants := make(participantsArray, len(top))
- addrToPos := make(map[basics.Address]uint64)
- var totalWeight basics.MicroAlgos
-
- for i, acct := range top {
- var ot basics.OverflowTracker
- rewards := basics.PendingRewards(&ot, tr.Proto, acct.MicroAlgos, acct.RewardsBase, hdr.RewardsLevel)
- money := ot.AddA(acct.MicroAlgos, rewards)
- if ot.Overflowed {
- return fmt.Errorf("votersTracker.loadTree: overflow adding rewards %d + %d", acct.MicroAlgos, rewards)
- }
-
- totalWeight = ot.AddA(totalWeight, money)
- if ot.Overflowed {
- return fmt.Errorf("votersTracker.loadTree: overflow computing totalWeight %d + %d", totalWeight.ToUint64(), money.ToUint64())
- }
-
- keyDilution := acct.VoteKeyDilution
- if keyDilution == 0 {
- keyDilution = tr.Proto.DefaultKeyDilution
- }
-
- participants[i] = compactcert.Participant{
- PK: acct.VoteID,
- Weight: money.ToUint64(),
- KeyDilution: keyDilution,
- }
- addrToPos[acct.Address] = uint64(i)
- }
-
- tree, err := merklearray.Build(participants)
- if err != nil {
- return err
- }
-
- tr.mu.Lock()
- tr.AddrToPos = addrToPos
- tr.Participants = participants
- tr.TotalWeight = totalWeight
- tr.Tree = tree
- tr.cond.Broadcast()
- tr.mu.Unlock()
-
- return nil
-}
-
func (vt *votersTracker) newBlock(hdr bookkeeping.BlockHeader) {
proto := config.Consensus[hdr.CurrentProtocol]
if proto.CompactCertRounds == 0 {
@@ -288,7 +179,7 @@ func (vt *votersTracker) newBlock(hdr bookkeeping.BlockHeader) {
if (r+proto.CompactCertVotersLookback)%proto.CompactCertRounds == 0 {
_, ok := vt.round[basics.Round(r)]
if ok {
- vt.au.log.Errorf("votersTracker.newBlock: round %d already present", r)
+ vt.l.trackerLog().Errorf("votersTracker.newBlock: round %d already present", r)
} else {
vt.loadTree(hdr)
}
@@ -311,7 +202,7 @@ func (vt *votersTracker) lowestRound(base basics.Round) basics.Round {
}
// getVoters() returns the top online participants from round r.
-func (vt *votersTracker) getVoters(r basics.Round) (*VotersForRound, error) {
+func (vt *votersTracker) getVoters(r basics.Round) (*ledgercore.VotersForRound, error) {
tr, ok := vt.round[r]
if !ok {
// Not tracked: compact certs not enabled.
@@ -319,32 +210,10 @@ func (vt *votersTracker) getVoters(r basics.Round) (*VotersForRound, error) {
}
// Wait for the Merkle tree to be constructed.
- tr.mu.Lock()
- defer tr.mu.Unlock()
- for tr.Tree == nil {
- if tr.loadTreeError != nil {
- return nil, tr.loadTreeError
- }
-
- tr.cond.Wait()
+ err := tr.Wait()
+ if err != nil {
+ return nil, err
}
return tr, nil
}
-
-//msgp:ignore participantsArray
-// participantsArray implements merklearray.Array and is used to commit
-// to a Merkle tree of online accounts.
-type participantsArray []compactcert.Participant
-
-func (a participantsArray) Length() uint64 {
- return uint64(len(a))
-}
-
-func (a participantsArray) GetHash(pos uint64) (crypto.Digest, error) {
- if pos >= uint64(len(a)) {
- return crypto.Digest{}, fmt.Errorf("participantsArray.Get(%d) out of bounds %d", pos, len(a))
- }
-
- return crypto.HashObj(a[pos]), nil
-}
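The tr.Wait() / tr.BroadcastError(err) pair encapsulates the mutex-and-cond handshake that the deleted lines above performed inline. A generic, self-contained sketch of that pattern (not the ledgercore implementation):

package example

import "sync"

// lazyResult is built in the background; readers block in Wait until either
// the value is published or an error is broadcast.
type lazyResult struct {
	mu    sync.Mutex
	cond  *sync.Cond
	value interface{}
	err   error
}

func newLazyResult() *lazyResult {
	lr := &lazyResult{}
	lr.cond = sync.NewCond(&lr.mu)
	return lr
}

func (lr *lazyResult) publish(v interface{}) {
	lr.mu.Lock()
	lr.value = v
	lr.cond.Broadcast()
	lr.mu.Unlock()
}

func (lr *lazyResult) broadcastError(err error) {
	lr.mu.Lock()
	lr.err = err
	lr.cond.Broadcast()
	lr.mu.Unlock()
}

// Wait blocks until publish or broadcastError has been called.
func (lr *lazyResult) Wait() (interface{}, error) {
	lr.mu.Lock()
	defer lr.mu.Unlock()
	for lr.value == nil && lr.err == nil {
		lr.cond.Wait()
	}
	return lr.value, lr.err
}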
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index 524ffc293..2563adf6e 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -19,6 +19,7 @@ package libgoal
import (
"encoding/json"
"fmt"
+ "io/ioutil"
"os"
"path/filepath"
@@ -30,6 +31,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/algorand/go-algorand/daemon/algod/api/spec/common"
v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/daemon/kmd/lib/kmdapi"
@@ -891,6 +893,40 @@ func (c *Client) GetPendingTransactionsByAddress(addr string, maxTxns uint64) (r
return
}
+// AddParticipationKey takes a participation key file and sends it to the node.
+// The key will be loaded into the system when the function returns successfully.
+func (c *Client) AddParticipationKey(keyfile string) (resp generated.PostParticipationResponse, err error) {
+ data, err := ioutil.ReadFile(keyfile)
+ if err != nil {
+ return
+ }
+
+ algod, err := c.ensureAlgodClient()
+ if err != nil {
+ return
+ }
+
+ return algod.PostParticipationKey(data)
+}
+
+// GetParticipationKeys gets the currently installed participation keys.
+func (c *Client) GetParticipationKeys() (resp generated.ParticipationKeysResponse, err error) {
+ algod, err := c.ensureAlgodClient()
+ if err == nil {
+ return algod.GetParticipationKeys()
+ }
+ return
+}
+
+// GetParticipationKeyByID looks up a specific participation key by its participationID.
+func (c *Client) GetParticipationKeyByID(id string) (resp generated.ParticipationKeyResponse, err error) {
+ algod, err := c.ensureAlgodClient()
+ if err == nil {
+ return algod.GetParticipationKeyByID(id)
+ }
+ return
+}
+
// ExportKey exports the private key of the passed account, assuming it's available
func (c *Client) ExportKey(walletHandle []byte, password, account string) (resp kmdapi.APIV1POSTKeyExportResponse, err error) {
kmd, err := c.ensureKmdClient()
@@ -970,18 +1006,18 @@ func (c *Client) Catchup(catchpointLabel string) error {
const defaultAppIdx = 1380011588
// MakeDryrunStateBytes function creates DryrunRequest data structure in serialized form according to the format
-func MakeDryrunStateBytes(client Client, txnOrStxn interface{}, other []transactions.SignedTxn, proto string, format string) (result []byte, err error) {
+func MakeDryrunStateBytes(client Client, txnOrStxn interface{}, otherTxns []transactions.SignedTxn, otherAccts []basics.Address, proto string, format string) (result []byte, err error) {
switch format {
case "json":
var gdr generatedV2.DryrunRequest
- gdr, err = MakeDryrunStateGenerated(client, txnOrStxn, other, proto)
+ gdr, err = MakeDryrunStateGenerated(client, txnOrStxn, otherTxns, otherAccts, proto)
if err == nil {
result = protocol.EncodeJSON(&gdr)
}
return
case "msgp":
var dr v2.DryrunRequest
- dr, err = MakeDryrunState(client, txnOrStxn, other, proto)
+ dr, err = MakeDryrunState(client, txnOrStxn, otherTxns, otherAccts, proto)
if err == nil {
result = protocol.EncodeReflect(&dr)
}
@@ -992,8 +1028,8 @@ func MakeDryrunStateBytes(client Client, txnOrStxn interface{}, other []transact
}
// MakeDryrunState function creates v2.DryrunRequest data structure
-func MakeDryrunState(client Client, txnOrStxn interface{}, other []transactions.SignedTxn, proto string) (dr v2.DryrunRequest, err error) {
- gdr, err := MakeDryrunStateGenerated(client, txnOrStxn, other, proto)
+func MakeDryrunState(client Client, txnOrStxn interface{}, otherTxns []transactions.SignedTxn, otherAccts []basics.Address, proto string) (dr v2.DryrunRequest, err error) {
+ gdr, err := MakeDryrunStateGenerated(client, txnOrStxn, otherTxns, otherAccts, proto)
if err != nil {
return
}
@@ -1001,20 +1037,27 @@ func MakeDryrunState(client Client, txnOrStxn interface{}, other []transactions.
}
// MakeDryrunStateGenerated function creates generatedV2.DryrunRequest data structure
-func MakeDryrunStateGenerated(client Client, txnOrStxn interface{}, other []transactions.SignedTxn, proto string) (dr generatedV2.DryrunRequest, err error) {
+func MakeDryrunStateGenerated(client Client, txnOrStxnOrSlice interface{}, otherTxns []transactions.SignedTxn, otherAccts []basics.Address, proto string) (dr generatedV2.DryrunRequest, err error) {
var txns []transactions.SignedTxn
- if txnOrStxn == nil {
- // empty input do nothing
- } else if txn, ok := txnOrStxn.(transactions.Transaction); ok {
- txns = append(txns, transactions.SignedTxn{Txn: txn})
- } else if stxn, ok := txnOrStxn.(transactions.SignedTxn); ok {
- txns = append(txns, stxn)
- } else {
- err = fmt.Errorf("unsupported txn type")
- return
+ if txnOrStxnOrSlice != nil {
+ switch txnType := txnOrStxnOrSlice.(type) {
+ case transactions.Transaction:
+ txns = append(txns, transactions.SignedTxn{Txn: txnType})
+ case []transactions.Transaction:
+ for _, t := range txnType {
+ txns = append(txns, transactions.SignedTxn{Txn: t})
+ }
+ case transactions.SignedTxn:
+ txns = append(txns, txnType)
+ case []transactions.SignedTxn:
+ txns = append(txns, txnType...)
+ default:
+ err = fmt.Errorf("unsupported txn type")
+ return
+ }
}
- txns = append(txns, other...)
+ txns = append(txns, otherTxns...)
for i := range txns {
enc := protocol.EncodeJSON(&txns[i])
dr.Txns = append(dr.Txns, enc)
@@ -1023,6 +1066,9 @@ func MakeDryrunStateGenerated(client Client, txnOrStxn interface{}, other []tran
for _, txn := range txns {
tx := txn.Txn
if tx.Type == protocol.ApplicationCallTx {
+ accounts := append(tx.Accounts, tx.Sender)
+ accounts = append(accounts, otherAccts...)
+
apps := []basics.AppIndex{tx.ApplicationID}
apps = append(apps, tx.ForeignApps...)
for _, appIdx := range apps {
@@ -1049,6 +1095,7 @@ func MakeDryrunStateGenerated(client Client, txnOrStxn interface{}, other []tran
return
}
appParams = app.Params
+ accounts = append(accounts, appIdx.Address())
}
dr.Apps = append(dr.Apps, generatedV2.Application{
Id: uint64(appIdx),
@@ -1056,11 +1103,11 @@ func MakeDryrunStateGenerated(client Client, txnOrStxn interface{}, other []tran
})
}
- accounts := append(tx.Accounts, tx.Sender)
for _, acc := range accounts {
var info generatedV2.Account
if info, err = client.AccountInformationV2(acc.String()); err != nil {
- return
+ // ignore error - accounts might have app addresses that were not funded
+ continue
}
dr.Accounts = append(dr.Accounts, info)
}
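The widened txnOrStxnOrSlice parameter means callers can now hand the dryrun builders a whole transaction group directly, signed or unsigned. A hedged usage sketch under the new signature (the proto string is passed through untouched; its accepted values are an assumption of the caller):

// buildDryrunJSON serializes a dryrun request for an unsigned group plus one
// extra account to fetch state for.
func buildDryrunJSON(c libgoal.Client, group []transactions.Transaction, extra basics.Address, proto string) ([]byte, error) {
	// nil otherTxns: the group itself carries every transaction we care about.
	return libgoal.MakeDryrunStateBytes(c, group, nil, []basics.Address{extra}, proto, "json")
}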
diff --git a/libgoal/participation.go b/libgoal/participation.go
index c95d4c3c6..66ba9e4a5 100644
--- a/libgoal/participation.go
+++ b/libgoal/participation.go
@@ -24,6 +24,7 @@ import (
"path/filepath"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
@@ -166,7 +167,7 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k
// Fill the database with new participation keys
newPart, err := account.FillDBWithParticipationKeys(partdb, parsedAddr, firstRound, lastRound, keyDilution)
part = newPart.Participation
- newPart.Close()
+ partdb.Close()
return part, partKeyPath, err
}
@@ -243,8 +244,18 @@ func (c *Client) InstallParticipationKeys(inputfile string) (part account.Partic
}
// ListParticipationKeys returns the available participation keys,
+// as a response object.
+func (c *Client) ListParticipationKeys() (partKeyFiles generated.ParticipationKeysResponse, err error) {
+ algod, err := c.ensureAlgodClient()
+ if err == nil {
+ partKeyFiles, err = algod.GetParticipationKeys()
+ }
+ return
+}
+
+// ListParticipationKeyFiles returns the available participation keys,
// as a map from database filename to Participation key object.
-func (c *Client) ListParticipationKeys() (partKeyFiles map[string]account.Participation, err error) {
+func (c *Client) ListParticipationKeyFiles() (partKeyFiles map[string]account.Participation, err error) {
genID, err := c.GenesisID()
if err != nil {
return
diff --git a/logging/testingLogger.go b/logging/testingLogger.go
index bbdb0f32a..09b789fb0 100644
--- a/logging/testingLogger.go
+++ b/logging/testingLogger.go
@@ -22,7 +22,7 @@ import (
// TestLogWriter is an io.Writer that wraps a testing.T (or a testing.B) -- anything written to it gets logged with t.Log(...)
// Being an io.Writer lets us pass it to Logger.SetOutput() in testing code -- this way if we want we can use Go's built-in testing log instead of making a new base.log file for each test.
-// As a bonus, the detailed logs produced in a Travis test are now easily accessible and are printed if and only if that particular ttest fails.
+// As a bonus, the detailed logs produced in a Travis test are now easily accessible and are printed if and only if that particular test fails.
type TestLogWriter struct {
testing.TB
}
diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go
index 36b4618f5..1bf4184a6 100644
--- a/netdeploy/remote/deployedNetwork.go
+++ b/netdeploy/remote/deployedNetwork.go
@@ -36,6 +36,7 @@ import (
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/gen"
"github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util"
@@ -487,9 +488,9 @@ func keypair() *crypto.SignatureSecrets {
return s
}
-func generateInitState(accounts map[basics.Address]basics.AccountData, bootstrappedNet *netState) (ledger.InitState, error) {
+func generateInitState(accounts map[basics.Address]basics.AccountData, bootstrappedNet *netState) (ledgercore.InitState, error) {
- var initState ledger.InitState
+ var initState ledgercore.InitState
block := bookkeeping.Block{
BlockHeader: bookkeeping.BlockHeader{
diff --git a/network/phonebook_test.go b/network/phonebook_test.go
index bd8e712bf..69a971bfe 100644
--- a/network/phonebook_test.go
+++ b/network/phonebook_test.go
@@ -240,7 +240,12 @@ func TestMultiPhonebookDuplicateFiltering(t *testing.T) {
func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) {
partitiontest.PartitionTest(t)
- entries := MakePhonebook(3, 200*time.Millisecond).(*phonebookImpl)
+	// Make the connectionsRateLimitingWindow long enough to avoid triggering it when the
+	// test is running in a slow environment.
+	// The test will artificially simulate the passage of time instead of sleeping.
+ timeUnit := 2000 * time.Second
+ connectionsRateLimitingWindow := 2 * timeUnit
+ entries := MakePhonebook(3, connectionsRateLimitingWindow).(*phonebookImpl)
addr1 := "addrABC"
addr2 := "addrXYZ"
@@ -259,8 +264,10 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) {
phBookData := entries.data[addr1].recentConnectionTimes
require.Equal(t, 1, len(phBookData))
- // introduce a gap between the two requests
- time.Sleep(100 * time.Millisecond)
+ // simulate passing a unit of time
+ for rct := range entries.data[addr1].recentConnectionTimes {
+ entries.data[addr1].recentConnectionTimes[rct] = entries.data[addr1].recentConnectionTimes[rct].Add(-1 * timeUnit)
+ }
// add another value to addr
addrInPhonebook, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr1)
@@ -269,8 +276,11 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) {
phBookData = entries.data[addr1].recentConnectionTimes
require.Equal(t, 2, len(phBookData))
- // wait for the time the first element should be removed
- time.Sleep(100 * time.Millisecond)
+ // simulate passing a unit of time
+ for rct := range entries.data[addr1].recentConnectionTimes {
+ entries.data[addr1].recentConnectionTimes[rct] =
+ entries.data[addr1].recentConnectionTimes[rct].Add(-1 * timeUnit)
+ }
// the first time should be removed and a new one added
// there should not be any wait
@@ -294,7 +304,11 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) {
require.Equal(t, true, entries.UpdateConnectionTime(addr2, provisionalTime))
// introduce a gap between the two requests so that only the first will be removed later when waited
- time.Sleep(100 * time.Millisecond)
+ // simulate passing a unit of time
+ for rct := range entries.data[addr2].recentConnectionTimes {
+ entries.data[addr2].recentConnectionTimes[rct] =
+ entries.data[addr2].recentConnectionTimes[rct].Add(-1 * timeUnit)
+ }
// value 2
addrInPhonebook, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr2)
@@ -318,7 +332,11 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) {
require.Equal(t, phBookData[1], phBookData2[1])
require.Equal(t, phBookData[2], phBookData2[2])
- time.Sleep(waitTime)
+ // simulate passing of the waitTime duration
+ for rct := range entries.data[addr2].recentConnectionTimes {
+ entries.data[addr2].recentConnectionTimes[rct] =
+ entries.data[addr2].recentConnectionTimes[rct].Add(-1 * waitTime)
+ }
// The wait should be sufficient
_, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr2)
@@ -326,7 +344,7 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) {
require.Equal(t, true, entries.UpdateConnectionTime(addr2, provisionalTime))
// only one element should be removed, and one added
phBookData2 = entries.data[addr2].recentConnectionTimes
- require.Equal(t, 3, len(phBookData))
+ require.Equal(t, 3, len(phBookData2))
// make sure the right time was removed
require.Equal(t, phBookData[1], phBookData2[0])
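Each of the loops above applies the same trick: instead of sleeping, the test rewinds the stored connection timestamps so the rate-limiting window treats them as older. A generic sketch of the pattern:

package example

import "time"

// rewind shifts recorded timestamps into the past, simulating elapsed time
// without sleeping; this keeps the test deterministic on slow machines.
func rewind(times []time.Time, d time.Duration) {
	for i := range times {
		times[i] = times[i].Add(-d)
	}
}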
diff --git a/network/ping.go b/network/ping.go
deleted file mode 100644
index 064bb5ff9..000000000
--- a/network/ping.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package network
-
-import (
- "bytes"
- "context"
- "time"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/protocol"
-)
-
-func pingHandler(message IncomingMessage) OutgoingMessage {
- if len(message.Data) > 8 {
- return OutgoingMessage{}
- }
- message.Net.(*WebsocketNetwork).log.Debugf("ping from peer %#v", message.Sender.(*wsPeer).wsPeerCore)
- peer := message.Sender.(*wsPeer)
- tbytes := []byte(protocol.PingReplyTag)
- mbytes := make([]byte, len(tbytes)+len(message.Data))
- copy(mbytes, tbytes)
- copy(mbytes[len(tbytes):], message.Data)
- var digest crypto.Digest // leave blank, ping message too short
- peer.writeNonBlock(context.Background(), mbytes, false, digest, time.Now())
- return OutgoingMessage{}
-}
-
-func pingReplyHandler(message IncomingMessage) OutgoingMessage {
- log := message.Net.(*WebsocketNetwork).log
- now := time.Now()
- peer := message.Sender.(*wsPeer)
- peer.pingLock.Lock()
- defer peer.pingLock.Unlock()
- if !peer.pingInFlight {
- log.Infof("ping reply with non in flight from %s", peer.rootURL)
- return OutgoingMessage{}
- }
- if len(peer.pingData) != len(message.Data) {
- log.Infof("ping reply with wrong length want %d got %d, from %s", len(peer.pingData), len(message.Data), peer.rootURL)
- return OutgoingMessage{}
- }
- if 0 != bytes.Compare(peer.pingData, message.Data) {
- log.Infof("ping reply with wrong data from %s", peer.rootURL)
- return OutgoingMessage{}
- }
- peer.pingInFlight = false
- peer.lastPingRoundTripTime = now.Sub(peer.pingSent)
- log.Debugf("ping returned in %s from %s", peer.lastPingRoundTripTime, message.Sender.(*wsPeer).rootURL)
- return OutgoingMessage{}
-}
-
-var pingHandlers = []TaggedMessageHandler{
- {protocol.PingTag, HandlerFunc(pingHandler)},
- {protocol.PingReplyTag, HandlerFunc(pingReplyHandler)},
-}
diff --git a/network/ping_test.go b/network/ping_test.go
deleted file mode 100644
index 85b1ef2c3..000000000
--- a/network/ping_test.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package network
-
-import (
- "testing"
- "time"
-
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
-)
-
-// for two node network, check that B can ping A and get a reply
-func TestPing(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- netA := makeTestWebsocketNode(t)
- netA.config.GossipFanout = 1
- netA.config.PeerPingPeriodSeconds = 5
- netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
- netB := makeTestWebsocketNode(t)
- netB.config.GossipFanout = 1
- netB.config.PeerPingPeriodSeconds = 5
- addrA, postListen := netA.Address()
- require.True(t, postListen)
- t.Log(addrA)
- netB.phonebook = MakePhonebook(1, 1*time.Millisecond)
- netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
- netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
-
- readyTimeout := time.NewTimer(2 * time.Second)
- waitReady(t, netA, readyTimeout.C)
- t.Log("a ready")
- waitReady(t, netB, readyTimeout.C)
- t.Log("b ready")
-
- bpeers := netB.GetPeers(PeersConnectedOut)
- require.Equal(t, 1, len(bpeers))
-
- peer := bpeers[0].(*wsPeer)
- prePing := time.Now()
- peer.sendPing()
- const waitStep = 10 * time.Millisecond
- for i := 1; i <= 100; i++ {
- time.Sleep(waitStep)
- _, lastPingRoundTripTime := peer.pingTimes()
- if lastPingRoundTripTime > 0 {
- postPing := time.Now()
- testTime := postPing.Sub(prePing)
- if lastPingRoundTripTime < testTime {
- // success
- return
- }
- t.Fatalf("ping came back with bogus time %s after %s test waiting", lastPingRoundTripTime, testTime)
- }
- }
- t.FailNow()
-}
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index ace79c5b0..9f4a1b281 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"io/ioutil"
- "math"
"net"
"net/http"
"net/textproto"
@@ -31,7 +30,6 @@ import (
"path"
"regexp"
"runtime"
- "sort"
"strconv"
"strings"
"sync"
@@ -425,6 +423,9 @@ func (wn *WebsocketNetwork) Address() (string, bool) {
parsedURL := url.URL{Scheme: wn.scheme}
var connected bool
if wn.listener == nil {
+ if wn.config.NetAddress == "" {
+ parsedURL.Scheme = ""
+ }
parsedURL.Host = wn.config.NetAddress
connected = false
} else {
@@ -779,9 +780,6 @@ func (wn *WebsocketNetwork) Start() {
wn.scheme = "http"
}
wn.meshUpdateRequests <- meshRequest{false, nil}
- if wn.config.EnablePingHandler {
- wn.RegisterHandlers(pingHandlers)
- }
if wn.prioScheme != nil {
wn.RegisterHandlers(prioHandlers)
}
@@ -791,10 +789,7 @@ func (wn *WebsocketNetwork) Start() {
}
wn.wg.Add(1)
go wn.meshThread()
- if wn.config.PeerPingPeriodSeconds > 0 {
- wn.wg.Add(1)
- go wn.pingThread()
- }
+
// we shouldn't have any ticker here.. but in case we do - just stop it.
if wn.peersConnectivityCheckTicker != nil {
wn.peersConnectivityCheckTicker.Stop()
@@ -1761,81 +1756,6 @@ func (wn *WebsocketNetwork) prioWeightRefresh() {
}
}
-// Wake up the thread to do work this often.
-const pingThreadPeriod = 30 * time.Second
-
-// If ping stats are older than this, don't include in metrics.
-const maxPingAge = 30 * time.Minute
-
-// pingThread wakes up periodically to refresh the ping times on peers and update the metrics gauges.
-func (wn *WebsocketNetwork) pingThread() {
- defer wn.wg.Done()
- ticker := time.NewTicker(pingThreadPeriod)
- defer ticker.Stop()
- for {
- select {
- case <-ticker.C:
- case <-wn.ctx.Done():
- return
- }
- sendList := wn.peersToPing()
- wn.log.Debugf("ping %d peers...", len(sendList))
- for _, peer := range sendList {
- if !peer.sendPing() {
- // if we failed to send a ping, see how long it was since last successful ping.
- lastPingSent, _ := peer.pingTimes()
- wn.log.Infof("failed to ping to %v for the past %f seconds", peer, time.Now().Sub(lastPingSent).Seconds())
- }
- }
- }
-}
-
-// Walks list of peers, gathers list of peers to ping, also calculates statistics.
-func (wn *WebsocketNetwork) peersToPing() []*wsPeer {
- wn.peersLock.RLock()
- defer wn.peersLock.RUnlock()
- // Never flood outbound traffic by trying to ping all the peers at once.
- // Send to at most one fifth of the peers.
- maxSend := 1 + (len(wn.peers) / 5)
- out := make([]*wsPeer, 0, maxSend)
- now := time.Now()
- // a list to sort to find median
- times := make([]float64, 0, len(wn.peers))
- var min = math.MaxFloat64
- var max float64
- var sum float64
- pingPeriod := time.Duration(wn.config.PeerPingPeriodSeconds) * time.Second
- for _, peer := range wn.peers {
- lastPingSent, lastPingRoundTripTime := peer.pingTimes()
- sendToNow := now.Sub(lastPingSent)
- if (sendToNow > pingPeriod) && (len(out) < maxSend) {
- out = append(out, peer)
- }
- if (lastPingRoundTripTime > 0) && (sendToNow < maxPingAge) {
- ftime := lastPingRoundTripTime.Seconds()
- sum += ftime
- times = append(times, ftime)
- if ftime < min {
- min = ftime
- }
- if ftime > max {
- max = ftime
- }
- }
- }
- if len(times) != 0 {
- sort.Float64s(times)
- median := times[len(times)/2]
- medianPing.Set(median, nil)
- mean := sum / float64(len(times))
- meanPing.Set(mean, nil)
- minPing.Set(min, nil)
- maxPing.Set(max, nil)
- wn.log.Infof("ping times min=%f mean=%f median=%f max=%f", min, mean, median, max)
- }
- return out
-}
-
func (wn *WebsocketNetwork) getDNSAddrs(dnsBootstrap string) (relaysAddresses []string, archiverAddresses []string) {
var err error
relaysAddresses, err = tools_network.ReadFromSRV("algobootstrap", "tcp", dnsBootstrap, wn.config.FallbackDNSResolverAddress, wn.config.DNSSecuritySRVEnforced())
diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go
index d1d719b36..74c690241 100644
--- a/network/wsNetwork_test.go
+++ b/network/wsNetwork_test.go
@@ -614,6 +614,12 @@ func (nc *nopConn) SetReadLimit(limit int64) {
func (nc *nopConn) CloseWithoutFlush() error {
return nil
}
+func (nc *nopConn) SetPingHandler(h func(appData string) error) {}
+func (nc *nopConn) SetPongHandler(h func(appData string) error) {}
var nopConnSingleton = nopConn{}
diff --git a/network/wsPeer.go b/network/wsPeer.go
index f9e5e1dc1..f476cfa7e 100644
--- a/network/wsPeer.go
+++ b/network/wsPeer.go
@@ -90,6 +90,8 @@ type wsPeerWebsocketConn interface {
WriteControl(int, []byte, time.Time) error
SetReadLimit(int64)
CloseWithoutFlush() error
+ SetPingHandler(h func(appData string) error)
+ SetPongHandler(h func(appData string) error)
}
type sendMessage struct {
@@ -136,7 +138,7 @@ type wsPeer struct {
// lastPacketTime contains the UnixNano at the last time a successful communication was made with the peer.
// "successful communication" above refers to either reading from or writing to a connection without receiving any
// error.
- // we want this to be a 64-bit aligned for atomics.
+ // we want this to be a 64-bit aligned for atomics support on 32bit platforms.
lastPacketTime int64
// intermittentOutgoingMessageEnqueueTime contains the UnixNano of the message's enqueue time that is currently being written to the
@@ -418,6 +420,7 @@ func (wp *wsPeer) readLoop() {
wp.reportReadErr(err)
return
}
+
msg.processing = wp.processed
msg.Received = time.Now().UnixNano()
msg.Data = slurper.Bytes()
@@ -597,12 +600,14 @@ func (wp *wsPeer) writeLoopSendMsg(msg sendMessage) disconnectReason {
}
// check if this message was waiting in the queue for too long. If this is the case, return "true" to indicate that we want to close the connection.
- msgWaitDuration := time.Now().Sub(msg.enqueued)
+ now := time.Now()
+ msgWaitDuration := now.Sub(msg.enqueued)
if msgWaitDuration > maxMessageQueueDuration {
wp.net.log.Warnf("peer stale enqueued message %dms", msgWaitDuration.Nanoseconds()/1000000)
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "stale message"})
return disconnectStaleWrite
}
+
atomic.StoreInt64(&wp.intermittentOutgoingMessageEnqueueTime, msg.enqueued.UnixNano())
defer atomic.StoreInt64(&wp.intermittentOutgoingMessageEnqueueTime, 0)
err := wp.conn.WriteMessage(websocket.BinaryMessage, msg.data)
diff --git a/node/assemble_test.go b/node/assemble_test.go
index f094c8dd9..5f7dae7d5 100644
--- a/node/assemble_test.go
+++ b/node/assemble_test.go
@@ -70,7 +70,6 @@ func BenchmarkAssembleBlock(b *testing.B) {
Status: basics.Online,
MicroAlgos: basics.MicroAlgos{Raw: 10000000000000},
}
- //b.Log(addr)
}
genesis[poolAddr] = basics.AccountData{
diff --git a/node/impls.go b/node/impls.go
index 67f9963a0..d7ced370b 100644
--- a/node/impls.go
+++ b/node/impls.go
@@ -114,8 +114,8 @@ func (l agreementLedger) EnsureDigest(cert agreement.Certificate, verifier *agre
}
// Wrapping error with a LedgerDroppedRoundError when an old round is requested but the ledger has already dropped the entry
-func (l agreementLedger) Lookup(rnd basics.Round, addr basics.Address) (basics.AccountData, error) {
- record, err := l.Ledger.Lookup(rnd, addr)
+func (l agreementLedger) LookupAgreement(rnd basics.Round, addr basics.Address) (basics.OnlineAccountData, error) {
+ record, err := l.Ledger.LookupAgreement(rnd, addr)
var e *ledger.RoundOffsetError
if errors.As(err, &e) {
err = &agreement.LedgerDroppedRoundError{
diff --git a/node/netprio.go b/node/netprio.go
index c65db60d4..d3a4e99b6 100644
--- a/node/netprio.go
+++ b/node/netprio.go
@@ -80,12 +80,12 @@ func (node *AlgorandFullNode) MakePrioResponse(challenge string) []byte {
voteRound := latest + 2
for _, part := range node.accountManager.Keys(voteRound) {
parent := part.Address()
- data, err := node.ledger.Lookup(latest, parent)
+ data, err := node.ledger.LookupAgreement(latest, parent)
if err != nil {
continue
}
- weight := data.MicroAlgos.ToUint64()
+ weight := data.MicroAlgosWithRewards.ToUint64()
if weight > maxWeight {
maxPart = part
maxWeight = weight
@@ -125,7 +125,7 @@ func (node *AlgorandFullNode) VerifyPrioResponse(challenge string, response []by
return
}
- data, err := node.ledger.Lookup(balanceRound, rs.Sender)
+ data, err := node.ledger.LookupAgreement(balanceRound, rs.Sender)
if err != nil {
return
}
@@ -143,10 +143,10 @@ func (node *AlgorandFullNode) VerifyPrioResponse(challenge string, response []by
// GetPrioWeight implements the network.NetPrioScheme interface
func (node *AlgorandFullNode) GetPrioWeight(addr basics.Address) uint64 {
latest := node.ledger.LastRound()
- data, err := node.ledger.Lookup(latest, addr)
+ data, err := node.ledger.LookupAgreement(latest, addr)
if err != nil {
return 0
}
- return data.MicroAlgos.ToUint64()
+ return data.MicroAlgosWithRewards.ToUint64()
}
diff --git a/node/node.go b/node/node.go
index e0d2f437f..e5a2f7e6a 100644
--- a/node/node.go
+++ b/node/node.go
@@ -19,10 +19,12 @@ package node
import (
"context"
+ "errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
+ "strings"
"sync"
"time"
@@ -53,6 +55,7 @@ import (
"github.com/algorand/go-algorand/util/metrics"
"github.com/algorand/go-algorand/util/timers"
"github.com/algorand/go-deadlock"
+ uuid "github.com/satori/go.uuid"
)
// StatusReport represents the current basic status of the node
@@ -177,7 +180,6 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
}
p2pNode.SetPrioScheme(node)
node.net = p2pNode
- node.accountManager = data.MakeAccountManager(log)
accountListener := makeTopAccountListener(log)
@@ -267,6 +269,13 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
node.catchupService = catchup.MakeService(node.log, node.config, p2pNode, node.ledger, node.catchupBlockAuth, agreementLedger.UnmatchedPendingCertificates, node.lowPriorityCryptoVerificationPool)
node.txPoolSyncerService = rpcs.MakeTxSyncer(node.transactionPool, node.net, node.txHandler.SolicitedTxHandler(), time.Duration(cfg.TxSyncIntervalSeconds)*time.Second, time.Duration(cfg.TxSyncTimeoutSeconds)*time.Second, cfg.TxSyncServeResponseSize)
+ registry, err := ensureParticipationDB(genesisDir, node.log)
+ if err != nil {
+ log.Errorf("unable to initialize the participation registry database: %v", err)
+ return nil, err
+ }
+ node.accountManager = data.MakeAccountManager(log, registry)
+
err = node.loadParticipationKeys()
if err != nil {
log.Errorf("Cannot load participation keys: %v", err)
@@ -394,6 +403,7 @@ func (node *AlgorandFullNode) Start() {
func (node *AlgorandFullNode) startMonitoringRoutines() {
node.monitoringRoutinesWaitGroup.Add(3)
+ // PKI TODO: Remove this with #2596
// Periodically check for new participation keys
go node.checkForParticipationKeys()
@@ -473,7 +483,7 @@ func (node *AlgorandFullNode) Ledger() *data.Ledger {
// writeDevmodeBlock generates a new block for a devmode, and write it to the ledger.
func (node *AlgorandFullNode) writeDevmodeBlock() (err error) {
- var vb *ledger.ValidatedBlock
+ var vb *ledgercore.ValidatedBlock
vb, err = node.transactionPool.AssembleDevModeBlock()
if err != nil || vb == nil {
return
@@ -742,6 +752,16 @@ func (node *AlgorandFullNode) GetPendingTxnsFromPool() ([]transactions.SignedTxn
return bookkeeping.SignedTxnGroupsFlatten(node.transactionPool.PendingTxGroups()), nil
}
+// ensureParticipationDB opens or creates a participation DB.
+func ensureParticipationDB(genesisDir string, log logging.Logger) (account.ParticipationRegistry, error) {
+ accessorFile := filepath.Join(genesisDir, config.ParticipationRegistryFilename)
+ accessor, err := db.OpenPair(accessorFile, false)
+ if err != nil {
+ return nil, err
+ }
+ return account.MakeParticipationRegistry(accessor, log)
+}
+
// Reload participation keys from disk periodically
func (node *AlgorandFullNode) checkForParticipationKeys() {
defer node.monitoringRoutinesWaitGroup.Done()
@@ -760,6 +780,149 @@ func (node *AlgorandFullNode) checkForParticipationKeys() {
}
}
+// ListParticipationKeys returns all participation keys currently installed on the node
+func (node *AlgorandFullNode) ListParticipationKeys() (partKeys []account.ParticipationRecord, err error) {
+ return node.accountManager.Registry().GetAll(), nil
+}
+
+// GetParticipationKey retrieves the record of a participation ID from the node
+func (node *AlgorandFullNode) GetParticipationKey(partKey account.ParticipationID) (account.ParticipationRecord, error) {
+ rval := node.accountManager.Registry().Get(partKey)
+
+ if rval.IsZero() {
+ return account.ParticipationRecord{}, account.ErrParticipationIDNotFound
+ }
+
+ return node.accountManager.Registry().Get(partKey), nil
+}
+
+// RemoveParticipationKey given a participation id, remove the records from the node
+func (node *AlgorandFullNode) RemoveParticipationKey(partKey account.ParticipationID) error {
+
+	// We need to remove the file and then remove the entry in the registry.
+	// Let's first get the recorded information from the registry so we can look up the file.
+
+ partRecord := node.accountManager.Registry().Get(partKey)
+
+ if partRecord.IsZero() {
+ return account.ErrParticipationIDNotFound
+ }
+
+ genID := node.GenesisID()
+
+ outDir := filepath.Join(node.rootDir, genID)
+
+ filename := config.PartKeyFilename(partRecord.ParticipationID.String(), uint64(partRecord.FirstValid), uint64(partRecord.LastValid))
+ fullyQualifiedFilename := filepath.Join(outDir, filepath.Base(filename))
+
+ err := node.accountManager.Registry().Delete(partKey)
+ if err != nil {
+ return err
+ }
+
+	// PKI TODO: pick a better timeout, this is just something short. This could also be removed if we change
+	// POST /v2/participation and DELETE /v2/participation to return "202 Accepted" instead of waiting and
+	// getting the error message.
+ err = node.accountManager.Registry().Flush(500 * time.Millisecond)
+ if err != nil {
+ return err
+ }
+
+ // Only after deleting and flushing do we want to remove the file
+ _ = os.Remove(fullyQualifiedFilename)
+
+ return nil
+}
+
+func createTemporaryParticipationKey(outDir string, partKeyBinary []byte) (string, error) {
+ var sb strings.Builder
+
+ // Create a temporary filename with a UUID so that we can call this function twice
+ // in a row without worrying about collisions
+ sb.WriteString("tempPartKeyBinary.")
+ sb.WriteString(uuid.NewV4().String())
+ sb.WriteString(".bin")
+
+ tempFile := filepath.Join(outDir, filepath.Base(sb.String()))
+
+ file, err := os.Create(tempFile)
+
+ if err != nil {
+ return "", err
+ }
+
+ _, err = file.Write(partKeyBinary)
+
+ file.Close()
+
+ if err != nil {
+ os.Remove(tempFile)
+ return "", err
+ }
+
+ return tempFile, nil
+}
+
+// InstallParticipationKey installs the given participation key from its binary stream representation.
+func (node *AlgorandFullNode) InstallParticipationKey(partKeyBinary []byte) (account.ParticipationID, error) {
+ genID := node.GenesisID()
+
+ outDir := filepath.Join(node.rootDir, genID)
+
+ fullyQualifiedTempFile, err := createTemporaryParticipationKey(outDir, partKeyBinary)
+	// We need to make sure no temp file is created or remains if there is an error.
+	// We will eventually rename this file, but if we fail between this point and the
+	// rename, we want to ensure the temporary file is removed. After the rename this
+	// removal will fail anyway, since the file will no longer exist.
+
+ // Explicitly ignore the error with a closure
+ defer func(name string) {
+ _ = os.Remove(name)
+ }(fullyQualifiedTempFile)
+
+ if err != nil {
+ return account.ParticipationID{}, err
+ }
+
+ inputdb, err := db.MakeErasableAccessor(fullyQualifiedTempFile)
+ if err != nil {
+ return account.ParticipationID{}, err
+ }
+ defer inputdb.Close()
+
+ partkey, err := account.RestoreParticipation(inputdb)
+ if err != nil {
+ return account.ParticipationID{}, err
+ }
+ defer partkey.Close()
+
+ if partkey.Parent == (basics.Address{}) {
+ return account.ParticipationID{}, fmt.Errorf("cannot install partkey with missing (zero) parent address")
+ }
+
+ // Tell the AccountManager about the Participation (dupes don't matter) so we ignore the return value
+ _ = node.accountManager.AddParticipation(partkey)
+
+	// PKI TODO: pick a better timeout, this is just something short. This could also be removed if we change
+	// POST /v2/participation and DELETE /v2/participation to return "202 Accepted" instead of waiting and
+	// getting the error message.
+ err = node.accountManager.Registry().Flush(500 * time.Millisecond)
+ if err != nil {
+ return account.ParticipationID{}, err
+ }
+
+ newFilename := config.PartKeyFilename(partkey.ID().String(), uint64(partkey.FirstValid), uint64(partkey.LastValid))
+ newFullyQualifiedFilename := filepath.Join(outDir, filepath.Base(newFilename))
+
+ err = os.Rename(fullyQualifiedTempFile, newFullyQualifiedFilename)
+
+ if err != nil {
+		return account.ParticipationID{}, err
+ }
+
+ return partkey.ID(), nil
+}
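InstallParticipationKey follows a write-to-temp-then-rename discipline so a half-written key file can never be picked up under its final name. A generic sketch of that discipline (os.CreateTemp stands in for the UUID naming used above):

package example

import (
	"os"
	"path/filepath"
)

// writeThenRename writes data to a unique temp file in the destination
// directory, then renames it into place.
func writeThenRename(dir, finalName string, data []byte) error {
	f, err := os.CreateTemp(dir, "tmp-*.bin")
	if err != nil {
		return err
	}
	tmp := f.Name()
	// Best-effort cleanup; after a successful rename this is a harmless no-op.
	defer os.Remove(tmp)

	if _, err = f.Write(data); err != nil {
		f.Close()
		return err
	}
	if err = f.Close(); err != nil {
		return err
	}
	return os.Rename(tmp, filepath.Join(dir, finalName))
}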
+
func (node *AlgorandFullNode) loadParticipationKeys() error {
// Generate a list of all potential participation key files
genesisDir := filepath.Join(node.rootDir, node.genesisID)
@@ -781,7 +944,7 @@ func (node *AlgorandFullNode) loadParticipationKeys() error {
if err != nil {
if db.IsErrBusy(err) {
// this is a special case:
- // we might get "database is locked" when we attempt to access a database that is conurrently updates it's participation keys.
+ // we might get "database is locked" when we attempt to access a database that is concurrently updating its participation keys.
// that database is clearly already on the account manager, and doesn't need to be processed through this logic, and therefore
// we can safely ignore that fail case.
continue
@@ -913,6 +1076,10 @@ func (node *AlgorandFullNode) oldKeyDeletionThread() {
node.mu.Lock()
node.accountManager.DeleteOldKeys(latestHdr, ccSigs, agreementProto)
node.mu.Unlock()
+
+ // PKI TODO: Maybe we don't even need to flush the registry.
+ // Persist participation registry metrics.
+ node.accountManager.FlushRegistry(2 * time.Second)
}
}
@@ -1067,7 +1234,7 @@ func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode boo
// validatedBlock satisfies agreement.ValidatedBlock
type validatedBlock struct {
- vb *ledger.ValidatedBlock
+ vb *ledgercore.ValidatedBlock
}
// WithSeed satisfies the agreement.ValidatedBlock interface.
@@ -1083,10 +1250,11 @@ func (vb validatedBlock) Block() bookkeeping.Block {
}
// AssembleBlock implements Ledger.AssembleBlock.
-func (node *AlgorandFullNode) AssembleBlock(round basics.Round, deadline time.Time) (agreement.ValidatedBlock, error) {
+func (node *AlgorandFullNode) AssembleBlock(round basics.Round) (agreement.ValidatedBlock, error) {
+ deadline := time.Now().Add(node.config.ProposalAssemblyTime)
lvb, err := node.transactionPool.AssembleBlock(round, deadline)
if err != nil {
- if err == pools.ErrStaleBlockAssemblyRequest {
+ if errors.Is(err, pools.ErrStaleBlockAssemblyRequest) {
// convert specific error to one that would have special handling in the agreement code.
err = agreement.ErrAssembleBlockRoundStale
@@ -1112,7 +1280,7 @@ func (node *AlgorandFullNode) VotingKeys(votingRound, keysRound basics.Round) []
keys := node.accountManager.Keys(votingRound)
participations := make([]account.Participation, 0, len(keys))
- accountsData := make(map[basics.Address]basics.AccountData, len(keys))
+ accountsData := make(map[basics.Address]basics.OnlineAccountData, len(keys))
matchingAccountsKeys := make(map[basics.Address]bool)
mismatchingAccountsKeys := make(map[basics.Address]int)
const bitMismatchingVotingKey = 1
@@ -1121,7 +1289,7 @@ func (node *AlgorandFullNode) VotingKeys(votingRound, keysRound basics.Round) []
acctData, hasAccountData := accountsData[part.Parent]
if !hasAccountData {
var err error
- acctData, _, err = node.ledger.LookupWithoutRewards(keysRound, part.Parent)
+ acctData, err = node.ledger.LookupAgreement(keysRound, part.Parent)
if err != nil {
node.log.Warnf("node.VotingKeys: Account %v not participating: cannot locate account for round %d : %v", part.Address(), keysRound, err)
continue
@@ -1139,6 +1307,12 @@ func (node *AlgorandFullNode) VotingKeys(votingRound, keysRound basics.Round) []
}
participations = append(participations, part)
matchingAccountsKeys[part.Address()] = true
+
+ // Make sure the key is registered.
+ err := node.accountManager.Registry().Register(part.ID(), votingRound)
+ if err != nil {
+ node.log.Warnf("Failed to register participation key (%s) with participation registry: %v\n", part.ID(), err)
+ }
}
// write the warnings per account only if we couldn't find a single valid key for that account.
for mismatchingAddr, warningFlags := range mismatchingAccountsKeys {
@@ -1156,3 +1330,8 @@ func (node *AlgorandFullNode) VotingKeys(votingRound, keysRound basics.Round) []
}
return participations
}
+
+// Record forwards participation record calls to the participation registry.
+func (node *AlgorandFullNode) Record(account basics.Address, round basics.Round, participationType account.ParticipationAction) {
+ node.accountManager.Record(account, round, participationType)
+}
diff --git a/node/node_test.go b/node/node_test.go
index 411bb3360..bfd33a8f0 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -129,10 +129,10 @@ func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationP
panic(err)
}
part, err := account.FillDBWithParticipationKeys(access, root.Address(), firstRound, lastRound, config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution)
- access.Close()
if err != nil {
panic(err)
}
+ access.Close()
data := basics.AccountData{
Status: basics.Online,
@@ -507,3 +507,49 @@ func TestMismatchingGenesisDirectoryPermissions(t *testing.T) {
require.NoError(t, os.Chmod(testDirectroy, 1700))
require.NoError(t, os.RemoveAll(testDirectroy))
}
+
+func TestAsyncRecord(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ testDirectroy, err := ioutil.TempDir(os.TempDir(), t.Name())
+ require.NoError(t, err)
+
+ genesis := bookkeeping.Genesis{
+ SchemaID: "go-test-node-record-async",
+ Proto: protocol.ConsensusCurrentVersion,
+ Network: config.Devtestnet,
+ FeeSink: sinkAddr.String(),
+ RewardsPool: poolAddr.String(),
+ }
+
+ cfg := config.GetDefaultLocal()
+ cfg.DisableNetworking = true
+	node, err := MakeFull(logging.TestingLog(t), testDirectroy, cfg, []string{}, genesis)
+ require.NoError(t, err)
+ node.Start()
+ defer node.Stop()
+
+ var addr basics.Address
+ addr[0] = 1
+
+ p := account.Participation{
+ Parent: addr,
+ FirstValid: 0,
+ LastValid: 1000000,
+ Voting: &crypto.OneTimeSignatureSecrets{},
+ VRF: &crypto.VRFSecrets{},
+ }
+ id, err := node.accountManager.Registry().Insert(p)
+ require.NoError(t, err)
+ err = node.accountManager.Registry().Register(id, 0)
+ require.NoError(t, err)
+
+ node.Record(addr, 10000, account.Vote)
+ node.Record(addr, 20000, account.BlockProposal)
+
+ time.Sleep(5 * time.Second)
+ records := node.accountManager.Registry().GetAll()
+ require.Len(t, records, 1)
+ require.Equal(t, 10000, int(records[0].LastVote))
+ require.Equal(t, 20000, int(records[0].LastBlockProposal))
+}
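The five-second sleep above gives the asynchronous Record calls time to flush before the assertions run. A less flaky alternative is to poll; here is a sketch assuming testify's require.Eventually, written for the node package's tests. The getAll parameter stands in for node.accountManager.Registry().GetAll, and the field names mirror the assertions above.

```
package node

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/algorand/go-algorand/data/account"
	"github.com/algorand/go-algorand/data/basics"
)

// waitForRecords polls until the single expected record reflects both
// actions, failing the test after ten seconds instead of sleeping blindly.
func waitForRecords(t *testing.T, getAll func() []account.ParticipationRecord, lastVote, lastProposal basics.Round) {
	require.Eventually(t, func() bool {
		records := getAll()
		return len(records) == 1 &&
			records[0].LastVote == lastVote &&
			records[0].LastBlockProposal == lastProposal
	}, 10*time.Second, 100*time.Millisecond)
}
```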
diff --git a/protocol/hash.go b/protocol/hash.go
index 846b03c27..e17114679 100644
--- a/protocol/hash.go
+++ b/protocol/hash.go
@@ -49,6 +49,7 @@ const (
Program HashID = "Program"
ProgramData HashID = "ProgData"
ProposerSeed HashID = "PS"
+ ParticipationKeys HashID = "PK"
Seed HashID = "SD"
SpecialAddr HashID = "SpecialAddr"
SignedTxnInBlock HashID = "STIB"
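HashID values are domain-separation prefixes: the identifier's bytes are prepended to the message before hashing, so a participation-key digest under "PK" can never collide with, say, a proposer seed under "PS", even for identical payloads. A minimal sketch of the prefix-then-hash idea follows; hashWithID is an illustrative helper, not an API from this diff.

```
package main

import (
	"crypto/sha512"
	"fmt"
)

// hashWithID prepends a domain-separation tag before hashing, so identical
// payloads hashed under different tags yield unrelated digests.
func hashWithID(id string, data []byte) [32]byte {
	return sha512.Sum512_256(append([]byte(id), data...))
}

func main() {
	payload := []byte("example payload")
	fmt.Printf("PK: %x\n", hashWithID("PK", payload))
	fmt.Printf("PS: %x\n", hashWithID("PS", payload))
}
```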
diff --git a/rpcs/blockService.go b/rpcs/blockService.go
index 2df26223c..2e77aba8e 100644
--- a/rpcs/blockService.go
+++ b/rpcs/blockService.go
@@ -23,6 +23,7 @@ import (
"path"
"strconv"
"strings"
+ "sync"
"github.com/gorilla/mux"
@@ -72,6 +73,7 @@ type BlockService struct {
fallbackEndpoints fallbackEndpoints
enableArchiverFallback bool
log logging.Logger
+ closeWaitGroup sync.WaitGroup
}
// EncodedBlockCert defines how GetBlockBytes encodes a block and its certificate
@@ -125,12 +127,14 @@ func (bs *BlockService) Start() {
bs.net.RegisterHandlers(handlers)
}
bs.stop = make(chan struct{})
+ bs.closeWaitGroup.Add(1)
go bs.listenForCatchupReq(bs.catchupReqs, bs.stop)
}
// Stop servicing catchup requests over ws
func (bs *BlockService) Stop() {
close(bs.stop)
+ bs.closeWaitGroup.Wait()
}
// ServerHTTP returns blocks
@@ -237,6 +241,7 @@ func (bs *BlockService) processIncomingMessage(msg network.IncomingMessage) (n n
// listenForCatchupReq handles catchup getblock request
func (bs *BlockService) listenForCatchupReq(reqs <-chan network.IncomingMessage, stop chan struct{}) {
+ defer bs.closeWaitGroup.Done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for {
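The closeWaitGroup addition makes Stop synchronous: Add is called before the listener goroutine is spawned, Done runs when it exits, and Wait guarantees the goroutine has drained before Stop returns. A minimal sketch of that start/stop pattern, assuming a single background listener; the service type here is illustrative.

```
package main

import (
	"fmt"
	"sync"
)

type service struct {
	stop chan struct{}
	wg   sync.WaitGroup
}

func (s *service) Start() {
	s.stop = make(chan struct{})
	s.wg.Add(1) // register before spawning, so Stop cannot miss the goroutine
	go s.listen()
}

func (s *service) listen() {
	defer s.wg.Done() // signal exit on every return path
	<-s.stop          // stand-in for the request-handling loop
}

func (s *service) Stop() {
	close(s.stop)
	s.wg.Wait() // blocks until listen has fully exited
	fmt.Println("stopped cleanly")
}

func main() {
	s := &service{}
	s.Start()
	s.Stop()
}
```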
diff --git a/rpcs/blockService_test.go b/rpcs/blockService_test.go
index a48217b13..542e8783e 100644
--- a/rpcs/blockService_test.go
+++ b/rpcs/blockService_test.go
@@ -50,6 +50,11 @@ func (mup *mockUnicastPeer) Unicast(ctx context.Context, data []byte, tag protoc
func (mup *mockUnicastPeer) Version() string {
return "2.1"
}
+
+// GetConnectionLatency returns the connection latency between the local node and this peer.
+func (mup *mockUnicastPeer) GetConnectionLatency() time.Duration {
+ return time.Duration(0)
+}
func (mup *mockUnicastPeer) Request(ctx context.Context, tag network.Tag, topics network.Topics) (resp *network.Response, e error) {
return nil, nil
}
diff --git a/scripts/archtype.sh b/scripts/archtype.sh
index 6c7c6a8f2..cc241c82f 100755
--- a/scripts/archtype.sh
+++ b/scripts/archtype.sh
@@ -1,5 +1,10 @@
#!/usr/bin/env bash
+if [ ! -z "${GOHOSTARCH+x}" ]; then
+ echo "${GOHOSTARCH}"
+ exit 0
+fi
+
ARCH=$(uname -m)
if [[ "${ARCH}" = "x86_64" ]]; then
diff --git a/scripts/install_linux_deps.sh b/scripts/install_linux_deps.sh
index 453bbaf70..78c42cd35 100755
--- a/scripts/install_linux_deps.sh
+++ b/scripts/install_linux_deps.sh
@@ -5,7 +5,7 @@ set -e
DISTRIB=$ID
ARCH_DEPS="boost boost-libs expect jq autoconf shellcheck sqlite python-virtualenv"
-UBUNTU_DEPS="libboost-all-dev expect jq autoconf shellcheck sqlite3 python3-venv"
+UBUNTU_DEPS="libtool libboost-math-dev expect jq autoconf shellcheck sqlite3 python3-venv"
FEDORA_DEPS="boost-devel expect jq autoconf ShellCheck sqlite python-virtualenv"
if [ "${DISTRIB}" = "arch" ]; then
diff --git a/scripts/ostype.sh b/scripts/ostype.sh
index f3f16a45b..bf2446213 100755
--- a/scripts/ostype.sh
+++ b/scripts/ostype.sh
@@ -1,5 +1,10 @@
#!/usr/bin/env bash
+if [ ! -z "${GOHOSTOS+x}" ]; then
+ echo "${GOHOSTOS}"
+ exit 0
+fi
+
UNAME=$(uname)
if [ "${UNAME}" = "Darwin" ]; then
diff --git a/scripts/release/mule/Makefile.mule b/scripts/release/mule/Makefile.mule
index 627e7fe21..95cc465eb 100644
--- a/scripts/release/mule/Makefile.mule
+++ b/scripts/release/mule/Makefile.mule
@@ -4,7 +4,7 @@ PKG_DIR = $(SRCPATH)/tmp/node_pkgs/$(OS_TYPE)/$(ARCH)
.PHONY: ci-clean ci-setup ci-build
-ci-clean:
+ci-clean: clean
rm -rf tmp
ci-setup:
@@ -28,7 +28,7 @@ ci-integration:
SRCROOT=$(SRCPATH) \
test/scripts/e2e.sh -c $(CHANNEL) -n
-ci-build: buildsrc gen ci-setup
+ci-build: ci-clean buildsrc gen ci-setup
CHANNEL=$(CHANNEL) PKG_ROOT=$(PKG_DIR) NO_BUILD=True VARIATIONS=$(OS_TYPE)-$(ARCH) \
scripts/build_packages.sh $(OS_TYPE)/$(ARCH) && \
mkdir -p $(PKG_DIR)/data && \
diff --git a/scripts/travis/build.sh b/scripts/travis/build.sh
index 860341609..3087f63d7 100755
--- a/scripts/travis/build.sh
+++ b/scripts/travis/build.sh
@@ -46,9 +46,10 @@ ARCH=$("${SCRIPTPATH}/../archtype.sh")
# Get the go build version.
if [ -z "${SKIP_GO_INSTALLATION}" ]; then
GOLANG_VERSION=$(./scripts/get_golang_version.sh)
- curl -sL -o ~/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
- chmod +x ~/gimme
- eval "$(~/gimme "${GOLANG_VERSION}")"
+ GIMME_PATH="${GIMME_INSTALL_DIR:-${HOME}}/gimme"
+ curl -sL -o "${GIMME_PATH}" https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
+ chmod +x "${GIMME_PATH}"
+ eval "$("${GIMME_PATH}" "${GOLANG_VERSION}")"
fi
# travis sometimes fail to download a dependency. trying multiple times might help.
diff --git a/scripts/travis/codegen_verification.sh b/scripts/travis/codegen_verification.sh
index 395c85e90..ae5887366 100755
--- a/scripts/travis/codegen_verification.sh
+++ b/scripts/travis/codegen_verification.sh
@@ -50,6 +50,8 @@ function runGoLint() {
echo >&2 "golint must be clean. Please run the following to list issues(${warningCount}):"
echo >&2 " make lint"
+ # run the linter again to output the actual issues
+ "$GOPATH"/bin/golint $(go list ./... | grep -v /vendor/ | grep -v /test/e2e-go/) >&2
return 1
}
diff --git a/scripts/travis/deploy_packages.sh b/scripts/travis/deploy_packages.sh
index c98b95730..e7e517394 100755
--- a/scripts/travis/deploy_packages.sh
+++ b/scripts/travis/deploy_packages.sh
@@ -24,7 +24,9 @@ then
exit 1
fi
-scripts/travis/build.sh
+if [ -z "${NO_BUILD}" ] || [ "${NO_BUILD}" != "true" ]; then
+ scripts/travis/build.sh
+fi
export RELEASE_GENESIS_PROCESS=true
export NO_BUILD=true
diff --git a/test/README.md b/test/README.md
index 59e5760f6..e35a21251 100644
--- a/test/README.md
+++ b/test/README.md
@@ -46,9 +46,9 @@ optional arguments:
--version Future|vXX selects the network template file
```
-To run a specific test:
+To run a specific test, run e2e.sh with the -i (interactive) flag and follow the instructions:
```
-~$ ./e2e_client_runner.py /full/path/to/e2e_subs/test_script.sh
+test/scripts/e2e.sh -i
```
Tests in the `e2e_subs/serial` directory are executed serially instead of in parallel. This should only be used when absolutely necessary.
diff --git a/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp b/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp
index 39f47e12c..f52e57a21 100644
--- a/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp
+++ b/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp
@@ -108,15 +108,25 @@ if { [catch {
::AlgorandGoal::StartNode $TEST_ROOT_DIR/Node False $WEBPROXY_LISTEN_ADDRESS
+ # once the node is started we can clear ::GLOBAL_TEST_ALGO_DIR so that shutdown is performed as a network.
+ unset ::GLOBAL_TEST_ALGO_DIR
+
::AlgorandGoal::WaitForRound 1 $TEST_ROOT_DIR/Node
set CATCHPOINT [::AlgorandGoal::GetNodeLastCatchpoint $TEST_ROOT_DIR/Primary]
puts "Catchpoint is $CATCHPOINT"
+ regexp -nocase {([0-9]*)#[A-Z2-7]*} $CATCHPOINT CATCHPOINT_ROUND CATCHPOINT_ROUND
+
+ puts "Catchpoint round is $CATCHPOINT_ROUND"
+
+ # wait for the primary to reach $CATCHPOINT_ROUND + 5, so that the catchpoint file is saved
+ ::AlgorandGoal::WaitForRound [expr {int($CATCHPOINT_ROUND + 5)}] $TEST_ROOT_DIR/Primary
+
::AlgorandGoal::StartCatchup $TEST_ROOT_DIR/Node $CATCHPOINT
- ::AlgorandGoal::WaitForRound 37 $TEST_ROOT_DIR/Node
+ ::AlgorandGoal::WaitForRound $CATCHPOINT_ROUND $TEST_ROOT_DIR/Node
::AlgorandGoal::StopNode $TEST_ROOT_DIR/Node
@@ -164,6 +174,9 @@ if { [catch {
::AlgorandGoal::StartNode $TEST_ROOT_DIR/Node False $WEBPROXY_LISTEN_ADDRESS
+ # once the node is started we can clear ::GLOBAL_TEST_ALGO_DIR so that shutdown is performed as a network.
+ set ::GLOBAL_TEST_ALGO_DIR ""
+
::AlgorandGoal::WaitForRound 38 $TEST_ROOT_DIR/Node
::AlgorandGoal::StopNode $TEST_ROOT_DIR/Node
diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
index 273d02db9..345c1be6f 100644
--- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
+++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
@@ -934,7 +934,17 @@ proc ::AlgorandGoal::WaitForRound { WAIT_FOR_ROUND_NUMBER NODE_DATA_DIR } {
-re {Genesis ID: (\w+)} {set GENESIS_ID $expect_out(1,string); exp_continue }
-re {Genesis hash: ([A-Za-z0-9+/]+={0,2})} {set GENESIS_HASH $expect_out(1,string); exp_continue }
-re {Catchpoint: ([0-9]*#[A-Z2-7]*)} { set CATCHPOINT $expect_out(1,string); exp_continue }
- eof { catch wait result; if { [lindex $result 3] != 0 } { ::AlgorandGoal::Abort "failed to wait for round : error code [lindex $result 3]"} }
+ eof {
+ catch wait result;
+ if { [lindex $result 3] != 0 } {
+ log_user 1
+ set errLog [exec cat $NODE_DATA_DIR/algod-err.log]
+ puts "$NODE_DATA_DIR/algod-err.log :\r\n$errLog"
+ set nodeLog [exec -- tail -n 30 $NODE_DATA_DIR/node.log]
+ puts "$NODE_DATA_DIR/node.log :\r\n$nodeLog"
+ ::AlgorandGoal::Abort "failed to wait for round : error code [lindex $result 3]"
+ }
+ }
}
log_user 1
if { $BLOCK > -1 } {
diff --git a/test/e2e-go/features/devmode/devmode_test.go b/test/e2e-go/features/devmode/devmode_test.go
new file mode 100644
index 000000000..ee741fb8b
--- /dev/null
+++ b/test/e2e-go/features/devmode/devmode_test.go
@@ -0,0 +1,65 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+// Check that devmode is functioning as designed.
+package devmode
+
+import (
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestDevMode(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+
+ // Start devmode network, and make sure everything is primed by sending a transaction.
+ var fixture fixtures.RestClientFixture
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "DevModeNetwork.json"))
+ fixture.Start()
+ defer fixture.Shutdown()
+ sender, err := fixture.GetRichestAccount()
+ require.NoError(t, err)
+ key := crypto.GenerateSignatureSecrets(crypto.Seed{})
+ receiver := basics.Address(key.SignatureVerifier)
+ txn := fixture.SendMoneyAndWait(0, 100000, 1000, sender.Address, receiver.String(), "")
+ firstRound := txn.ConfirmedRound + 1
+ start := time.Now()
+
+ // 2 transactions should be sent within one normal confirmation time.
+ for i := uint64(0); i < 2; i++ {
+ txn = fixture.SendMoneyAndWait(firstRound+i, 100000, 1000, sender.Address, receiver.String(), "")
+ require.Equal(t, firstRound+i, txn.FirstRound)
+ }
+ require.True(t, time.Since(start) < 8*time.Second, "Transactions should be confirmed faster than usual.")
+
+ // Without transactions there should be no rounds even after a normal confirmation time.
+ time.Sleep(10 * time.Second)
+ status, err := fixture.LibGoalClient.Status()
+ require.NoError(t, err)
+ require.Equal(t, txn.ConfirmedRound, status.LastRound, "There should be no rounds without a transaction.")
+}
diff --git a/test/e2e-go/features/participation/accountParticipationTransitions_test.go b/test/e2e-go/features/participation/accountParticipationTransitions_test.go
new file mode 100644
index 000000000..5b2c3ea0c
--- /dev/null
+++ b/test/e2e-go/features/participation/accountParticipationTransitions_test.go
@@ -0,0 +1,164 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package participation
+
+// Tests in this file are focused on testing how a specific account uses and
+// manages its participation keys. DevMode is used to make things more
+// deterministic.
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/libgoal"
+ "github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+// installParticipationKey generates a new key for a given account and installs it with the client.
+func installParticipationKey(t *testing.T, client libgoal.Client, addr string, firstValid, lastValid uint64) (resp generated.PostParticipationResponse, part account.Participation, err error) {
+ dir, err := ioutil.TempDir("", "temporary_partkey_dir")
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+
+ // Install overlapping participation keys...
+ part, filePath, err := client.GenParticipationKeysTo(addr, firstValid, lastValid, 100, dir)
+ require.NoError(t, err)
+ require.NotNil(t, filePath)
+ require.Equal(t, addr, part.Parent.String())
+
+ resp, err = client.AddParticipationKey(filePath)
+ return
+}
+
+func registerParticipationAndWait(t *testing.T, client libgoal.Client, part account.Participation) generated.NodeStatusResponse {
+ txParams, err := client.SuggestedParams()
+ require.NoError(t, err)
+ sAccount := part.Address().String()
+ sWH, err := client.GetUnencryptedWalletHandle()
+ require.NoError(t, err)
+ goOnlineTx, err := client.MakeUnsignedGoOnlineTx(sAccount, &part, txParams.LastRound+1, txParams.LastRound+1, txParams.Fee, [32]byte{})
+ require.NoError(t, err)
+ require.Equal(t, sAccount, goOnlineTx.Src().String())
+ onlineTxID, err := client.SignAndBroadcastTransaction(sWH, nil, goOnlineTx)
+ require.NoError(t, err)
+ require.NotEmpty(t, onlineTxID)
+ status, err := client.WaitForRound(txParams.LastRound)
+ require.NoError(t, err)
+ return status
+}
+
+func TestKeyRegistration(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Skipf("Skipping flaky test. Re-enable with #3255")
+
+ if testing.Short() {
+ t.Skip()
+ }
+
+ checkKey := func(key generated.ParticipationKey, firstValid, lastValid, lastProposal uint64, msg string) {
+ require.NotNil(t, key.EffectiveFirstValid, fmt.Sprintf("%s.EffectiveFirstValid", msg))
+ require.NotNil(t, key.EffectiveLastValid, fmt.Sprintf("%s.EffectiveLastValid", msg))
+ require.NotNil(t, key.LastBlockProposal, fmt.Sprintf("%s.LastBlockProposal", msg))
+
+ assert.Equal(t, int(*(key.EffectiveFirstValid)), int(firstValid), fmt.Sprintf("%s.EffectiveFirstValid", msg))
+ assert.Equal(t, int(*(key.EffectiveLastValid)), int(lastValid), fmt.Sprintf("%s.EffectiveLastValid", msg))
+ assert.Equal(t, int(*(key.LastBlockProposal)), int(lastProposal), fmt.Sprintf("%s.LastBlockProposal", msg))
+ }
+
+ // Start devmode network and initialize things for the test.
+ var fixture fixtures.RestClientFixture
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "DevModeOneWallet.json"))
+ fixture.Start()
+ defer fixture.Shutdown()
+ sClient := fixture.GetLibGoalClientForNamedNode("Node")
+ minTxnFee, _, err := fixture.MinFeeAndBalance(0)
+ require.NoError(t, err)
+ accountResponse, err := fixture.GetRichestAccount()
+ require.NoError(t, err)
+ sAccount := accountResponse.Address
+
+ // Add overlapping participation keys for the account on rounds 1 and 2
+ last := uint64(6_000_000)
+ numNew := 2
+ for i := 0; i < numNew; i++ {
+ response, part, err := installParticipationKey(t, sClient, sAccount, 0, last+uint64(i))
+ require.NoError(t, err)
+ require.NotNil(t, response)
+ registerParticipationAndWait(t, sClient, part)
+ }
+
+ // Make sure the new keys are installed.
+ keys, err := fixture.LibGoalClient.GetParticipationKeys()
+ require.NoError(t, err)
+ require.Len(t, keys, numNew+1)
+
+ // Zip ahead MaxBalLookback.
+ params, err := fixture.CurrentConsensusParams()
+ require.NoError(t, err)
+ lookback := params.MaxBalLookback
+ for i := uint64(1); i < lookback; i++ {
+ fixture.SendMoneyAndWait(2+i, 0, minTxnFee, sAccount, sAccount, "")
+ }
+
+ // Wait until data has been persisted
+ ready := false
+ waitfor := time.After(1 * time.Minute)
+ for !ready {
+ select {
+ case <-waitfor:
+ ready = true
+ default:
+ keys, err = fixture.LibGoalClient.GetParticipationKeys()
+ ready = (len(keys) >= 3) &&
+ (keys[2].LastBlockProposal != nil) &&
+ (keys[2].EffectiveFirstValid != nil) &&
+ (keys[2].EffectiveLastValid != nil) &&
+ (keys[1].LastBlockProposal != nil) &&
+ (keys[1].EffectiveFirstValid != nil) &&
+ (keys[1].EffectiveLastValid != nil) &&
+ (keys[0].LastBlockProposal != nil) &&
+ (keys[0].EffectiveFirstValid != nil) &&
+ (keys[0].EffectiveLastValid != nil)
+ if !ready {
+ time.Sleep(100 * time.Millisecond)
+ }
+ }
+ }
+
+ // Verify results; order may vary, so key off the VoteLastValid field
+ require.Len(t, keys, 3)
+ for _, k := range keys {
+ switch k.Key.VoteLastValid {
+ case 3_000_000:
+ checkKey(k, 1, lookback, lookback, "keys[0]")
+ case last:
+ checkKey(k, lookback+1, lookback+1, lookback+1, "keys[1]")
+ case last + 1:
+ checkKey(k, lookback+2, last+1, lookback+2, "keys[2]")
+ }
+ }
+}
diff --git a/test/e2e-go/features/participation/participationExpiration_test.go b/test/e2e-go/features/participation/participationExpiration_test.go
new file mode 100644
index 000000000..915691d03
--- /dev/null
+++ b/test/e2e-go/features/participation/participationExpiration_test.go
@@ -0,0 +1,196 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package participation
+
+import (
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, finalStatus basics.Status, protocolCheck string) {
+
+ a := require.New(fixtures.SynchronizedTest(t))
+ pClient := fixture.GetLibGoalClientForNamedNode("Primary")
+
+ sClient := fixture.GetLibGoalClientForNamedNode("Secondary")
+ sWH, err := sClient.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ sAccount, err := sClient.GenerateAddress(sWH)
+ a.NoError(err)
+
+ // send money to new account from some other account in the template, so that account can go online
+ accountList, err := fixture.GetWalletsSortedByBalance()
+ a.NoError(err)
+ richAccount := accountList[0].Address
+ _, initialRound := fixture.GetBalanceAndRound(richAccount)
+
+ minTxnFee, minAcctBalance, err := fixture.MinFeeAndBalance(initialRound)
+ a.NoError(err)
+
+ transactionFee := minTxnFee
+ amountToSendInitial := 5 * minAcctBalance
+
+ initialAmt, err := sClient.GetBalance(sAccount)
+ a.NoError(err)
+
+ fixture.SendMoneyAndWait(initialRound, amountToSendInitial, transactionFee, richAccount, sAccount, "")
+
+ newAmt, err := sClient.GetBalance(sAccount)
+ a.NoError(err)
+
+ a.GreaterOrEqual(newAmt, initialAmt)
+
+ newAccountStatus, err := pClient.AccountInformation(sAccount)
+ a.NoError(err)
+ a.Equal(basics.Offline.String(), newAccountStatus.Status)
+
+ var onlineTxID string
+ var partKeyLastValid uint64
+
+ startTime := time.Now()
+ for time.Since(startTime) < 2*time.Minute {
+ _, currentRound := fixture.GetBalanceAndRound(richAccount)
+ // account adds part key
+ partKeyFirstValid := uint64(0)
+ partKeyValidityPeriod := uint64(10)
+ partKeyLastValid = currentRound + partKeyValidityPeriod
+ partkeyResponse, _, err := sClient.GenParticipationKeys(sAccount, partKeyFirstValid, partKeyLastValid, 0)
+ a.NoError(err)
+ a.Equal(sAccount, partkeyResponse.Parent.String())
+
+ // account uses part key to go online
+ goOnlineTx, err := sClient.MakeUnsignedGoOnlineTx(sAccount, &partkeyResponse, 0, 0, transactionFee, [32]byte{})
+ a.NoError(err)
+
+ a.Equal(sAccount, goOnlineTx.Src().String())
+ onlineTxID, err = sClient.SignAndBroadcastTransaction(sWH, nil, goOnlineTx)
+
+ if err == nil {
+ break
+ }
+
+ if strings.Contains(err.Error(), "transaction tries to mark an account as online with last voting round in the past") {
+ continue
+ }
+
+ // Error occurred
+ logging.TestingLog(t).Errorf("signAndBroadcastTransaction error: %s", err.Error())
+ logging.TestingLog(t).Errorf("first valid: %d, last valid: %d, current round: %d", partKeyFirstValid, partKeyLastValid, currentRound)
+ a.NoError(err)
+ }
+
+ fixture.AssertValidTxid(onlineTxID)
+ maxRoundsToWaitForTxnConfirm := uint64(3)
+
+ sNodeStatus, err := sClient.Status()
+ a.NoError(err)
+ seededRound := sNodeStatus.LastRound
+
+ fixture.WaitForTxnConfirmation(seededRound+maxRoundsToWaitForTxnConfirm, sAccount, onlineTxID)
+ sNodeStatus, _ = sClient.Status()
+ newAccountStatus, err = pClient.AccountInformation(sAccount)
+ a.NoError(err)
+ a.Equal(basics.Online.String(), newAccountStatus.Status)
+ sAccountData, err := sClient.AccountData(sAccount)
+ a.NoError(err)
+
+ lastValidRound := sAccountData.VoteLastValid
+
+ a.Equal(basics.Round(partKeyLastValid), lastValidRound)
+
+ // We want to wait until we get to one round past the last valid round
+ err = fixture.WaitForRoundWithTimeout(uint64(lastValidRound) + 1)
+ a.NoError(err)
+ newAccountStatus, err = pClient.AccountInformation(sAccount)
+ a.NoError(err)
+
+ // The account should be online still...
+ a.Equal(basics.Online.String(), newAccountStatus.Status)
+
+ // Now we want to send a transaction to the account and test that
+ // it was taken offline after we sent it something
+
+ _, initialRound = fixture.GetBalanceAndRound(richAccount)
+
+ blk, err := sClient.Block(initialRound)
+ a.NoError(err)
+ a.Equal(blk.CurrentProtocol, protocolCheck)
+
+ fixture.SendMoneyAndWait(initialRound, amountToSendInitial, transactionFee, richAccount, sAccount, "")
+
+ err = fixture.WaitForRoundWithTimeout(uint64(initialRound) + 3)
+ a.NoError(err)
+
+ newAccountStatus, err = pClient.AccountInformation(sAccount)
+ a.NoError(err)
+
+ // The account should be equal to the target status now
+ a.Equal(finalStatus.String(), newAccountStatus.Status)
+}
+
+// TestParticipationAccountsExpirationFuture tests that sending a transaction to an account with
+// its last valid round being less than the current round will turn it offline. This test will only
+// work when the consensus protocol enables it (in this case the future protocol)
+func TestParticipationAccountsExpirationFuture(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+
+ t.Parallel()
+
+ var fixture fixtures.RestClientFixture
+
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodesExpiredOfflineVFuture.json"))
+
+ fixture.Start()
+ defer fixture.Shutdown()
+
+ testExpirationAccounts(t, &fixture, basics.Offline, "future")
+}
+
+// TestParticipationAccountsExpirationNonFuture tests that sending a transaction to an account with
+// its last valid round being less than the current round will NOT turn it offline. This tests that
+// when the consensus protocol is older than the required version, accounts are not marked offline
+func TestParticipationAccountsExpirationNonFuture(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+
+ t.Parallel()
+
+ var fixture fixtures.RestClientFixture
+
+ // V29 is the version before participation key expiration checking was enabled
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodesExpiredOfflineV29.json"))
+
+ fixture.Start()
+ defer fixture.Shutdown()
+
+ testExpirationAccounts(t, &fixture, basics.Online, string(protocol.ConsensusV29))
+}
diff --git a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go
index d686ed798..3ec2c323d 100644
--- a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go
+++ b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go
@@ -23,6 +23,7 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/test/framework/fixtures"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -244,7 +245,16 @@ func TestPartitionHalfOffline(t *testing.T) {
// Start all but 10% of stake and verify we recover
var fixture fixtures.RestClientFixture
- fixture.Setup(t, filepath.Join("nettemplates", "TenNodesDistributedMultiWallet.json"))
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "TenNodesDistributedMultiWallet.json"))
+ for _, nodeDir := range fixture.NodeDataDirs() {
+ cfg, err := config.LoadConfigFromDisk(nodeDir)
+ a.NoError(err)
+ // adjust the refresh interval to one hour, so that participation keys are not reloaded during this test.
+ cfg.ParticipationKeysRefreshInterval = time.Hour
+ a.NoError(cfg.SaveToDisk(nodeDir))
+ }
+ fixture.Start()
+
defer fixture.Shutdown()
// Get the 1st node (with Node1-3 wallets) so we can wait until it has reached the target round
diff --git a/test/e2e-go/features/transactions/accountv2_test.go b/test/e2e-go/features/transactions/accountv2_test.go
index b5cd81afb..3cefff35d 100644
--- a/test/e2e-go/features/transactions/accountv2_test.go
+++ b/test/e2e-go/features/transactions/accountv2_test.go
@@ -89,7 +89,7 @@ func TestAccountInformationV2(t *testing.T) {
proto.AgreementFilterTimeout = 400 * time.Millisecond
fixture.SetConsensus(config.ConsensusProtocols{protocol.ConsensusFuture: proto})
- fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
+ fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachV26.json"))
defer fixture.Shutdown()
client := fixture.LibGoalClient
@@ -105,13 +105,15 @@ func TestAccountInformationV2(t *testing.T) {
fee := uint64(1000)
- round, err := client.CurrentRound()
- a.NoError(err)
+ var txn transactions.Transaction
// Fund the manager, so it can issue transactions later on
- _, err = client.SendPaymentFromUnencryptedWallet(creator, user, fee, 10000000000, nil)
+ txn, err = client.SendPaymentFromUnencryptedWallet(creator, user, fee, 10000000000, nil)
a.NoError(err)
- client.WaitForRound(round + 4)
+
+ round, err := client.CurrentRound()
+ a.NoError(err)
+ fixture.WaitForConfirmedTxn(round+4, creator, txn.ID().String())
// There should be no apps to start with
ad, err := client.AccountData(creator)
@@ -165,10 +167,10 @@ int 1
a.NoError(err)
signedTxn, err := client.SignTransactionWithWallet(wh, nil, tx)
a.NoError(err)
- round, err = client.CurrentRound()
- a.NoError(err)
txid, err := client.BroadcastTransaction(signedTxn)
a.NoError(err)
+ round, err = client.CurrentRound()
+ a.NoError(err)
// ensure transaction is accepted into a block within 5 rounds.
confirmed := fixture.WaitForAllTxnsToConfirm(round+5, map[string]string{txid: signedTxn.Txn.Sender.String()})
a.True(confirmed)
@@ -214,10 +216,10 @@ int 1
a.NoError(err)
signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx)
a.NoError(err)
- round, err = client.CurrentRound()
- a.NoError(err)
txid, err = client.BroadcastTransaction(signedTxn)
a.NoError(err)
+ round, err = client.CurrentRound()
+ a.NoError(err)
_, err = client.WaitForRound(round + 3)
a.NoError(err)
// Ensure the txn committed
@@ -285,16 +287,23 @@ int 1
a.NoError(err)
signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx)
a.NoError(err)
- round, err = client.CurrentRound()
- a.NoError(err)
- _, err = client.BroadcastTransaction(signedTxn)
- a.NoError(err)
- _, err = client.WaitForRound(round + 2)
- a.NoError(err)
- // Ensure the txn committed
- resp, err = client.GetPendingTransactions(2)
+ txid, err = client.BroadcastTransaction(signedTxn)
a.NoError(err)
- a.Equal(uint64(0), resp.TotalTxns)
+ for {
+ round, err = client.CurrentRound()
+ a.NoError(err)
+ _, err = client.WaitForRound(round + 1)
+ a.NoError(err)
+ // Ensure the txn committed
+ resp, err = client.GetPendingTransactions(2)
+ a.NoError(err)
+ if resp.TotalTxns == 1 {
+ a.Equal(resp.TruncatedTxns.Transactions[0].TxID, txid)
+ continue
+ }
+ a.Equal(uint64(0), resp.TotalTxns)
+ break
+ }
ad, err = client.AccountData(creator)
a.NoError(err)
diff --git a/test/e2e-go/features/transactions/onlineStatusChange_test.go b/test/e2e-go/features/transactions/onlineStatusChange_test.go
index ae11235be..3c12bf4c9 100644
--- a/test/e2e-go/features/transactions/onlineStatusChange_test.go
+++ b/test/e2e-go/features/transactions/onlineStatusChange_test.go
@@ -17,6 +17,7 @@
package transactions
import (
+ "fmt"
"path/filepath"
"testing"
@@ -145,3 +146,36 @@ func testAccountsCanChangeOnlineState(t *testing.T, templatePath string) {
a.Equal(unmarkedAccountStatus.Status, basics.NotParticipating.String())
}
}
+
+func TestCloseOnError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ t.Parallel()
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ var fixture fixtures.RestClientFixture
+ fixture.Setup(t, filepath.Join("nettemplates", "TwoNodesPartlyOfflineVFuture.json"))
+ defer fixture.Shutdown()
+ client := fixture.LibGoalClient
+
+ // Capture the account we're tracking
+ accountList, err := fixture.GetWalletsSortedByBalance()
+ a.NoError(err)
+
+ initiallyOnline := accountList[0].Address // 35% stake
+ initiallyOffline := accountList[1].Address // 20% stake
+
+ // get the current round for partkey creation
+ _, curRound := fixture.GetBalanceAndRound(initiallyOnline)
+
+ // make a participation key for initiallyOffline
+ _, _, err = client.GenParticipationKeys(initiallyOffline, 0, curRound+1000, 0)
+ a.NoError(err)
+ // check that generating duplicate keys does not crash
+ _, _, err = client.GenParticipationKeys(initiallyOffline, 0, curRound+1000, 0)
+ a.Equal("PersistedParticipation.Persist: failed to install database: table ParticipationAccount already exists", err.Error())
+ // check that lastValid < firstValid does not crash
+ _, _, err = client.GenParticipationKeys(initiallyOffline, curRound+1001, curRound+1000, 0)
+ expected := fmt.Sprintf("FillDBWithParticipationKeys: firstValid %d is after lastValid %d", int(curRound+1001), int(curRound+1000))
+ a.Equal(expected, err.Error())
+}
diff --git a/test/e2e-go/restAPI/restClient_test.go b/test/e2e-go/restAPI/restClient_test.go
index 49d05ae4a..e8aff991d 100644
--- a/test/e2e-go/restAPI/restClient_test.go
+++ b/test/e2e-go/restAPI/restClient_test.go
@@ -30,7 +30,6 @@ import (
"time"
"unicode"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
algodclient "github.com/algorand/go-algorand/daemon/algod/api/client"
@@ -935,131 +934,6 @@ func TestClientPrioritizesPendingTransactions(t *testing.T) {
a.True(statusResponse.TruncatedTxns.Transactions[0].TxID == txHigh.ID().String())
}
-func TestClientCanGetPendingTransactionInfo(t *testing.T) {
- partitiontest.PartitionTest(t)
- defer fixtures.ShutdownSynchronizedTest(t)
-
- a := require.New(fixtures.SynchronizedTest(t))
- var localFixture fixtures.RestClientFixture
- localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
- defer localFixture.Shutdown()
-
- testClient := localFixture.LibGoalClient
-
- testClient.WaitForRound(1)
-
- testClient.SetAPIVersionAffinity(algodclient.APIVersionV2, kmdclient.APIVersionV1)
-
- wh, err := testClient.GetUnencryptedWalletHandle()
- a.NoError(err)
- addresses, err := testClient.ListAddresses(wh)
- a.NoError(err)
- _, someAddress := getMaxBalAddr(t, testClient, addresses)
- if someAddress == "" {
- t.Error("no addr with funds")
- }
- a.NoError(err)
- addr, err := basics.UnmarshalChecksumAddress(someAddress)
-
- params, err := testClient.SuggestedParams()
- a.NoError(err)
-
- firstRound := basics.Round(params.LastRound + 1)
- lastRound := basics.Round(params.LastRound + 1000)
- var gh crypto.Digest
- copy(gh[:], params.GenesisHash)
-
- prog := `#pragma version 5
-byte "A"
-loop:
-int 0
-dup2
-getbyte
-int 1
-+
-dup
-int 97 //ascii code of last char
-<=
-bz end
-setbyte
-dup
-log
-b loop
-end:
-int 1
-return
-`
- ops, err := logic.AssembleString(prog)
- approv := ops.Program
- ops, err = logic.AssembleString("#pragma version 5 \nint 1")
- clst := ops.Program
-
- gl := basics.StateSchema{
- NumByteSlice: 1,
- }
- lc := basics.StateSchema{
- NumByteSlice: 1,
- }
- minTxnFee, _, err := localFixture.CurrentMinFeeAndBalance()
-
- tx, err := testClient.MakeUnsignedApplicationCallTx(0, nil, addresses, nil, nil, transactions.NoOpOC, approv, clst, gl, lc, 0)
- tx.Sender = addr
- tx.Fee = basics.MicroAlgos{Raw: minTxnFee}
- tx.FirstValid = firstRound
- tx.LastValid = lastRound
- tx.GenesisHash = gh
-
- txid, err := testClient.SignAndBroadcastTransaction(wh, nil, tx)
- a.NoError(err)
- _, err = waitForTransaction(t, testClient, someAddress, txid, 60*time.Second)
- a.NoError(err)
- txn, err := testClient.PendingTransactionInformationV2(txid)
- a.NoError(err)
- a.NotNil(txn.Logs)
- a.Equal(32, len(*txn.Logs))
- for i, l := range *txn.Logs {
- assert.Equal(t, []byte(string(rune('B'+i))), l)
- }
-
- //check non-create app call
- wh, err = testClient.GetUnencryptedWalletHandle()
- a.NoError(err)
- addresses, err = testClient.ListAddresses(wh)
- a.NoError(err)
- _, someAddress = getMaxBalAddr(t, testClient, addresses)
- if someAddress == "" {
- t.Error("no addr with funds")
- }
- a.NoError(err)
- addr, err = basics.UnmarshalChecksumAddress(someAddress)
-
- params, err = testClient.SuggestedParams()
- a.NoError(err)
-
- firstRound = basics.Round(params.LastRound + 1)
- lastRound = basics.Round(params.LastRound + 1000)
-
- tx, err = testClient.MakeUnsignedAppNoOpTx(*txn.ApplicationIndex, nil, addresses, nil, nil)
- tx.Sender = addr
- tx.Fee = basics.MicroAlgos{Raw: minTxnFee}
- tx.FirstValid = firstRound
- tx.LastValid = lastRound
- tx.GenesisHash = gh
-
- txid, err = testClient.SignAndBroadcastTransaction(wh, nil, tx)
- a.NoError(err)
- _, err = waitForTransaction(t, testClient, someAddress, txid, 60*time.Second)
- a.NoError(err)
- txn, err = testClient.PendingTransactionInformationV2(txid)
- a.NoError(err)
- a.NotNil(txn.Logs)
- a.Equal(32, len(*txn.Logs))
- for i, l := range *txn.Logs {
- assert.Equal(t, []byte(string(rune('B'+i))), l)
- }
-
-}
-
func TestPendingTransactionInfoInnerTxnAssetCreate(t *testing.T) {
partitiontest.PartitionTest(t)
diff --git a/test/e2e-go/upgrades/rekey_support_test.go b/test/e2e-go/upgrades/rekey_support_test.go
index e6a6a58ab..3def211f9 100644
--- a/test/e2e-go/upgrades/rekey_support_test.go
+++ b/test/e2e-go/upgrades/rekey_support_test.go
@@ -59,10 +59,29 @@ func TestRekeyUpgrade(t *testing.T) {
addrB, err := basics.UnmarshalChecksumAddress(accountB)
a.NoError(err)
+ accountC, err := client.GenerateAddress(wh)
+ a.NoError(err)
+
+ accountD, err := client.GenerateAddress(wh)
+ a.NoError(err)
+
+ addrD, err := basics.UnmarshalChecksumAddress(accountD)
+ a.NoError(err)
+
fee := uint64(1000)
amount := uint64(1000000)
lease := [32]byte{}
+ // move some money from accountA -> accountC
+ tx, err := client.ConstructPayment(accountA, accountC, fee, amount*10, nil, "", lease, basics.Round(0), basics.Round(0))
+ a.NoError(err)
+
+ fundAccountC, err := client.SignTransactionWithWallet(wh, nil, tx)
+ a.NoError(err)
+
+ _, err = client.BroadcastTransaction(fundAccountC)
+ a.NoError(err)
+
curStatus, err := client.Status()
a.NoError(err)
initialStatus := curStatus
@@ -79,11 +98,11 @@ func TestRekeyUpgrade(t *testing.T) {
a.Equal(basics.Address{}, ad.AuthAddr)
// rekey A -> B (RekeyTo check)
- tx, err := client.ConstructPayment(accountA, accountB, fee, amount, nil, "", lease, basics.Round(round), basics.Round(initialStatus.NextVersionRound).SubSaturate(1))
+ tx, err = client.ConstructPayment(accountA, accountB, fee, amount, nil, "", lease, basics.Round(round), basics.Round(initialStatus.NextVersionRound).SubSaturate(1))
a.NoError(err)
tx.RekeyTo = addrB
- rekey, err := client.SignTransactionWithWalletAndSigner(wh, nil, "", tx)
+ rekey, err := client.SignTransactionWithWallet(wh, nil, tx)
a.NoError(err)
_, err = client.BroadcastTransaction(rekey)
@@ -137,12 +156,11 @@ func TestRekeyUpgrade(t *testing.T) {
}
// now that the network already upgraded:
-
- tx, err = client.ConstructPayment(accountA, accountB, fee, amount, nil, "", lease, basics.Round(round), basics.Round(round+1000))
+ tx, err = client.ConstructPayment(accountC, accountD, fee, amount, nil, "", lease, basics.Round(round), basics.Round(round+1000))
a.NoError(err)
- tx.RekeyTo = addrB
+ tx.RekeyTo = addrD
- rekey, err = client.SignTransactionWithWalletAndSigner(wh, nil, "", tx)
+ rekey, err = client.SignTransactionWithWallet(wh, nil, tx)
a.NoError(err)
// now, that we have upgraded to the new protocol which supports rekey, try again.
@@ -155,7 +173,7 @@ func TestRekeyUpgrade(t *testing.T) {
// use rekeyed key to authorize (AuthAddr check)
tx.RekeyTo = basics.Address{}
- rekeyed, err = client.SignTransactionWithWalletAndSigner(wh, nil, accountB, tx)
+ rekeyed, err = client.SignTransactionWithWalletAndSigner(wh, nil, accountD, tx)
a.NoError(err)
_, err = client.BroadcastTransaction(rekeyed)
diff --git a/test/framework/fixtures/fixture.go b/test/framework/fixtures/fixture.go
index 0693a7ecb..44ad4b132 100644
--- a/test/framework/fixtures/fixture.go
+++ b/test/framework/fixtures/fixture.go
@@ -109,7 +109,6 @@ func (st *synchTest) Error(args ...interface{}) {
st.Lock()
defer st.Unlock()
if !st.dontReportFailures {
- st.dontReportFailures = true
st.t.Error(args...)
}
}
@@ -117,7 +116,6 @@ func (st *synchTest) Errorf(format string, args ...interface{}) {
st.Lock()
defer st.Unlock()
if !st.dontReportFailures {
- st.dontReportFailures = true
st.t.Errorf(format, args...)
}
}
@@ -125,7 +123,6 @@ func (st *synchTest) Fail() {
st.Lock()
defer st.Unlock()
if !st.dontReportFailures {
- st.dontReportFailures = true
st.t.Fail()
}
}
diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go
index 0aea3989c..7e57f4bfa 100644
--- a/test/framework/fixtures/libgoalFixture.go
+++ b/test/framework/fixtures/libgoalFixture.go
@@ -170,6 +170,7 @@ func (f *LibGoalFixture) importRootKeys(lg *libgoal.Client, dataDir string) {
f.failOnError(err, "couldn't import secret: %v")
}
accountsWithRootKeys[root.Address().String()] = true
+ handle.Close()
} else if config.IsPartKeyFilename(filename) {
// Fetch a handle to this database
handle, err = db.MakeErasableAccessor(filepath.Join(keyDir, filename))
diff --git a/test/heapwatch/heapWatch.py b/test/heapwatch/heapWatch.py
index 276e6cb73..d91b780ae 100644
--- a/test/heapwatch/heapWatch.py
+++ b/test/heapwatch/heapWatch.py
@@ -218,7 +218,7 @@ class watcher:
if net in self.netseen:
return
self.netseen.add(net)
- net = net + ':8580'
+ net = net + ':' + self.args.port
try:
ad = algodDir(net, net=net, token=self.args.token, admin_token=self.args.admin_token)
self.they.append(ad)
@@ -279,6 +279,7 @@ def main():
ap.add_argument('--tf-roles', default='relay', help='comma separated list of terraform roles to follow')
ap.add_argument('--tf-name-re', action='append', default=[], help='regexp to match terraform node names, may be repeated')
ap.add_argument('--no-svg', dest='svg', default=True, action='store_false', help='do not automatically run `go tool pprof` to generate svg from collected data')
+ ap.add_argument('-p', '--port', default='8580', help='algod port on each host in terraform-inventory')
ap.add_argument('-o', '--out', default=None, help='directory to write to')
ap.add_argument('--verbose', default=False, action='store_true')
args = ap.parse_args()
diff --git a/test/heapwatch/metrics_delta.py b/test/heapwatch/metrics_delta.py
index 0cf1f22c6..937159145 100644
--- a/test/heapwatch/metrics_delta.py
+++ b/test/heapwatch/metrics_delta.py
@@ -8,6 +8,7 @@ import glob
import gzip
import logging
import json
+import math
import os
import re
import statistics
@@ -21,6 +22,21 @@ def num(x):
return float(x)
return int(x)
+def hunum(x):
+ if x >= 10000000000:
+ return '{:.1f}G'.format(x / 1000000000.0)
+ if x >= 1000000000:
+ return '{:.2f}G'.format(x / 1000000000.0)
+ if x >= 10000000:
+ return '{:.1f}M'.format(x / 1000000.0)
+ if x >= 1000000:
+ return '{:.2f}M'.format(x / 1000000.0)
+ if x >= 10000:
+ return '{:.1f}k'.format(x / 1000.0)
+ if x >= 1000:
+ return '{:.2f}k'.format(x / 1000.0)
+ return '{:.2f}x'.format(x)
+
metric_line_re = re.compile(r'(\S+\{[^}]*\})\s+(.*)')
def test_metric_line_re():
@@ -118,7 +134,8 @@ def meanOrZero(seq):
return statistics.mean(seq)
class summary:
- def __init__(self):
+ def __init__(self, label=None):
+ self.label = label or ""
self.tpsMeanSum = 0
self.txBpsMeanSum = 0
self.rxBpsMeanSum = 0
@@ -179,12 +196,23 @@ class summary:
mins.append(min(txp))
maxs.append(max(txp))
means.append(statistics.mean(txp))
- return 'txnpool({} {} {} {} {})'.format(
+ if not means or not maxs or not mins:
+ return 'txnpool(no stats)'
+ return 'txnpool({:.0f} {:.0f} {:.0f} {:.0f} {:.0f})'.format(
min(mins), min(means), statistics.mean(means), max(means), max(maxs)
)
def __str__(self):
- return '{}\n{}\nsummary: {:0.2f} TPS, {:0.0f} tx B/s, {:0.0f} rx B/s'.format(self.byMsg(), self.txPool(), self.tpsMeanSum/self.sumsCount, self.txBpsMeanSum/self.sumsCount, self.rxBpsMeanSum/self.sumsCount)
+ if not self.sumsCount:
+ tps, txbps, rxbps = math.nan, math.nan, math.nan
+ else:
+ tps = self.tpsMeanSum/self.sumsCount
+ txbps = self.txBpsMeanSum/self.sumsCount
+ rxbps = self.rxBpsMeanSum/self.sumsCount
+ labelspace = ""
+ if self.label:
+ labelspace = self.label + " "
+ return '{byMsg}\n{labelspace}{txPool}\n{labelspace}summary: {TPS:0.2f} TPS, tx {txBps}B/s, rx {rxBps}B/s'.format(labelspace=labelspace, byMsg=self.byMsg(), txPool=self.txPool(), TPS=tps, txBps=hunum(txbps), rxBps=hunum(rxbps))
def anynickre(nick_re, nicks):
if not nick_re:
@@ -216,7 +244,14 @@ def gather_metrics_files_by_nick(metrics_files, metrics_dirs=None):
continue
nick = m.group(1)
dapp(filesByNick, nick, path)
- return filesByNick
+ return tf_inventory_path, filesByNick, nonick
+
+def process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args):
+ nretup = (nre,)
+ for rnick, paths in filesByNick.items():
+ nick = nick_to_tfname.get(rnick, rnick)
+ if anynickre(nretup, (rnick,nick)):
+ rsum(process_files(args, nick, paths), nick)
def main():
test_metric_line_re()
@@ -227,6 +262,7 @@ def main():
ap.add_argument('--deltas', default=None, help='path to write csv deltas')
ap.add_argument('--report', default=None, help='path to write csv report')
ap.add_argument('--nick-re', action='append', default=[], help='regexp to filter node names, may be repeated')
+ ap.add_argument('--nick-lre', action='append', default=[], help='label:regexp to filter node names, may be repeated')
ap.add_argument('--verbose', default=False, action='store_true')
args = ap.parse_args()
@@ -240,7 +276,7 @@ def main():
if args.dir:
metrics_dirs.add(args.dir)
metrics_files += glob.glob(os.path.join(args.dir, '*.metrics'))
- filesByNick = gather_metrics_files_by_nick(metrics_files, metrics_dirs)
+ tf_inventory_path, filesByNick, nonick = gather_metrics_files_by_nick(metrics_files, metrics_dirs)
if not tf_inventory_path:
for md in metrics_dirs:
tp = os.path.join(md, 'terraform-inventory.host')
@@ -270,21 +306,26 @@ def main():
elif len(found) > 1:
logger.warning('ip %s (%s) found in nicks: %r', ip, name, found)
else:
- logger.warning('ip %s no nick')
+ logger.warning('ip %s (%s) no nick', ip, name)
#logger.debug('nick_to_tfname %r', nick_to_tfname)
if args.nick_re:
# use each --nick-re=foo as a group
for nre in args.nick_re:
rsum = summary()
- nretup = (nre,)
- for rnick, paths in filesByNick.items():
- nick = nick_to_tfname.get(rnick, rnick)
- if anynickre(nretup, (rnick,nick)):
- rsum(process_files(args, nick, paths), nick)
+ process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args)
print(rsum)
print('\n')
return 0
+ if args.nick_lre:
+ for lnre in args.nick_lre:
+ label, nre = lnre.split(':', maxsplit=1)
+ rsum = summary(label)
+ process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args)
+ print(rsum)
+ print('\n')
+ return 0
+
# no filters, glob it all up
rsum = summary()
diff --git a/test/muleCI/Jenkinsfile b/test/muleCI/Jenkinsfile
index ff8a41ec4..7f4d5a219 100644
--- a/test/muleCI/Jenkinsfile
+++ b/test/muleCI/Jenkinsfile
@@ -1,3 +1,3 @@
@Library('go-algorand-ci') _
-muleCI('test/muleCI/mule.yaml', '0.0.12')
+muleCI('test/muleCI/mule.yaml', '0.0.23')
diff --git a/test/muleCI/mule.yaml b/test/muleCI/mule.yaml
index 0ec2ac277..2f4e3af80 100644
--- a/test/muleCI/mule.yaml
+++ b/test/muleCI/mule.yaml
@@ -9,6 +9,7 @@ agents:
- NETWORK=$NETWORK
- VERSION=$VERSION
- BUILD_NUMBER=$BUILD_NUMBER
+ - GOHOSTARCH=amd64
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
- ARCH=amd64
@@ -22,6 +23,7 @@ agents:
- NETWORK=$NETWORK
- VERSION=$VERSION
- BUILD_NUMBER=$BUILD_NUMBER
+ - GOHOSTARCH=amd64
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
- ARCH=amd64
@@ -35,6 +37,7 @@ agents:
- NETWORK=$NETWORK
- VERSION=$VERSION
- BUILD_NUMBER=$BUILD_NUMBER
+ - GOHOSTARCH=arm64
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
- ARCH=arm64v8
@@ -48,6 +51,7 @@ agents:
- NETWORK=$NETWORK
- VERSION=$VERSION
- BUILD_NUMBER=$BUILD_NUMBER
+ - GOHOSTARCH=arm
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
- ARCH=arm32v6
@@ -60,6 +64,7 @@ agents:
- NETWORK=$NETWORK
- VERSION=$VERSION
- BUILD_NUMBER=$BUILD_NUMBER
+ - GOHOSTARCH=amd64
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
volumes:
@@ -73,6 +78,7 @@ agents:
- NETWORK=$NETWORK
- VERSION=$VERSION
- BUILD_NUMBER=$BUILD_NUMBER
+ - GOHOSTARCH=amd64
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
volumes:
diff --git a/test/scripts/e2e.sh b/test/scripts/e2e.sh
index 594efb0c2..571d11781 100755
--- a/test/scripts/e2e.sh
+++ b/test/scripts/e2e.sh
@@ -24,7 +24,7 @@ Options:
-n Run tests without building binaries (Binaries are expected in PATH)
"
NO_BUILD=false
-while getopts ":c:nh" opt; do
+while getopts ":c:nhi" opt; do
case ${opt} in
c ) CHANNEL=$OPTARG
;;
@@ -33,7 +33,11 @@ while getopts ":c:nh" opt; do
;;
h ) echo "${HELP}"
exit 0
- ;;
+ ;;
+ i ) echo " Interactive session"
+ echo "######################################################################"
+ INTERACTIVE=true
+ ;;
\? ) echo "${HELP}"
exit 2
;;
@@ -122,7 +126,24 @@ if [ -z "$E2E_TEST_FILTER" ] || [ "$E2E_TEST_FILTER" == "SCRIPTS" ]; then
"${TEMPDIR}/ve/bin/pip3" install --upgrade py-algorand-sdk cryptography
duration "e2e client setup"
- "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} "$SRCROOT"/test/scripts/e2e_subs/*.sh
+ if [ $INTERACTIVE ]; then
+ echo "********** READY **********"
+ echo "The test environment is now set. Run the tests using the following command on a different terminal after setting the path."
+ echo ""
+ echo "export VIRTUAL_ENV=\"${TEMPDIR}/ve\""
+ echo "export PATH=\"\$VIRTUAL_ENV/bin:\$PATH\""
+ echo ""
+ echo "${TEMPDIR}/ve/bin/python3" test/scripts/e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} "$SRCROOT"/test/scripts/e2e_subs/SCRIPT_FILE_NAME
+ echo ""
+ echo "Press enter to shut down the test environment..."
+ read a
+ echo -n "deactivating..."
+ deactivate
+ echo "done"
+ exit
+ fi
+
+ "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} "$SRCROOT"/test/scripts/e2e_subs/*.{sh,py}
duration "parallel client runner"
for vdir in "$SRCROOT"/test/scripts/e2e_subs/v??; do
diff --git a/test/scripts/e2e_subs/app-assets.sh b/test/scripts/e2e_subs/app-assets.sh
new file mode 100755
index 000000000..8b1fa6f5d
--- /dev/null
+++ b/test/scripts/e2e_subs/app-assets.sh
@@ -0,0 +1,266 @@
+#!/bin/bash
+
+filename=$(basename "$0")
+scriptname="${filename%.*}"
+date "+${scriptname} start %Y%m%d_%H%M%S"
+
+
+my_dir="$(dirname "$0")"
+source "$my_dir/rest.sh" "$@"
+function rest() {
+ curl -q -s -H "Authorization: Bearer $PUB_TOKEN" "$NET$1"
+}
+
+set -e
+set -x
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+
+TEAL=test/scripts/e2e_subs/tealprogs
+
+gcmd="goal -w ${WALLET}"
+
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+# Create a smaller account so rewards won't change balances.
+SMALL=$(${gcmd} account new | awk '{ print $6 }')
+# Under one algo receives no rewards
+${gcmd} clerk send -a 999000 -f "$ACCOUNT" -t "$SMALL"
+
+function balance {
+ acct=$1; shift
+ goal account balance -a "$acct" | awk '{print $1}'
+}
+
+[ "$(balance "$ACCOUNT")" = 999999000000 ]
+[ "$(balance "$SMALL")" = 999000 ]
+
+function created_assets {
+ acct=$1;
+ goal account info -a "$acct" | awk '/Created Assets:/,/Held Assets:/' | grep "ID*" | awk -F'[, ]' '{print $4}'
+}
+
+function created_supply {
+ acct=$1;
+ goal account info -a "$acct" | awk '/Created Assets:/,/Held Assets:/' | grep "ID*" | awk -F'[, ]' '{print $7}'
+}
+
+function asset_bal {
+ acct=$1;
+ goal account info -a "$acct" | awk '/Held Assets:/,/Created Apps:/' | grep "ID*" | awk -F'[, ]' '{print $7}'
+}
+
+function asset_ids {
+ acct=$1;
+ goal account info -a "$acct" | awk '/Held Assets:/,/Created Apps:/' | grep "ID*" | awk -F'[, ]' '{print $2}'
+}
+#
+function assets {
+ acct=$1;
+ goal account info -a "$acct" | awk '/Held Assets:/,/Created Apps:/' | grep "ID*" | awk -F'[, ]' '{print $4}'
+}
+
+APPID=$(${gcmd} app create --creator "${SMALL}" --approval-prog=${TEAL}/assets-escrow.teal --global-byteslices 4 --global-ints 0 --local-byteslices 0 --local-ints 1 --clear-prog=${TEAL}/approve-all.teal | grep Created | awk '{ print $6 }')
+[ "$(balance "$SMALL")" = 998000 ] # 1000 fee
+
+function appl {
+ method=$1; shift
+ ${gcmd} app call --app-id="$APPID" --app-arg="str:$method" "$@"
+}
+
+function app-txid {
+ # When app (call or optin) submits, this is how the txid is
+ # printed. Not in appl() because appl is also used with -o to
+ # create tx
+ grep -o -E 'txid [A-Z0-9]{52}' | cut -c 6- | head -1
+}
+
+function asset-id {
+ grep -o -E 'index [A-Z0-9]+'| cut -c 7-
+}
+
+APPACCT=$(python -c "import algosdk.encoding as e; print(e.encode_address(e.checksum(b'appID'+($APPID).to_bytes(8, 'big'))))")
+
+function asset-create {
+ amount=$1; shift
+ ${gcmd} asset create --creator "$SMALL" --total "$amount" --decimals 0 "$@"
+}
+
+function asset-deposit {
+ amount=$1;shift
+ ID=$1; shift
+ ${gcmd} asset send -f "$SMALL" -t "$APPACCT" -a "$amount" --assetid "$ID" "$@"
+}
+
+function asset-optin {
+ ${gcmd} asset send -a 0 "$@"
+}
+
+function clawback_addr {
+ grep -o -E 'Clawback address: [A-Z0-9]{58}' | awk '{print $3}'
+}
+
+function payin {
+ amount=$1; shift
+ ${gcmd} clerk send -f "$SMALL" -t "$APPACCT" -a "$amount" "$@"
+}
+
+T=$TEMPDIR
+
+function sign {
+ ${gcmd} clerk sign -i "$T/$1.tx" -o "$T/$1.stx"
+}
+
+TXID=$(${gcmd} app optin --app-id "$APPID" --from "${SMALL}" | app-txid)
+# Rest succeeds, no stray inner-txn array
+[ "$(rest "/v2/transactions/pending/$TXID" | jq '.["inner-txn"]')" == null ]
+[ "$(balance "$SMALL")" = 997000 ] # 1000 fee
+
+ASSETID=$(asset-create 1000000 --name "e2e" --unitname "e" | asset-id)
+[ "$(balance "$SMALL")" = 996000 ] # 1000 fee
+
+${gcmd} clerk send -a 999000 -f "$ACCOUNT" -t "$APPACCT"
+appl "optin():void" --foreign-asset="$ASSETID" --from="$SMALL"
+[ "$(balance "$APPACCT")" = 998000 ] # 1000 fee
+[ "$(balance "$SMALL")" = 995000 ]
+
+appl "deposit():void" -o "$T/deposit.tx" --from="$SMALL"
+asset-deposit 1000 $ASSETID -o "$T/axfer1.tx"
+cat "$T/deposit.tx" "$T/axfer1.tx" | ${gcmd} clerk group -i - -o "$T/group.tx"
+sign group
+${gcmd} clerk rawsend -f "$T/group.stx"
+
+[ "$(asset_ids "$SMALL")" = $ASSETID ] # asset ID
+[ "$(asset_bal "$SMALL")" = 999000 ] # asset balance
+[ "$(asset_ids "$APPACCT")" = $ASSETID ]
+[ "$(asset_bal "$APPACCT")" = 1000 ]
+[ "$(balance "$SMALL")" = 993000 ] # 2 fees
+[ "$(balance "$APPACCT")" = 998000 ]
+
+# Withdraw 100 in app. Confirm that inner txn is visible to transaction API.
+TXID=$(appl "withdraw(uint64):void" --app-arg="int:100" --foreign-asset="$ASSETID" --from="$SMALL" | app-txid)
+[ "$(rest "/v2/transactions/pending/$TXID" \
+ | jq '.["inner-txns"][0].txn.txn.aamt')" = 100 ]
+[ "$(rest "/v2/transactions/pending/$TXID?format=msgpack" | msgpacktool -d \
+ | jq '.["inner-txns"][0].txn.txn.type')" = '"axfer"' ]
+# Now confirm it's in blocks API (this time in our internal form)
+ROUND=$(rest "/v2/transactions/pending/$TXID" | jq '.["confirmed-round"]')
+rest "/v2/blocks/$ROUND" | jq .block.txns[0].dt.itx
+
+[ "$(asset_bal "$SMALL")" = 999100 ] # 100 asset withdrawn
+[ "$(asset_bal "$APPACCT")" = 900 ] # 100 asset withdrawn
+[ "$(balance "$SMALL")" = 992000 ] # 1 fee
+[ "$(balance "$APPACCT")" = 997000 ] # fee paid by app
+
+appl "withdraw(uint64):void" --app-arg="int:100" --foreign-asset="$ASSETID" --fee 2000 --from="$SMALL"
+[ "$(asset_bal "$SMALL")" = 999200 ] # 100 asset withdrawn
+[ "$(balance "$SMALL")" = 990000 ] # 2000 fee
+[ "$(asset_bal "$APPACCT")" = 800 ] # 100 asset withdrawn
+[ "$(balance "$APPACCT")" = 997000 ] # fee credit used
+
+# Try to withdraw too much
+appl "withdraw(uint64):void" --app-arg="int:1000" --foreign-asset="$ASSETID" --from="$SMALL" && exit 1
+[ "$(asset_bal "$SMALL")" = 999200 ] # no change
+[ "$(asset_bal "$APPACCT")" = 800 ] # no change
+[ "$(balance "$SMALL")" = 990000 ]
+[ "$(balance "$APPACCT")" = 997000 ]
+
+# Show that it works AT exact asset balance
+appl "withdraw(uint64):void" --app-arg="int:800" --foreign-asset="$ASSETID" --from="$SMALL"
+[ "$(asset_bal "$SMALL")" = 1000000 ]
+[ "$(asset_bal "$APPACCT")" = 0 ]
+[ "$(balance "$SMALL")" = 989000 ]
+[ "$(balance "$APPACCT")" = 996000 ]
+
+USER=$(${gcmd} account new | awk '{ print $6 }') #new account
+${gcmd} clerk send -a 999000 -f "$ACCOUNT" -t "$USER" #fund account
+asset-optin -f "$USER" -t "$USER" --assetid "$ASSETID" #opt in to asset
+# SET $USER as clawback address
+${gcmd} asset config --manager $SMALL --assetid $ASSETID --new-clawback $USER
+cb_addr=$(${gcmd} asset info --assetid $ASSETID | clawback_addr)
+[ "$cb_addr" = "$USER" ]
+${gcmd} asset send -f "$SMALL" -t "$USER" -a "1000" --assetid "$ASSETID" --clawback "$USER"
+[ $(asset_bal "$USER") = 1000 ]
+[ $(asset_bal "$SMALL") = 999000 ]
+# rekey $USER to "$APPACCT"
+${gcmd} clerk send --from "$USER" --to "$USER" -a 0 --rekey-to "$APPACCT"
+# $USER is still the clawback address, but after the rekey its spends must be authorized by "$APPACCT", so this send fails
+${gcmd} asset send -f "$SMALL" -t "$USER" -a "1000" --assetid "$ASSETID" --clawback "$USER" && exit 1
+
+USER2=$(${gcmd} account new | awk '{ print $6 }') #new account
+${gcmd} clerk send -a 999000 -f "$ACCOUNT" -t "$USER2" #fund account
+asset-optin -f "$USER2" -t "$USER2" --assetid "$ASSETID" #opt in to asset
+# set $APPACCT as clawback address on asset
+${gcmd} asset config --manager $SMALL --assetid $ASSETID --new-clawback $APPACCT
+cb_addr=$(${gcmd} asset info --assetid $ASSETID | clawback_addr)
+[ "$cb_addr" = "$APPACCT" ] #app is set as clawback address
+# transfer asset from $SMALL to $USER2
+appl "transfer(uint64):void" --app-arg="int:1000" --foreign-asset="$ASSETID" --from="$SMALL" --app-account="$USER2"
+[ $(asset_bal "$USER2") = 1000 ]
+[ $(asset_bal "$SMALL") = 998000 ]
+# transfer asset from $USER2 back to $SMALL
+appl "transfer(uint64):void" --app-arg="int:100" --foreign-asset="$ASSETID" --from="$USER2" --app-account="$SMALL"
+[ $(asset_bal "$USER2") = 900 ]
+[ $(asset_bal "$SMALL") = 998100 ]
+
+# opt in more assets
+ASSETID2=$(asset-create 1000000 --name "alpha" --unitname "a" | asset-id)
+appl "optin():void" --foreign-asset="$ASSETID2" --from="$SMALL"
+ASSETID3=$(asset-create 1000000 --name "beta" --unitname "b" | asset-id)
+appl "optin():void" --foreign-asset="$ASSETID3" --from="$SMALL"
+
+IDs="$ASSETID
+$ASSETID2
+$ASSETID3"
+[[ "$(asset_ids "$APPACCT")" = $IDs ]] # account has 3 assets
+
+# opt out of assets
+appl "close():void" --foreign-asset="$ASSETID2" --from="$SMALL"
+IDs="$ASSETID
+$ASSETID3"
+[[ "$(asset_ids "$APPACCT")" = $IDs ]] # account has 2 assets
+appl "close():void" --foreign-asset="$ASSETID" --from="$SMALL"
+appl "close():void" --foreign-asset="$ASSETID3" --from="$SMALL"
+[[ "$(asset_ids "$APPACCT")" = "" ]] # account has no assets
+
+# app creates asset
+appl "create(uint64):void" --app-arg="int:1000000" --from="$SMALL"
+[ "$(created_assets "$APPACCT")" = "X" ]
+[ "$(created_supply "$APPACCT")" = 1000000 ]
+
+# mint asset
+APPASSETID=$(asset_ids "$APPACCT")
+asset-optin -f "$SMALL" -t "$SMALL" --assetid "$APPASSETID" #opt in to asset
+appl "mint():void" --from="$SMALL" --foreign-asset="$APPASSETID" -o "$T/mint.tx"
+payin 1000 -o "$T/pay1.tx"
+cat "$T/mint.tx" "$T/pay1.tx" | ${gcmd} clerk group -i - -o "$T/group.tx"
+sign group
+${gcmd} clerk rawsend -f "$T/group.stx"
+
+IDs="$ASSETID
+$ASSETID2
+$ASSETID3
+$APPASSETID"
+[[ "$(asset_ids "$SMALL")" = $IDs ]] # has new asset
+[ "$(asset_bal "$SMALL" | awk 'FNR==4{print $0}')" = 1000 ] # correct balances
+[ "$(asset_bal "$APPACCT")" = 999000 ] # 1k sent
+
+# freeze asset
+appl "freeze(uint64):void" --app-arg="int:1" --foreign-asset="$APPASSETID" --from="$SMALL"
+# minting should fail since the asset is frozen on $SMALL
+appl "mint():void" --from="$SMALL" -o "$T/mint.tx" --foreign-asset="$APPASSETID"
+payin 1000 -o "$T/pay1.tx"
+cat "$T/mint.tx" "$T/pay1.tx" | ${gcmd} clerk group -i - -o "$T/group.tx"
+sign group
+${gcmd} clerk rawsend -f "$T/group.stx" && exit 1
+# unfreeze asset
+appl "freeze(uint64):void" --app-arg="int:0" --foreign-asset="$APPASSETID" --from="$SMALL"
+appl "mint():void" --from="$SMALL" -o "$T/mint.tx" --foreign-asset="$APPASSETID"
+payin 1000 -o "$T/pay1.tx"
+cat "$T/mint.tx" "$T/pay1.tx" | ${gcmd} clerk group -i - -o "$T/group.tx"
+sign group
+${gcmd} clerk rawsend -f "$T/group.stx"
+[ "$(asset_bal "$SMALL" | awk 'FNR==4{print $0}')" = 2000 ] # minted 1000
+
+date "+${scriptname} OK %Y%m%d_%H%M%S"
diff --git a/test/scripts/e2e_subs/app-group.py b/test/scripts/e2e_subs/app-group.py
new file mode 100755
index 000000000..738e1cb79
--- /dev/null
+++ b/test/scripts/e2e_subs/app-group.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+import os
+import sys
+from goal import Goal
+
+from datetime import datetime
+
+stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+print(f"{os.path.basename(sys.argv[0])} start {stamp}")
+
+goal = Goal(sys.argv[1], autosend=True)
+
+joe = goal.new_account()
+
+txinfo, err = goal.pay(goal.account, joe, amt=500_000)
+assert not err, err
+
+# Turn off rewards for precise balance checking
+txinfo, err = goal.keyreg(joe, nonpart=True)
+assert not err, err
+joeb = goal.balance(joe)
+
+teal = """
+#pragma version 6
+ txn ApplicationID
+ bz end
+ // Pay the sender and Accounts[1]. Zero the first fee so the second defaults high enough to cover both.
+ itxn_begin
+ int pay
+ itxn_field TypeEnum
+
+ txn Sender
+ itxn_field Receiver
+
+ int 5
+ itxn_field Amount
+
+ int 0
+ itxn_field Fee // No fee, so 2nd fee is doubled
+
+ itxn_next
+
+ int pay
+ itxn_field TypeEnum
+
+ txn Accounts 1
+ itxn_field Receiver
+
+ int 5
+ itxn_field Amount
+
+ itxn_submit
+
+ itxn Fee
+ int 2000
+ ==
+ assert
+
+end:
+ int 1
+"""
+
+txinfo, err = goal.app_create(joe, goal.assemble(teal))
+assert not err, err
+app_id = txinfo['application-index']
+assert app_id
+
+# Fund the app account
+txinfo, err = goal.pay(goal.account, goal.app_address(app_id), amt=400_000)
+assert not err, err
+
+
+txinfo, err = goal.app_call(joe, app_id, accounts=[goal.account])
+assert not err, err
+
+print(f"{os.path.basename(sys.argv[0])} OK {stamp}")
diff --git a/test/scripts/e2e_subs/app-rekey.py b/test/scripts/e2e_subs/app-rekey.py
new file mode 100755
index 000000000..94bfcd22a
--- /dev/null
+++ b/test/scripts/e2e_subs/app-rekey.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+
+import os
+import sys
+from goal import Goal
+
+from datetime import datetime
+
+stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+print(f"{os.path.basename(sys.argv[0])} start {stamp}")
+
+goal = Goal(sys.argv[1], autosend=True)
+
+joe = goal.new_account()
+flo = goal.new_account()
+
+txinfo, err = goal.pay(goal.account, joe, amt=500_000)
+assert not err, err
+
+# Turn off rewards for precise balance checking
+txinfo, err = goal.keyreg(joe, nonpart=True)
+assert not err, err
+joeb = goal.balance(joe)
+
+txinfo, err = goal.pay(goal.account, flo, amt=500_000)
+assert not err, err
+
+teal = """
+#pragma version 6
+ txn ApplicationID
+ bz end
+ // Use the rekeyed account to make a payment, and give it back
+ itxn_begin
+ int pay
+ itxn_field TypeEnum
+
+ txn Accounts 1
+ itxn_field Sender
+
+ txn Accounts 0
+ itxn_field Receiver
+
+ int 5
+ itxn_field Amount
+
+ txn Accounts 1
+ itxn_field RekeyTo
+ itxn_submit
+
+end:
+ int 1
+"""
+
+txinfo, err = goal.app_create(joe, goal.assemble(teal))
+assert not err, err
+joeb = joeb-1000
+app_id = txinfo['application-index']
+assert app_id
+
+app_addr = goal.app_address(app_id)
+# flo rekeys her account to the app, app spends from it, then rekeys it back
+txinfo, err = goal.pay(flo, joe, amt=1, rekey_to=app_addr)
+assert not err, err
+assert goal.balance(joe) == joeb+1, goal.balance(joe)
+
+# can no longer spend
+txinfo, err = goal.pay(flo, joe, amt=1)
+assert err
+assert goal.balance(joe) == joeb+1, goal.balance(joe)
+
+txinfo, err = goal.app_call(joe, app_id, accounts=[flo])
+assert not err, err
+joeb = joeb-1000
+assert goal.balance(joe) == joeb+6, goal.balance(joe)
+
+# can spend again
+txinfo, err = goal.pay(flo, joe, amt=1)
+assert not err, err
+assert goal.balance(joe) == joeb+7, goal.balance(joe)
+
+print(f"{os.path.basename(sys.argv[0])} OK {stamp}")
diff --git a/test/scripts/e2e_subs/e2e-app-abi-arg.sh b/test/scripts/e2e_subs/e2e-app-abi-arg.sh
new file mode 100755
index 000000000..c6f719a47
--- /dev/null
+++ b/test/scripts/e2e_subs/e2e-app-abi-arg.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+date '+app-abi-arg-test start %Y%m%d_%H%M%S'
+
+set -e
+set -x
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+
+# Directory of this bash program
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+gcmd="goal -w ${WALLET}"
+
+GLOBAL_INTS=2
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+
+printf '#pragma version 2\nint 1' > "${TEMPDIR}/simple.teal"
+PROGRAM=($(${gcmd} clerk compile "${TEMPDIR}/simple.teal"))
+APPID=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog ${DIR}/tealprogs/app-abi-arg.teal --clear-prog ${TEMPDIR}/simple.teal --global-byteslices 0 --global-ints ${GLOBAL_INTS} --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }')
+
+# Should succeed to opt in with string "optin"
+${gcmd} app optin --app-id $APPID --from $ACCOUNT --app-arg 'abi:string:"optin"'
+
+# Call should now succeed
+${gcmd} app call --app-id $APPID --from $ACCOUNT --app-arg 'abi:uint64:0'
+${gcmd} app call --app-id $APPID --from $ACCOUNT --app-arg 'abi:byte[3]:"AAEC"'
+${gcmd} app call --app-id $APPID --from $ACCOUNT --app-arg 'abi:(string,(byte[3],ufixed64x3)):["uwu",["AAEC",12.34]]'
+${gcmd} app call --app-id $APPID --from $ACCOUNT --app-arg 'abi:(uint64,string,bool[]):[399,"should pass",[true,false,false,true]]'
+
+# Delete application should still succeed
+${gcmd} app delete --app-id $APPID --from $ACCOUNT
+
+# Clear should still succeed
+${gcmd} app clear --app-id $APPID --from $ACCOUNT
diff --git a/test/scripts/e2e_subs/e2e-app-abi-method.sh b/test/scripts/e2e_subs/e2e-app-abi-method.sh
new file mode 100755
index 000000000..ec3a0d71e
--- /dev/null
+++ b/test/scripts/e2e_subs/e2e-app-abi-method.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+
+date '+app-abi-method-test start %Y%m%d_%H%M%S'
+
+set -e
+set -x
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+
+# Directory of this bash program
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+gcmd="goal -w ${WALLET}"
+
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+
+printf '#pragma version 2\nint 1' > "${TEMPDIR}/simple.teal"
+PROGRAM=($(${gcmd} clerk compile "${TEMPDIR}/simple.teal"))
+APPID=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog ${DIR}/tealprogs/app-abi-method-example.teal --clear-prog ${TEMPDIR}/simple.teal --global-byteslices 0 --global-ints 0 --local-byteslices 1 --local-ints 0 | grep Created | awk '{ print $6 }')
+
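+# Each check that follows runs `goal app method`, captures its combined
+# output, and matches it against the expected success string.
+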
+# Opt in
+RES=$(${gcmd} app method --method "optIn(string)string" --arg "\"Algorand Fan\"" --on-completion optin --app-id $APPID --from $ACCOUNT 2>&1 || true)
+EXPECTED="method optIn(string)string succeeded with output: \"hello Algorand Fan\""
+if [[ $RES != *"${EXPECTED}"* ]]; then
+ date '+app-abi-method-test FAIL the method call to optIn(string)string should not fail %Y%m%d_%H%M%S'
+ false
+fi
+
+# 1 + 2 = 3
+RES=$(${gcmd} app method --method "add(uint64,uint64)uint64" --arg 1 --arg 2 --app-id $APPID --from $ACCOUNT 2>&1 || true)
+EXPECTED="method add(uint64,uint64)uint64 succeeded with output: 3"
+if [[ $RES != *"${EXPECTED}"* ]]; then
+ date '+app-abi-method-test FAIL the method call to add(uint64,uint64)uint64 should not fail %Y%m%d_%H%M%S'
+ false
+fi
+
+# 18446744073709551614 + 1 = 18446744073709551615
+RES=$(${gcmd} app method --method "add(uint64,uint64)uint64" --arg 18446744073709551614 --arg 1 --app-id $APPID --from $ACCOUNT 2>&1 || true)
+EXPECTED="method add(uint64,uint64)uint64 succeeded with output: 18446744073709551615"
+if [[ $RES != *"${EXPECTED}"* ]]; then
+ date '+app-abi-method-test FAIL the method call to add(uint64,uint64)uint64 should not fail %Y%m%d_%H%M%S'
+ false
+fi
+
+goal clerk send --from $ACCOUNT --to $ACCOUNT --amount 1000000 -o "${TEMPDIR}/pay-txn-arg.tx"
+
+# Payment with return true
+RES=$(${gcmd} app method --method "payment(pay,uint64)bool" --arg ${TEMPDIR}/pay-txn-arg.tx --arg 1000000 --app-id $APPID --from $ACCOUNT 2>&1 || true)
+EXPECTED="method payment(pay,uint64)bool succeeded with output: true"
+if [[ $RES != *"${EXPECTED}"* ]]; then
+ date '+app-abi-method-test FAIL the method call to payment(pay,uint64)bool should not fail %Y%m%d_%H%M%S'
+ false
+fi
+
+# Payment with return false
+RES=$(${gcmd} app method --method "payment(pay,uint64)bool" --arg ${TEMPDIR}/pay-txn-arg.tx --arg 1000001 --app-id $APPID --from $ACCOUNT 2>&1 || true)
+EXPECTED="method payment(pay,uint64)bool succeeded with output: false"
+if [[ $RES != *"${EXPECTED}"* ]]; then
+ date '+app-abi-method-test FAIL the method call to payment(pay,uint64)bool should not fail %Y%m%d_%H%M%S'
+ false
+fi
+
+# Close out
+RES=$(${gcmd} app method --method "closeOut()string" --on-completion closeout --app-id $APPID --from $ACCOUNT 2>&1 || true)
+EXPECTED="method closeOut()string succeeded with output: \"goodbye Algorand Fan\""
+if [[ $RES != *"${EXPECTED}"* ]]; then
+ date '+app-abi-method-test FAIL the method call to closeOut()string should not fail %Y%m%d_%H%M%S'
+ false
+fi
+
+# Delete
+RES=$(${gcmd} app method --method "delete()void" --on-completion deleteapplication --app-id $APPID --from $ACCOUNT 2>&1 || true)
+EXPECTED="method delete()void succeeded"
+if [[ $RES != *"${EXPECTED}"* ]]; then
+ date '+app-abi-method-test FAIL the method call to delete()void should not fail %Y%m%d_%H%M%S'
+ false
+fi
diff --git a/test/scripts/e2e_subs/e2e-logs.sh b/test/scripts/e2e_subs/e2e-logs.sh
new file mode 100755
index 000000000..7d40a62e1
--- /dev/null
+++ b/test/scripts/e2e_subs/e2e-logs.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+filename=$(basename "$0")
+scriptname="${filename%.*}"
+date "+${scriptname} start %Y%m%d_%H%M%S"
+
+
+my_dir="$(dirname "$0")"
+source "$my_dir/rest.sh" "$@"
+function rest() {
+ curl -q -s -H "Authorization: Bearer $PUB_TOKEN" "$NET$1"
+}
+
+function app_txid {
+ # When app (call or optin) submits, this is how the txid is
+ # printed.
+ grep -o -E 'txid [A-Z0-9]{52}' | cut -c 6- | head -1
+}
+
+set -e
+set -x
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+
+TEAL=test/scripts/e2e_subs/tealprogs
+
+gcmd="goal -w ${WALLET}"
+
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+EXP=(B C D E F G H I J K L M N O P Q R S T U V W X Y Z \[ \\ \] ^ _ \` a b )
+
+# app create
+TXID=$(${gcmd} app create --creator "${ACCOUNT}" --approval-prog=${TEAL}/logs.teal --global-byteslices 4 --global-ints 0 --local-byteslices 0 --local-ints 1 --clear-prog=${TEAL}/approve-all.teal | app_txid)
+response=$(rest "/v2/transactions/pending/$TXID")
+# log len
+[ "$(echo "$response" | jq '.logs | length')" = 32 ]
+# log content
+i=0
+for log in $(echo "$response" | jq -r '.logs | .[]')
+ do
+ c=`echo -n "${log}" | base64 --decode`
+ [ "$c" = "${EXP[i]}" ]
+ i=$((i+1))
+ done
+
+# app call
+APPID=$(echo "$response" | jq '.["application-index"]')
+TXID=$(${gcmd} app call --app-id "${APPID}" --from "$ACCOUNT" | app_txid)
+response=$(rest "/v2/transactions/pending/$TXID")
+# log len
+[ "$(echo "$response" | jq '.logs | length')" = 32 ]
+# log content
+i=0
+for log in $(echo "$response" | jq -r '.logs | .[]')
+ do
+ c=`echo -n "${log}" | base64 --decode`
+ [ "$c" = "${EXP[i]}" ]
+ i=$((i+1))
+ done
+
+date "+${scriptname} OK %Y%m%d_%H%M%S" \ No newline at end of file
diff --git a/test/scripts/e2e_subs/example.py b/test/scripts/e2e_subs/example.py
new file mode 100755
index 000000000..cc7acda2c
--- /dev/null
+++ b/test/scripts/e2e_subs/example.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+
+import os
+import sys
+from goal import Goal
+
+import algosdk.future.transaction as txn
+from datetime import datetime
+
+stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+print(f"{os.path.basename(sys.argv[0])} start {stamp}")
+
+goal = Goal(sys.argv[1])
+
+joe = goal.new_account()
+flo = goal.new_account()
+
+# Pays
+
+pay = goal.pay(goal.account, receiver=joe, amt=10000) # under min balance
+txid, err = goal.send(pay, confirm=False) # errors early
+assert err
+
+pay = goal.pay(goal.account, receiver=joe, amt=500_000)
+txinfo, err = goal.send(pay)
+assert not err, err
+tx = txinfo['txn']['txn']
+assert tx['amt'] == 500_000
+assert tx['fee'] == 1000
+assert goal.balance(joe) == 500_000
+
+# Asset creation
+acfg = goal.acfg(joe,
+ total=10_000, unit_name='oz', asset_name='Gold',
+ freeze=flo)
+txinfo, err = goal.send(acfg)
+assert not err, err
+gold = txinfo['asset-index']
+assert goal.balance(joe, gold) == 10_000
+
+# Asset transfer
+axfer = goal.axfer(joe, goal.account, 50, gold)
+txinfo, err = goal.send(axfer)
+assert err
+assert goal.balance(joe, gold) == 10_000
+
+optin = goal.axfer(goal.account, goal.account, 0, gold)
+txinfo, err = goal.send(optin)
+assert not err, err
+
+axfer = goal.axfer(joe, goal.account, 50, gold)
+txinfo, err = goal.send(axfer)
+assert not err, err
+assert goal.balance(joe, gold) == 9_950
+assert goal.balance(goal.account, gold) == 50
+
+txinfo, err = goal.send(goal.pay(goal.account, receiver=flo, amt=1500_000))
+assert not err, err
+
+# Freezing, and txgroup
+assert not goal.holding(goal.account, gold)[1]
+freeze1 = goal.afrz(flo, gold, goal.account, True)
+freeze2 = goal.afrz(flo, gold, joe, True)
+txinfo, err = goal.send_group([freeze1, freeze2])
+assert not err, err
+assert goal.holding(goal.account, gold)[1]
+assert goal.holding(joe, gold)[1]
+
+# App create
+teal = "test/scripts/e2e_subs/tealprogs"
+approval = goal.assemble(os.path.join(teal, "app-escrow.teal"))
+yes = goal.assemble("#pragma version 2\nint 28") # 28 is just to uniquify
+create = goal.appl(flo, 0,
+ local_schema=(1, 0),
+ global_schema=(0, 4),
+ approval_program=approval,
+ clear_program=yes)
+txinfo, err = goal.send(create)
+assert not err, err
+app_id = txinfo['application-index']
+assert app_id
+
+# app_create is a convenience wrapper around appl
+create = goal.app_create(flo, approval, local_schema=(1, 0))
+txinfo, err = goal.send(create)
+assert not err, err
+
+app2_id = txinfo['application-index']
+assert app2_id
+
+app_info = goal.app_info(app_id)
+assert app_info['local-state-schema']['num-uint'] == 1, app_info
+
+# App opt-in
+optin = goal.appl(joe, app2_id, txn.OnComplete.OptInOC)
+txinfo, err = goal.send(optin)
+assert not err, err
+
+# convenience wrapper
+optin = goal.app_optin(joe, app_id)
+txinfo, err = goal.send(optin)
+assert not err, err
+
+# App call, with group
+deposit = goal.appl(joe, app_id, app_args=["deposit():void"])
+payin = goal.pay(goal.account, goal.app_address(app_id), 150_000)
+txinfo, err = goal.send_group([deposit, payin])
+assert not err, err
+
+app_info = goal.app_info(app_id)
+global_state = goal.app_read(app_id)
+assert global_state[b'debug'] == b'deposit', global_state
+local_state = goal.app_read(app_id, joe)
+assert local_state[b'balance'] == 150_000, local_state
+
+# Pay to logicsig, and spend from there, which requires signing by logicsig
+fund = goal.pay(goal.account, goal.logic_address(yes), 110_000)
+txinfo, err = goal.send(fund)
+assert not err, err
+
+spend = goal.pay(goal.logic_address(yes), joe, 2_000)
+spend = goal.sign_with_program(spend, yes)
+txinfo, err = goal.send(spend)
+assert not err, err
+assert goal.balance(goal.logic_address(yes)) == 107_000, goal.balance(goal.logic_address(yes))
+
+stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+print(f"{os.path.basename(sys.argv[0])} OK {stamp}")
diff --git a/test/scripts/e2e_subs/goal-partkey-information.sh b/test/scripts/e2e_subs/goal-partkey-information.sh
new file mode 100755
index 000000000..80dbd8d5c
--- /dev/null
+++ b/test/scripts/e2e_subs/goal-partkey-information.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+# TIMEOUT=300
+
+# errors are handled manually, so no -e
+set -x
+
+date "+$0 start %Y%m%d_%H%M%S"
+
+# Registered Account ParticipationID Last Used First round Last round
+# yes LFMT...RHJQ 4UPT6AQC... 4 0 3000000
+OUTPUT=$(goal account listpartkeys|tail -n 1|tr -s ' ')
+if [[ "$OUTPUT" != yes* ]]; then echo "Registered should be 'yes' but wasn't."; exit 1; fi
+if [[ $(echo "$OUTPUT" | cut -d' ' -f 4) == 0 ]]; then echo "Last Used shouldn't be 0 but was."; exit 1; fi
+if [[ $(echo "$OUTPUT" | cut -d' ' -f 5) != 0 ]]; then echo "First round should be 0 but wasn't."; exit 1; fi
+if [[ $(echo "$OUTPUT" | cut -d' ' -f 6) != 3000000 ]]; then echo "Last round should be 3000000 but wasn't."; exit 1; fi
+
+#Dumping participation key info from /tmp/tmpwtomya9x/net/Node...
+#
+#Participation ID: 4UPT6AQCFZU5ZDN3WKVPCFYOH2SFJ7SPHK7XPWI2CIDYKK7K3WMQ
+#Parent address: LFMTCXCY6WGSFSGLSNTFH532KVERJVNRD7W5H7GIQ4MPGM7SSVYMQYRHJQ
+#Last vote round: 3
+#Last block proposal round: 4
+#Effective first round: 0
+#Effective last round: 3000000
+#First round: 0
+#Last round: 3000000
+#Key dilution: 10000
+#Selection key: esIsBJB86P+sLeqO3gVoLBGfpuwYlWN4lNzz2AYslTo=
+#Voting key: W1OcXLZsaATyOd5FbhRgXHmcywvn++xEVUAQ0NejmW4=
+OUTPUT=$(goal account partkeyinfo)
+if ! echo "$OUTPUT" | grep -q 'First round:[[:space:]]* 0'; then echo "First round should have been 0."; exit 1; fi
+if ! echo "$OUTPUT" | grep -q 'Last round:[[:space:]]* 3000000'; then echo "Last round should have been 3000000."; exit 1; fi
+if ! echo "$OUTPUT" | grep -q 'Effective last round:[[:space:]]* 3000000'; then echo "Effective last round should have been 3000000."; exit 1; fi
+# 100 or 10000 due to arm64 bug
+if ! echo "$OUTPUT" | grep -q 'Key dilution:[[:space:]]* 100\(00\)\?'; then echo "Key dilution should have been 10000."; exit 1; fi
+if ! echo "$OUTPUT" | grep -q 'Participation ID:[[:space:]]*[[:alnum:]]\{52\}'; then echo "There should be a participation ID."; exit 1; fi
+
+# Test multiple data directory supported
+OUTPUT=$(goal account partkeyinfo -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2"|grep -c 'Participation ID')
+if [[ "$OUTPUT" != "2" ]]; then echo "Two Participation IDs should have been found."; exit 1; fi
+
+# get stderr from this one
+OUTPUT=$(goal account listpartkeys -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2" 2>&1)
+EXPECTED_ERR="Only one data directory can be specified for this command."
+if [[ "$OUTPUT" != "$EXPECTED_ERR" ]]; then echo -e "Unexpected output from multiple data directories with 'listpartkeys': \n$OUTPUT"; exit 1; fi
diff --git a/test/scripts/e2e_subs/goal/.gitignore b/test/scripts/e2e_subs/goal/.gitignore
new file mode 100644
index 000000000..bee8a64b7
--- /dev/null
+++ b/test/scripts/e2e_subs/goal/.gitignore
@@ -0,0 +1 @@
+__pycache__
diff --git a/test/scripts/e2e_subs/goal/__init__.py b/test/scripts/e2e_subs/goal/__init__.py
new file mode 100755
index 000000000..2835cc126
--- /dev/null
+++ b/test/scripts/e2e_subs/goal/__init__.py
@@ -0,0 +1 @@
+from .goal import *
diff --git a/test/scripts/e2e_subs/goal/goal.py b/test/scripts/e2e_subs/goal/goal.py
new file mode 100755
index 000000000..921d2c3ea
--- /dev/null
+++ b/test/scripts/e2e_subs/goal/goal.py
@@ -0,0 +1,374 @@
+#!/usr/bin/env python
+
+import base64
+import glob
+import os
+import subprocess
+
+import algosdk
+import algosdk.future.transaction as txn
+import algosdk.encoding as enc
+
+
+def text(path):
+ try:
+ return open(path, "rt").read().strip()
+ except FileNotFoundError:
+ return None
+
+
+def kv_to_dict(kv_list):
+ return {
+ base64.b64decode(kv["key"]): (
+ kv["value"]["uint"]
+ if kv["value"]["type"] == 2
+ else base64.b64decode(kv["value"]["bytes"])
+ )
+ for kv in kv_list
+ }
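+
+# For example (values are illustrative):
+#   kv_to_dict([{"key": "aGk=", "value": {"type": 2, "uint": 7}}])
+# returns {b"hi": 7}: "aGk=" is base64 for b"hi", and type 2 marks a uint.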
+
+
+class Goal:
+ """Goal offers the convenience of goal to Python.
+
+ Python offers a much better programming environment than shell,
+ but the `goal` command is so convenient that our e2e subs have
+ been written in sh to take advantage of it. On the other hand,
+ goal offers responses in fairly ad-hoc ways, so we find ourselves
+ grepping/awking out IDs, performing contortions around error
+ responses, and so on.
+
+ Using a Goal object, new e2e tests written in Python should be
+ able to create and sign transactions conveniently, submit them,
+ and obtain structured responses. We should add methods as needed
+ to resemble the command-line interfaces offered by goal (but
+ hopefully even better, when it comes to transaction group creation
+ and submission).
+
+ """
+
+ def __init__(
+ self,
+ name=None,
+ *,
+ algorand_data=None,
+ algod_token=None,
+ algod_address=None,
+ kmd_token=None,
+ kmd_address=None,
+ autosend=None,
+ ):
+ self.algod = None
+ self.kmd = None
+ algodata = algorand_data or os.environ.get("ALGORAND_DATA")
+ if algodata:
+ self.algod = self.open_algod(algodata)
+ self.kmd = self.open_kmd(algodata)
+
+ if not self.algod:
+ algod_token = algod_token or os.environ.get("ALGOD_TOKEN")
+ algod_address = algod_address or os.environ.get("ALGOD_ADDRESS")
+ if algod_token and algod_address:
+ self.algod = self.open_algod(algod_token, algod_address)
+
+ assert self.algod, "No datadir or creds for algod"
+
+ if not self.kmd:
+ kmd_token = kmd_token or os.environ.get("KMD_TOKEN")
+ kmd_address = kmd_address or os.environ.get("KMD_ADDRESS")
+ if kmd_token and kmd_address:
+ self.kmd = self.open_kmd(kmd_token, kmd_address)
+
+ if self.kmd:
+ self.open_wallet(name)
+        # the internal wallet holds address->sk mappings, so we can
+        # sign txns easily, even without kmd.
+ self.internal_wallet = {}
+
+ self.autosend = autosend
+
+ def open_algod(self, algodata, algod_address=None):
+ if algod_address:
+ algod_token = algodata
+ else:
+ algod_token = text(os.path.join(algodata, "algod.token"))
+ net = text(os.path.join(algodata, "algod.net"))
+ if not net:
+ return None
+ algod_address = "http://" + net
+ return algosdk.v2client.algod.AlgodClient(algod_token, algod_address)
+
+ def open_kmd(self, algodata, kmd_address=None):
+ if kmd_address:
+ kmd_token = algodata
+ else:
+            dirs = sorted(glob.glob(os.path.join(algodata, "kmd-*")))
+            if not dirs:
+                return None
+            dir = dirs[-1]
+ kmd_token = text(os.path.join(dir, "kmd.token"))
+ net = text(os.path.join(dir, "kmd.net"))
+ if not net:
+ return None
+ kmd_address = "http://" + net
+ return algosdk.kmd.KMDClient(kmd_token, kmd_address)
+
+ def open_wallet(self, name):
+ if name:
+ self.wallet_name = name
+ wallet = None
+ for w in self.kmd.list_wallets():
+ if w["name"] == name:
+ wallet = w
+
+ assert wallet, f"No wallet named '{name}'"
+ self.handle = self.kmd.init_wallet_handle(wallet["id"], "")
+ keys = self.kmd.list_keys(self.handle)
+ assert len(keys) == 1
+ self.account = keys[0]
+
+ def sign(self, tx):
+ # If already signed, do nothing (might be SignedTransaction,
+ # LogicSigTransaction, MultisigTransaction)
+ if not isinstance(tx, txn.Transaction):
+ return tx
+ # If we have the key in this object, sign directly
+ sk = self.internal_wallet.get(tx.sender)
+ if sk:
+ return tx.sign(sk)
+ # Ask KMD to sign.
+ if not self.kmd:
+ raise Exception(f"Unable to sign {tx}")
+ try:
+ return self.kmd.sign_transaction(self.handle, "", tx)
+ except algosdk.error.KMDHTTPError:
+ self.open_wallet(self.wallet_name)
+ return self.kmd.sign_transaction(self.handle, "", tx)
+
+ def sign_with_program(self, tx, program, delegator=None):
+ if delegator:
+ raise Exception("haven't implemented delgated logicsig yet")
+ return txn.LogicSigTransaction(tx, txn.LogicSig(program))
+
+ def send(self, tx, confirm=True):
+ try:
+ txid = self.algod.send_transaction(self.sign(tx))
+ if not confirm:
+ return txid, ""
+ return self.confirm(txid), ""
+ except algosdk.error.AlgodHTTPError as e:
+ return (None, str(e))
+
+ def send_group(self, txns, confirm=True):
+        # Need the unsigned transactions to calculate the group ID.
+        # This pulls out the unsigned tx if tx is signed, logicsigged,
+        # or multisigged.
+ utxns = [
+ tx if isinstance(tx, txn.Transaction) else tx.transaction
+ for tx in txns
+ ]
+ gid = txn.calculate_group_id(utxns)
+ for tx in txns:
+ if isinstance(tx, txn.Transaction):
+ tx.group = gid
+ else:
+ tx.transaction.group = gid
+ try:
+ stxns = [self.sign(tx) for tx in txns]
+ txid = self.algod.send_transactions(stxns)
+ if not confirm:
+ return txid, ""
+ return self.confirm(txid), ""
+ except algosdk.error.AlgodHTTPError as e:
+ return (None, str(e))
+
+ def status(self):
+ return self.algod.status()
+
+ def confirm(self, txid):
+ """Wait for txid to be confirmed by the network."""
+ last_round = self.status().get("last-round")
+ txinfo = self.algod.pending_transaction_info(txid)
+ while txinfo.get("confirmed-round", 0) < 1:
+ last_round += 1
+ self.algod.status_after_block(last_round)
+ txinfo = self.algod.pending_transaction_info(txid)
+ return txinfo
+
+ def wait_for_block(self, block):
+ """
+ Utility function to wait until the block number given has been confirmed
+ """
+ print(f"Waiting for block {block}.")
+ s = self.algod.status()
+ last_round = s["last-round"]
+ while last_round < block:
+ wait_block = min(block, last_round + 3)
+ print(f" waiting for {last_round}...")
+ s = self.algod.status_after_block(wait_block)
+ last_round = s["last-round"]
+ return s
+
+ def new_account(self):
+ key, addr = algosdk.account.generate_account()
+ self.add_account(addr, key)
+ return addr
+
+ def add_account(self, address, key):
+ assert len(address) == 58, address
+ assert len(key) == 88, key
+ self.internal_wallet[address] = key
+
+ def finish(self, tx, send):
+ if send is None:
+ send = self.autosend
+ if send:
+ return self.send(tx, confirm=True)
+ return tx
+
+ def keyreg(self, sender, votekey=None, selkey=None, votefst=None,
+ votelst=None, votekd=None,
+ send=None, **kwargs):
+ params = self.algod.suggested_params()
+ tx = txn.KeyregTxn(sender, params,
+ votekey, selkey, votefst, votelst, votekd,
+ **kwargs)
+ return self.finish(tx, send)
+
+ def pay(self, sender, receiver, amt: int, send=None, **kwargs):
+ params = self.algod.suggested_params()
+ tx = txn.PaymentTxn(sender, params, receiver, amt, **kwargs)
+ return self.finish(tx, send)
+
+ def acfg(self, sender, send=None, **kwargs):
+ params = self.algod.suggested_params()
+ tx = txn.AssetConfigTxn(
+ sender, params, **kwargs, strict_empty_address_check=False
+ )
+ return self.finish(tx, send)
+
+ def asset_create(self, sender, **kwargs):
+ assert not kwargs.pop("index", None)
+ return self.acfg(sender, **kwargs)
+
+ def axfer(self, sender, receiver, amt: int, index: int, send=None, **kwargs):
+ params = self.algod.suggested_params()
+ tx = txn.AssetTransferTxn(
+ sender, params, receiver, amt, index, **kwargs
+ )
+ return self.finish(tx, send)
+
+ def asset_optin(self, sender, index: int, **kwargs):
+ assert not kwargs.pop("receiver", None)
+ return self.axfer(sender, sender, 0, index, **kwargs)
+
+ def afrz(self, sender, index: int, target, frozen, send=None, **kwargs):
+ params = self.algod.suggested_params()
+ tx = txn.AssetFreezeTxn(sender, params, index, target, frozen, **kwargs)
+ return self.finish(tx, send)
+
+ def coerce_schema(self, values):
+ if not values:
+ return None
+ if isinstance(values, txn.StateSchema):
+ return values
+ return txn.StateSchema(num_uints=values[0], num_byte_slices=values[1])
+
+ def appl(self, sender, index: int, on_complete=txn.OnComplete.NoOpOC,
+ send=None, **kwargs):
+ params = self.algod.suggested_params()
+ local_schema = self.coerce_schema(kwargs.pop("local_schema", None))
+ global_schema = self.coerce_schema(kwargs.pop("global_schema", None))
+ tx = txn.ApplicationCallTxn(
+ sender,
+ params,
+ index,
+ on_complete,
+ local_schema=local_schema,
+ global_schema=global_schema,
+ **kwargs,
+ )
+ return self.finish(tx, send)
+
+ def app_create(
+ self,
+ sender,
+ approval_program,
+ clear_program=None,
+ on_complete=txn.OnComplete.NoOpOC,
+ **kwargs,
+ ):
+ assert not kwargs.pop("index", None)
+ if not clear_program:
+ clear_program = self.assemble("#pragma version 2\nint 1")
+ return self.appl(
+ sender,
+ 0,
+ on_complete=on_complete,
+ approval_program=approval_program,
+ clear_program=clear_program,
+ **kwargs,
+ )
+
+ def app_optin(self, sender, index: int, **kwargs):
+ assert not kwargs.pop("on_complete", None)
+ return self.appl(sender, index, on_complete=txn.OnComplete.OptInOC, **kwargs)
+
+ def app_call(self, sender, index: int, **kwargs):
+ return self.appl(sender, index, **kwargs)
+
+ def balance(self, account, asa=None):
+ if asa:
+ return self.holding(account, asa)[0]
+ info = self.algod.account_info(account)
+ return info["amount"]
+
+ def holding(self, account, asa):
+ info = self.algod.account_info(account)
+ for asset in info["assets"]:
+ if asset["asset-id"] == asa:
+ return (asset["amount"], asset["is-frozen"])
+ raise Exception("not opted in")
+
+ def assemble(self, source):
+ try:
+ with open(source, "rb") as f:
+ source = f.read()
+ except OSError:
+ source = source.encode("utf-8")
+
+ # CI runs with Python 3.6, which does not have capture_output.
+ # proc = subprocess.run(["goal", "clerk", "compile", "-"],
+ # input=source, capture_output=True)
+ try:
+ proc = subprocess.run(
+ ["goal", "clerk", "compile", "-"],
+ input=source,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ assert proc.returncode == 0, proc.stderr.decode()
+ return proc.stdout
+ except FileNotFoundError: # no goal
+ return self.assemble_with_rest(source.decode())
+
+ def assemble_with_rest(self, source):
+ compile_response = self.algod.compile(source)
+ return base64.b64decode(compile_response["result"])
+
+ def app_info(self, index: int) -> dict:
+ return self.algod.application_info(index)["params"]
+
+ def app_read(self, index: int, user=None) -> dict:
+ if user:
+ info = self.algod.account_info(user)
+ for ls in info["apps-local-state"]:
+ if ls["id"] == index:
+ return kv_to_dict(ls["key-value"])
+ raise Exception("not opted in")
+ return kv_to_dict(self.app_info(index).get("global-state", []))
+
+ def logic_address(self, bytecode: bytes):
+ return enc.encode_address(enc.checksum(b"Program" + bytecode))
+
+ def app_address(self, app_id: int):
+ return enc.encode_address(enc.checksum(b"appID" + (app_id).to_bytes(8, "big")))
diff --git a/test/scripts/e2e_subs/rest-participation-key.sh b/test/scripts/e2e_subs/rest-participation-key.sh
new file mode 100755
index 000000000..e5f7a30b3
--- /dev/null
+++ b/test/scripts/e2e_subs/rest-participation-key.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+# TIMEOUT=300
+
+my_dir="$(dirname "$0")"
+source "$my_dir/rest.sh" "$@"
+
+date "+$0 start %Y%m%d_%H%M%S"
+
+# Use admin token for both get and post
+export USE_ADMIN=true
+
+pushd "${TEMPDIR}" || exit 1
+
+FIRST_ROUND=0
+# A really large (but arbitrary) last valid round
+LAST_ROUND=120
+
+NAME_OF_TEMP_PARTKEY="tmp.${FIRST_ROUND}.${LAST_ROUND}.partkey"
+
+algokey part generate --first ${FIRST_ROUND} --last ${LAST_ROUND} --keyfile ${NAME_OF_TEMP_PARTKEY} --parent ${ACCOUNT}
+
+popd || exit 1
+
+call_and_verify "Get List of Keys" "/v2/participation" 200 'address' 'effective-first-valid'
+
+RES=""
+call_post_and_verify "Install a basic participation key" "/v2/participation" 200 ${NAME_OF_TEMP_PARTKEY} 'partId'
+
+# Get the returned participation id from the RESULT (aka $RES) variable
+INSTALLED_ID=$(echo "$RES" | python3 -c 'import json,sys;o=json.load(sys.stdin);print(o["partId"])')
+
+# Should contain the installed id
+call_and_verify "Get List of Keys" "/v2/participation" 200 'address' "${INSTALLED_ID}" 'address' 'effective-first-valid'
+
+call_and_verify "Get a specific ID" "/v2/participation/${INSTALLED_ID}" 200 "${INSTALLED_ID}"
+
+# The first delete should return 200 and must not contain the 'not found' error message
+call_delete_and_verify "Delete the specific ID" "/v2/participation/${INSTALLED_ID}" 200 false 'participation id not found'
+
+# A second delete of the same ID should now return 404 with an error saying the key was not found
+call_delete_and_verify "Delete the specific ID" "/v2/participation/${INSTALLED_ID}" 404 true 'participation id not found'
diff --git a/test/scripts/e2e_subs/rest.sh b/test/scripts/e2e_subs/rest.sh
index fab6f1d51..4613a7ba9 100755
--- a/test/scripts/e2e_subs/rest.sh
+++ b/test/scripts/e2e_subs/rest.sh
@@ -35,21 +35,155 @@ function base_call {
}
+function base_post_call {
+    curl -X POST --data-binary @${TEMPDIR}/$4 -o "$3" -w "%{http_code}" -q -s -H "Authorization: Bearer $1" "$NET$2"
+}
+
+
+function base_delete_call {
+ curl -X DELETE -o "$3" -w "%{http_code}" -q -s -H "Authorization: Bearer $1" "$NET$2"
+}
+
function call_admin {
base_call "$ADMIN_TOKEN" "$1" "$2"
}
+function call_post_admin {
+ base_post_call "$ADMIN_TOKEN" "$1" "$2" "$3"
+}
+
+function call_delete_admin {
+ base_delete_call "$ADMIN_TOKEN" "$1" "$2" "$3"
+}
function call {
base_call "$PUB_TOKEN" "$1" "$2"
}
+function call_post {
+    base_post_call "$PUB_TOKEN" "$1" "$2" "$3"
+}
+
+function call_delete {
+ base_delete_call "$PUB_TOKEN" "$1" "$2"
+}
+
function fail_and_exit {
printf "\n\nFailed test - $1 ($2): $3\n\n"
exit 1
}
+# $1 - test description.
+# $2 - query
+# $3 - expected status code
+# $4 - the file to upload
+# $5... - substring that should be in the response
+function call_post_and_verify {
+ local DESCRIPTION="$1"
+ shift
+ local QUERY="$1"
+ shift
+ local EXPECTED_CODE="$1"
+ shift
+ local FILENAME_TO_UPLOAD="$1"
+ shift
+
+ echo "MATCHING $@"
+ curl_post_test "$DESCRIPTION" "$QUERY" "$EXPECTED_CODE" true "$FILENAME_TO_UPLOAD" "$@"
+}
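+
+# For example (filename is hypothetical; the file must be in $TEMPDIR):
+#   call_post_and_verify "Install a key" "/v2/participation" 200 my.partkey 'partId'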
+
+
+# CURL POST Test - POST query and verify results
+# $1 - test description.
+# $2 - query
+# $3 - expected status code
+# $4 - match result
+# $5 - the file to upload
+# $6... - substring(s) that should be in the response
+function curl_post_test {
+ local DESCRIPTION="$1"
+ shift
+ local QUERY="$1"
+ shift
+ local EXPECTED_CODE="$1"
+ shift
+ local MATCH_RESULT="$1"
+ shift
+ local FILENAME_TO_UPLOAD="$1"
+ shift
+
+ set +e
+ local CODE
+ if [[ "$USE_ADMIN" = true ]]; then
+ CODE=$(call_post_admin "$QUERY" "${TEMPDIR}/curl_out.txt" "$FILENAME_TO_UPLOAD")
+ else
+ CODE=$(call_post "$QUERY" "${TEMPDIR}/curl_out.txt" "$FILENAME_TO_UPLOAD")
+ fi
+
+ verify $? "$CODE" "$DESCRIPTION" "$QUERY" "$EXPECTED_CODE" "$MATCH_RESULT" "$@"
+
+}
+
+# CURL DELETE Test - send a DELETE query and verify results
+# $1 - test description.
+# $2 - query
+# $3 - expected status code
+# $4 - match result
+# $5... - substring(s) that should be in the response
+function call_delete_and_verify {
+ local DESCRIPTION="$1"
+ shift
+ local QUERY="$1"
+ shift
+ local EXPECTED_CODE="$1"
+ shift
+
+ local MATCH_RESULT="$1"
+ shift
+
+ set +e
+
+ local CODE
+ if [[ "$USE_ADMIN" = true ]]; then
+ CODE=$(call_delete_admin "$QUERY" "${TEMPDIR}/curl_out.txt")
+ else
+ CODE=$(call_delete "$QUERY" "${TEMPDIR}/curl_out.txt" )
+ fi
+
+ verify $? "$CODE" "$DESCRIPTION" "$QUERY" "$EXPECTED_CODE" "$MATCH_RESULT" "$@"
+
+}
+
+
+# CURL Test - query and verify results
+# $1 - test description.
+# $2 - query
+# $3 - expected status code
+# $4 - match result
+# $5... - substring(s) that should be in the response
+function curl_test {
+ local DESCRIPTION="$1"
+ shift
+ local QUERY="$1"
+ shift
+ local EXPECTED_CODE="$1"
+ shift
+ local MATCH_RESULT="$1"
+ shift
+
+ set +e
+
+ local CODE
+ if [[ "$USE_ADMIN" = true ]]; then
+ CODE=$(call_admin "$QUERY" "${TEMPDIR}/curl_out.txt")
+ else
+ CODE=$(call "$QUERY" "${TEMPDIR}/curl_out.txt" )
+ fi
+
+ verify $? "$CODE" "$DESCRIPTION" "$QUERY" "$EXPECTED_CODE" "$MATCH_RESULT" "$@"
+
+}
+
# $1 - test description.
# $2 - query
@@ -67,7 +201,7 @@ function call_and_verify {
curl_test "$DESCRIPTION" "$QUERY" "$EXPECTED_CODE" true "$@"
}
-# CURL Test - query and veryify results
+# CURL Test - query and verify results
# $1 - test description.
# $2 - query
# $3 - expected status code
@@ -82,16 +216,52 @@ function curl_test {
shift
local MATCH_RESULT="$1"
shift
- local SUBSTRING
-
- local START=$SECONDS
set +e
- local CODE=$(call "$QUERY" "${TEMPDIR}/curl_out.txt")
+
+ local CODE
+ if [[ "$USE_ADMIN" = true ]]; then
+ CODE=$(call_admin "$QUERY" "${TEMPDIR}/curl_out.txt")
+ else
+ CODE=$(call "$QUERY" "${TEMPDIR}/curl_out.txt" )
+ fi
+
if [[ $? != 0 ]]; then
cat $CURL_TEMPFILE
fail_and_exit "$DESCRIPTION" "$QUERY" "curl had a non-zero exit code."
fi
+
+ verify $? "$CODE" "$DESCRIPTION" "$QUERY" "$EXPECTED_CODE" "$MATCH_RESULT" "$@"
+
+}
+
+# verify - Common verification code
+# $1 - return code of CURL sub-shell command
+# $2 - HTTP status code
+# $3 - description of test
+# $4 - query to execute
+# $5 - expected HTTP status code to check
+# $6 - match result
+# $7... - substring(s) that should be in the response
+function verify {
+ local SUCCESS=$1
+ shift
+ local CODE=$1
+ shift
+ local DESCRIPTION="$1"
+ shift
+ local QUERY="$1"
+ shift
+ local EXPECTED_CODE="$1"
+ shift
+ local MATCH_RESULT="$1"
+ shift
+
+ if [[ $SUCCESS != 0 ]]; then
+ cat $CURL_TEMPFILE
+ fail_and_exit "$DESCRIPTION" "$QUERY" "curl had a non-zero exit code."
+ fi
+
set -e
RES=$(cat "${TEMPDIR}/curl_out.txt")
@@ -99,10 +269,7 @@ function curl_test {
fail_and_exit "$DESCRIPTION" "$QUERY" "unexpected HTTP status code expected $EXPECTED_CODE (actual $CODE): $RES"
fi
- #local ELAPSED=$(($SECONDS - $START))
- #if [[ $ELAPSED -gt $MAX_TIME ]]; then
- # fail_and_exit "$DESCRIPTION" "$QUERY" "query duration too long, $ELAPSED > $MAX_TIME"
- #fi
+ local SUBSTRING
# Check result substrings
for SUBSTRING in "$@"; do
diff --git a/test/scripts/e2e_subs/tealprogs/app-abi-arg.teal b/test/scripts/e2e_subs/tealprogs/app-abi-arg.teal
new file mode 100644
index 000000000..900ee0e54
--- /dev/null
+++ b/test/scripts/e2e_subs/tealprogs/app-abi-arg.teal
@@ -0,0 +1,73 @@
+#pragma version 5
+intcblock 1 0
+txn ApplicationID
+intc_1 // 0
+==
+bnz main_l14
+txn OnCompletion
+pushint 5 // DeleteApplication
+==
+bnz main_l13
+txn OnCompletion
+intc_0 // OptIn
+==
+txna ApplicationArgs 0
+pushbytes 0x00056f7074696e // ABI string "optin" (2-byte length prefix + bytes)
+==
+&&
+bnz main_l12
+txn OnCompletion
+intc_1 // NoOp
+==
+txna ApplicationArgs 0
+pushbytes 0x0000000000000000 // ABI uint64 0
+==
+&&
+bnz main_l11
+txn OnCompletion
+intc_1 // NoOp
+==
+txna ApplicationArgs 0
+pushbytes 0x000102 // ABI byte[3] [0, 1, 2]
+==
+&&
+bnz main_l10
+txn OnCompletion
+intc_1 // NoOp
+==
+txna ApplicationArgs 0
+pushbytes 0x000d00010200000000000030340003757775 // ABI (string,(byte[3],ufixed64x3)): ("uwu", ([0,1,2], 12.34))
+==
+&&
+bnz main_l9
+txn OnCompletion
+intc_1 // NoOp
+==
+txna ApplicationArgs 0
+pushbytes 0x000000000000018f000c0019000b73686f756c642070617373000490 // ABI (uint64,string,bool[]): (399, "should pass", [true,false,false,true])
+==
+&&
+bnz main_l8
+intc_1 // 0
+return
+main_l8:
+intc_0 // 1
+return
+main_l9:
+intc_0 // 1
+return
+main_l10:
+intc_0 // 1
+return
+main_l11:
+intc_0 // 1
+return
+main_l12:
+intc_0 // 1
+return
+main_l13:
+intc_0 // 1
+return
+main_l14:
+intc_0 // 1
+return \ No newline at end of file
diff --git a/test/scripts/e2e_subs/tealprogs/app-abi-method-example.teal b/test/scripts/e2e_subs/tealprogs/app-abi-method-example.teal
new file mode 100644
index 000000000..dbc831d7a
--- /dev/null
+++ b/test/scripts/e2e_subs/tealprogs/app-abi-method-example.teal
@@ -0,0 +1,176 @@
+// generated from https://gist.github.com/jasonpaulos/99e4f8a75f2fc2ec9b8073c064530359
+#pragma version 5
+txn ApplicationID
+int 0
+==
+bnz main_l14
+txn OnCompletion
+int OptIn
+==
+txna ApplicationArgs 0
+byte 0xcfa68e36
+==
+&&
+bnz main_l13
+txn OnCompletion
+int CloseOut
+==
+txna ApplicationArgs 0
+byte 0xa9f42b3d
+==
+&&
+bnz main_l12
+txn OnCompletion
+int DeleteApplication
+==
+txna ApplicationArgs 0
+byte 0x24378d3c
+==
+&&
+bnz main_l11
+txn OnCompletion
+int NoOp
+==
+txna ApplicationArgs 0
+byte 0xfe6bdf69
+==
+&&
+bnz main_l10
+txn OnCompletion
+int NoOp
+==
+txna ApplicationArgs 0
+byte 0xa88c26a5
+==
+&&
+bnz main_l9
+txn OnCompletion
+int NoOp
+==
+txna ApplicationArgs 0
+byte 0x3e3b3d28
+==
+&&
+bnz main_l8
+int 0
+return
+main_l8:
+txna ApplicationArgs 1
+callsub sub5
+int 1
+return
+main_l9:
+callsub sub4
+int 1
+return
+main_l10:
+txna ApplicationArgs 1
+txna ApplicationArgs 2
+callsub sub3
+int 1
+return
+main_l11:
+callsub sub2
+int 1
+return
+main_l12:
+callsub sub1
+int 1
+return
+main_l13:
+txna ApplicationArgs 1
+callsub sub0
+int 1
+return
+main_l14:
+int 1
+return
+sub0: // optIn
+store 0
+int 0
+byte "name"
+load 0
+extract 2 0
+app_local_put
+byte "hello "
+int 0
+byte "name"
+app_local_get
+concat
+store 1
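+// 0x151f7c75 is the ABI return-value prefix: logs of a method's return
+// value start with the first 4 bytes of sha512_256("return")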
+byte 0x151f7c75
+load 1
+len
+itob
+extract 6 2
+concat
+load 1
+concat
+log
+retsub
+sub1: // closeOut
+byte "goodbye "
+int 0
+byte "name"
+app_local_get
+concat
+store 2
+byte 0x151f7c75
+load 2
+len
+itob
+extract 6 2
+concat
+load 2
+concat
+log
+retsub
+sub2: // deleteApp
+txn Sender
+global CreatorAddress
+==
+assert
+retsub
+sub3: // add
+store 4
+store 3
+byte 0x151f7c75
+load 3
+btoi
+load 4
+btoi
++
+itob
+concat
+log
+retsub
+sub4: // empty
+byte "random inconsequential log"
+log
+retsub
+sub5: // payment
+store 5
+txn GroupIndex
+int 1
+-
+gtxns TypeEnum
+int pay
+==
+assert
+byte 0x151f7c75
+txn GroupIndex
+int 1
+-
+gtxns Amount
+load 5
+btoi
+==
+bnz sub5_l2
+byte 0x00
+b sub5_l3
+sub5_l2:
+byte 0x80
+sub5_l3:
+concat
+log
+retsub
diff --git a/test/scripts/e2e_subs/tealprogs/assets-escrow.teal b/test/scripts/e2e_subs/tealprogs/assets-escrow.teal
new file mode 100644
index 000000000..3ddf42a19
--- /dev/null
+++ b/test/scripts/e2e_subs/tealprogs/assets-escrow.teal
@@ -0,0 +1,328 @@
+#pragma version 5
+ // This application accepts these actions on assets
+ // optin():void - opt in the app to an asset
+ // close():void - opt out the app from an asset
+    // deposit():void - deposit assets into the app and hold until withdraw is requested;
+ // update the asset balance in app's local state
+    // withdraw(uint64):void - withdraw assets from the app and update the asset balance
+    //                         in app's local state. approve if withdraw amount <= balance
+ // transfer(uint64):void - app has clawback auth to transfer assets between accounts
+ // create(uint64):void - app creates assets
+ // mint():void - withdraw assets created by app
+ // freeze(uint64):void - freeze/unfreeze an asset on an account
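+    //
+    // For example, a deposit arrives as a two-txn group built by the caller:
+    // [appl "deposit():void", axfer of N units to the app address]. The
+    // deposit handler below verifies that the next group slot is such an axfer.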
+
+    // ApplicationID is zero in the initial creation txn
+ txn ApplicationID
+ bz handle_createapp
+
+ // Handle possible OnCompletion type. We don't have to
+ // worry about handling ClearState, because the
+ // ClearStateProgram will execute in that case, not the
+ // ApprovalProgram.
+
+ txn OnCompletion
+ int NoOp
+ ==
+ bnz handle_noop
+
+ txn OnCompletion
+ int OptIn
+ ==
+ bnz handle_optin
+
+ txn OnCompletion
+ int CloseOut
+ ==
+ bnz handle_closeout
+
+ txn OnCompletion
+ int UpdateApplication
+ ==
+ bnz handle_updateapp
+
+ txn OnCompletion
+ int DeleteApplication
+ ==
+ bnz handle_deleteapp
+ // Unexpected OnCompletion value. Should be unreachable.
+ err
+
+handle_createapp:
+ int 1
+ return
+
+handle_optin:
+ // Let anyone optin with a single txn, with no arguments. If
+ // it's not a single txn, fall through to handle_noop, so that
+ // a deposit can be made while opting in.
+ // We should standardize a behaviour like this in ABI.
+ global GroupSize
+ int 1
+ ==
+ bz handle_noop
+ int 1
+ return
+
+handle_noop:
+ // opt in app to asset to enable axfer
+ txn ApplicationArgs 0
+ byte "optin():void"
+ ==
+ bz not_optin
+ byte "optin"
+ callsub debug
+
+ itxn_begin
+ int axfer
+ itxn_field TypeEnum
+
+ int 0
+ itxn_field AssetAmount
+
+ txna Assets 0
+ itxn_field XferAsset
+
+ global CurrentApplicationAddress
+ itxn_field AssetReceiver
+ itxn_submit
+
+ int 1
+ return
+not_optin:
+ txn ApplicationArgs 0
+ byte "deposit():void"
+ ==
+ bz not_deposit
+
+ byte "deposit"
+ callsub debug
+
+ // Handle a deposit. Next txn slot must axfer our app account
+ txn GroupIndex
+ int 1
+ +
+ dup
+ dup
+
+ gtxns TypeEnum
+ int axfer
+ ==
+ assert
+
+ gtxns AssetReceiver
+ global CurrentApplicationAddress
+ ==
+ assert
+
+ gtxns AssetAmount
+
+ // Track the amount this sender deposited in their local state
+ int 0
+ byte "balance"
+ dup2
+ app_local_get
+ uncover 3 // pull up the Amount
+ +
+ app_local_put
+
+ int 1
+ return
+not_deposit:
+ txn ApplicationArgs 0
+ byte "withdraw(uint64):void"
+ ==
+ bz not_withdraw
+
+ // Handle withdraw.
+
+ int 0
+ byte "balance"
+ dup2
+ app_local_get
+
+ // Subtract the request and replace. Rejects on underflow
+ txn ApplicationArgs 1
+ btoi
+ -
+ app_local_put
+
+ itxn_begin
+ int axfer
+ itxn_field TypeEnum
+
+ txna Assets 0
+ itxn_field XferAsset
+
+ txn ApplicationArgs 1
+ btoi
+ itxn_field AssetAmount
+
+ txn Sender
+ itxn_field AssetReceiver
+ itxn_submit
+
+ int 1
+ return
+not_withdraw:
+ txn ApplicationArgs 0
+ byte "close():void"
+ ==
+ bz not_close
+
+ // Handle close.
+ itxn_begin
+ int axfer
+ itxn_field TypeEnum
+
+ txna Assets 0
+ itxn_field XferAsset
+
+ int 0
+ itxn_field AssetAmount
+
+ txn Sender
+ itxn_field AssetReceiver
+
+ txn Sender
+ itxn_field AssetCloseTo
+ itxn_submit
+
+ int 1
+ return
+not_close:
+ txn ApplicationArgs 0
+ byte "transfer(uint64):void"
+ ==
+ bz not_transfer
+
+ // Handle transfer.
+ itxn_begin
+ int axfer
+ itxn_field TypeEnum
+
+ txna Assets 0
+ itxn_field XferAsset
+
+ txn ApplicationArgs 1
+ btoi
+ itxn_field AssetAmount
+
+ txn Sender
+ itxn_field AssetSender
+
+ txna Accounts 1
+ itxn_field AssetReceiver
+
+ itxn_submit
+
+ int 1
+ return
+
+not_transfer:
+ txn ApplicationArgs 0
+ byte "create(uint64):void"
+ ==
+ bz not_create
+ // Handle create.
+ itxn_begin
+ int acfg
+ itxn_field TypeEnum
+
+ txn ApplicationArgs 1
+ btoi
+ itxn_field ConfigAssetTotal
+ int 0
+ itxn_field ConfigAssetDecimals
+ byte "x"
+ itxn_field ConfigAssetUnitName
+ byte "X"
+ itxn_field ConfigAssetName
+ global CurrentApplicationAddress
+ itxn_field ConfigAssetFreeze
+
+ itxn_submit
+ int 1
+ return
+not_create:
+ txn ApplicationArgs 0
+ byte "mint():void"
+ ==
+ bz not_mint
+ // Handle mint. Next txn slot must pay our app account
+ txn GroupIndex
+ int 1
+ +
+ dup
+ dup
+
+ gtxns TypeEnum
+ int pay
+ ==
+ assert
+
+ gtxns Receiver
+ global CurrentApplicationAddress
+ ==
+ assert
+
+ // mint asset
+ itxn_begin
+ int axfer
+ itxn_field TypeEnum
+
+ txna Assets 0
+ itxn_field XferAsset
+
+ gtxns Amount
+ itxn_field AssetAmount
+
+ txn Sender
+ itxn_field AssetReceiver
+ itxn_submit
+
+ int 1
+ return
+not_mint:
+ txn ApplicationArgs 0
+ byte "freeze(uint64):void"
+ ==
+ bz not_freeze
+
+ //Handle freeze
+ itxn_begin
+ int afrz
+ itxn_field TypeEnum
+
+ txna Assets 0
+ itxn_field FreezeAsset
+
+ txn ApplicationArgs 1
+ btoi
+ itxn_field FreezeAssetFrozen
+
+ txn Sender
+ itxn_field FreezeAssetAccount
+
+ itxn_submit
+
+ int 1
+ return
+not_freeze:
+ // Unknown call "method"
+ err
+
+handle_closeout:
+ int 1
+ return
+
+handle_updateapp:
+handle_deleteapp:
+ txn Sender
+ global CreatorAddress
+ ==
+ return
+debug:
+ byte "debug"
+ swap
+ app_global_put
+ retsub
diff --git a/test/scripts/e2e_subs/tealprogs/logs.teal b/test/scripts/e2e_subs/tealprogs/logs.teal
new file mode 100644
index 000000000..ab835003c
--- /dev/null
+++ b/test/scripts/e2e_subs/tealprogs/logs.teal
@@ -0,0 +1,19 @@
+#pragma version 5
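+// Logs 32 single-byte entries, "B" (66) through "a" (97), by incrementing
+// a one-byte value until it would exceed 97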
+byte "A"
+loop:
+int 0
+dup2
+getbyte
+int 1
++
+dup
+int 97 // ascii 'a', the last character logged
+<=
+bz end
+setbyte
+dup
+log
+b loop
+end:
+int 1
+return \ No newline at end of file
diff --git a/test/testdata/configs/config-v17.json b/test/testdata/configs/config-v17.json
new file mode 100644
index 000000000..aa80a6dad
--- /dev/null
+++ b/test/testdata/configs/config-v17.json
@@ -0,0 +1,96 @@
+{
+ "Version": 17,
+ "AccountUpdatesStatsInterval": 5000000000,
+ "AccountsRebuildSynchronousMode": 1,
+ "AnnounceParticipationKey": true,
+ "Archival": false,
+ "BaseLoggerDebugLevel": 4,
+ "BlockServiceCustomFallbackEndpoints": "",
+ "BroadcastConnectionsLimit": -1,
+ "CadaverSizeTarget": 1073741824,
+ "CatchpointFileHistoryLength": 365,
+ "CatchpointInterval": 10000,
+ "CatchpointTracking": 0,
+ "CatchupBlockDownloadRetryAttempts": 1000,
+ "CatchupBlockValidateMode": 0,
+ "CatchupFailurePeerRefreshRate": 10,
+ "CatchupGossipBlockFetchTimeoutSec": 4,
+ "CatchupHTTPBlockFetchTimeoutSec": 4,
+ "CatchupLedgerDownloadRetryAttempts": 50,
+ "CatchupParallelBlocks": 16,
+ "ConnectionsRateLimitingCount": 60,
+ "ConnectionsRateLimitingWindowSeconds": 1,
+ "DNSBootstrapID": "<network>.algorand.network",
+ "DNSSecurityFlags": 1,
+ "DeadlockDetection": 0,
+ "DisableLocalhostConnectionRateLimit": true,
+ "DisableNetworking": false,
+ "DisableOutgoingConnectionThrottling": false,
+ "EnableAccountUpdatesStats": false,
+ "EnableAgreementReporting": false,
+ "EnableAgreementTimeMetrics": false,
+ "EnableAssembleStats": false,
+ "EnableBlockService": false,
+ "EnableBlockServiceFallbackToArchiver": true,
+ "EnableCatchupFromArchiveServers": false,
+ "EnableDeveloperAPI": false,
+ "EnableGossipBlockService": true,
+ "EnableIncomingMessageFilter": false,
+ "EnableLedgerService": false,
+ "EnableMetricReporting": false,
+ "EnableOutgoingNetworkMessageFiltering": true,
+ "EnablePingHandler": true,
+ "EnableProcessBlockStats": false,
+ "EnableProfiler": false,
+ "EnableRequestLogger": false,
+ "EnableTopAccountsReporting": false,
+ "EnableVerbosedTransactionSyncLogging": false,
+ "EndpointAddress": "127.0.0.1:0",
+ "FallbackDNSResolverAddress": "",
+ "ForceFetchTransactions": false,
+ "ForceRelayMessages": false,
+ "GossipFanout": 4,
+ "IncomingConnectionsLimit": 800,
+ "IncomingMessageFilterBucketCount": 5,
+ "IncomingMessageFilterBucketSize": 512,
+ "IsIndexerActive": false,
+ "LedgerSynchronousMode": 2,
+ "LogArchiveMaxAge": "",
+ "LogArchiveName": "node.archive.log",
+ "LogSizeLimit": 1073741824,
+ "MaxCatchpointDownloadDuration": 7200000000000,
+ "MaxConnectionsPerIP": 30,
+ "MinCatchpointFileDownloadBytesPerSecond": 20480,
+ "NetAddress": "",
+ "NetworkMessageTraceServer": "",
+ "NetworkProtocolVersion": "",
+ "NodeExporterListenAddress": ":9100",
+ "NodeExporterPath": "./node_exporter",
+ "OptimizeAccountsDatabaseOnStartup": false,
+ "OutgoingMessageFilterBucketCount": 3,
+ "OutgoingMessageFilterBucketSize": 128,
+ "ParticipationKeysRefreshInterval": 60000000000,
+ "PeerConnectionsUpdateInterval": 3600,
+ "PeerPingPeriodSeconds": 0,
+ "PriorityPeers": {},
+ "PublicAddress": "",
+ "ReconnectTime": 60000000000,
+ "ReservedFDs": 256,
+ "RestReadTimeoutSeconds": 15,
+ "RestWriteTimeoutSeconds": 120,
+ "RunHosted": false,
+ "SuggestedFeeBlockHistory": 3,
+ "SuggestedFeeSlidingWindowSize": 50,
+ "TLSCertFile": "",
+ "TLSKeyFile": "",
+ "TelemetryToLog": true,
+ "TransactionSyncDataExchangeRate": 0,
+ "TransactionSyncSignificantMessageThreshold": 0,
+ "TxPoolExponentialIncreaseFactor": 2,
+ "TxPoolSize": 15000,
+ "TxSyncIntervalSeconds": 60,
+ "TxSyncServeResponseSize": 1000000,
+ "TxSyncTimeoutSeconds": 30,
+ "UseXForwardedForAddressField": "",
+ "VerifiedTranscationsCacheSize": 30000
+}
diff --git a/test/testdata/configs/config-v18.json b/test/testdata/configs/config-v18.json
new file mode 100644
index 000000000..aa7a010b3
--- /dev/null
+++ b/test/testdata/configs/config-v18.json
@@ -0,0 +1,96 @@
+{
+ "Version": 18,
+ "AccountUpdatesStatsInterval": 5000000000,
+ "AccountsRebuildSynchronousMode": 1,
+ "AnnounceParticipationKey": true,
+ "Archival": false,
+ "BaseLoggerDebugLevel": 4,
+ "BlockServiceCustomFallbackEndpoints": "",
+ "BroadcastConnectionsLimit": -1,
+ "CadaverSizeTarget": 1073741824,
+ "CatchpointFileHistoryLength": 365,
+ "CatchpointInterval": 10000,
+ "CatchpointTracking": 0,
+ "CatchupBlockDownloadRetryAttempts": 1000,
+ "CatchupBlockValidateMode": 0,
+ "CatchupFailurePeerRefreshRate": 10,
+ "CatchupGossipBlockFetchTimeoutSec": 4,
+ "CatchupHTTPBlockFetchTimeoutSec": 4,
+ "CatchupLedgerDownloadRetryAttempts": 50,
+ "CatchupParallelBlocks": 16,
+ "ConnectionsRateLimitingCount": 60,
+ "ConnectionsRateLimitingWindowSeconds": 1,
+ "DNSBootstrapID": "<network>.algorand.network",
+ "DNSSecurityFlags": 1,
+ "DeadlockDetection": 0,
+ "DisableLocalhostConnectionRateLimit": true,
+ "DisableNetworking": false,
+ "DisableOutgoingConnectionThrottling": false,
+ "EnableAccountUpdatesStats": false,
+ "EnableAgreementReporting": false,
+ "EnableAgreementTimeMetrics": false,
+ "EnableAssembleStats": false,
+ "EnableBlockService": false,
+ "EnableBlockServiceFallbackToArchiver": true,
+ "EnableCatchupFromArchiveServers": false,
+ "EnableDeveloperAPI": false,
+ "EnableGossipBlockService": true,
+ "EnableIncomingMessageFilter": false,
+ "EnableLedgerService": false,
+ "EnableMetricReporting": false,
+ "EnableOutgoingNetworkMessageFiltering": true,
+ "EnablePingHandler": true,
+ "EnableProcessBlockStats": false,
+ "EnableProfiler": false,
+ "EnableRequestLogger": false,
+ "EnableTopAccountsReporting": false,
+ "EnableVerbosedTransactionSyncLogging": false,
+ "EndpointAddress": "127.0.0.1:0",
+ "FallbackDNSResolverAddress": "",
+ "ForceFetchTransactions": false,
+ "ForceRelayMessages": false,
+ "GossipFanout": 4,
+ "IncomingConnectionsLimit": 800,
+ "IncomingMessageFilterBucketCount": 5,
+ "IncomingMessageFilterBucketSize": 512,
+ "IsIndexerActive": false,
+ "LedgerSynchronousMode": 2,
+ "LogArchiveMaxAge": "",
+ "LogArchiveName": "node.archive.log",
+ "LogSizeLimit": 1073741824,
+ "MaxCatchpointDownloadDuration": 7200000000000,
+ "MaxConnectionsPerIP": 30,
+ "MinCatchpointFileDownloadBytesPerSecond": 20480,
+ "NetAddress": "",
+ "NetworkMessageTraceServer": "",
+ "NetworkProtocolVersion": "",
+ "NodeExporterListenAddress": ":9100",
+ "NodeExporterPath": "./node_exporter",
+ "OptimizeAccountsDatabaseOnStartup": false,
+ "OutgoingMessageFilterBucketCount": 3,
+ "OutgoingMessageFilterBucketSize": 128,
+ "ParticipationKeysRefreshInterval": 60000000000,
+ "PeerConnectionsUpdateInterval": 3600,
+ "PeerPingPeriodSeconds": 0,
+ "PriorityPeers": {},
+ "PublicAddress": "",
+ "ReconnectTime": 60000000000,
+ "ReservedFDs": 256,
+ "RestReadTimeoutSeconds": 15,
+ "RestWriteTimeoutSeconds": 120,
+ "RunHosted": false,
+ "SuggestedFeeBlockHistory": 3,
+ "SuggestedFeeSlidingWindowSize": 50,
+ "TLSCertFile": "",
+ "TLSKeyFile": "",
+ "TelemetryToLog": true,
+ "TransactionSyncDataExchangeRate": 0,
+ "TransactionSyncSignificantMessageThreshold": 0,
+ "TxPoolExponentialIncreaseFactor": 2,
+ "TxPoolSize": 15000,
+ "TxSyncIntervalSeconds": 60,
+ "TxSyncServeResponseSize": 1000000,
+ "TxSyncTimeoutSeconds": 30,
+ "UseXForwardedForAddressField": "",
+ "VerifiedTranscationsCacheSize": 30000
+}
diff --git a/test/testdata/configs/config-v19.json b/test/testdata/configs/config-v19.json
new file mode 100644
index 000000000..fac112201
--- /dev/null
+++ b/test/testdata/configs/config-v19.json
@@ -0,0 +1,97 @@
+{
+ "Version": 19,
+ "AccountUpdatesStatsInterval": 5000000000,
+ "AccountsRebuildSynchronousMode": 1,
+ "AnnounceParticipationKey": true,
+ "Archival": false,
+ "BaseLoggerDebugLevel": 4,
+ "BlockServiceCustomFallbackEndpoints": "",
+ "BroadcastConnectionsLimit": -1,
+ "CadaverSizeTarget": 1073741824,
+ "CatchpointFileHistoryLength": 365,
+ "CatchpointInterval": 10000,
+ "CatchpointTracking": 0,
+ "CatchupBlockDownloadRetryAttempts": 1000,
+ "CatchupBlockValidateMode": 0,
+ "CatchupFailurePeerRefreshRate": 10,
+ "CatchupGossipBlockFetchTimeoutSec": 4,
+ "CatchupHTTPBlockFetchTimeoutSec": 4,
+ "CatchupLedgerDownloadRetryAttempts": 50,
+ "CatchupParallelBlocks": 16,
+ "ConnectionsRateLimitingCount": 60,
+ "ConnectionsRateLimitingWindowSeconds": 1,
+ "DNSBootstrapID": "<network>.algorand.network",
+ "DNSSecurityFlags": 1,
+ "DeadlockDetection": 0,
+ "DisableLocalhostConnectionRateLimit": true,
+ "DisableNetworking": false,
+ "DisableOutgoingConnectionThrottling": false,
+ "EnableAccountUpdatesStats": false,
+ "EnableAgreementReporting": false,
+ "EnableAgreementTimeMetrics": false,
+ "EnableAssembleStats": false,
+ "EnableBlockService": false,
+ "EnableBlockServiceFallbackToArchiver": true,
+ "EnableCatchupFromArchiveServers": false,
+ "EnableDeveloperAPI": false,
+ "EnableGossipBlockService": true,
+ "EnableIncomingMessageFilter": false,
+ "EnableLedgerService": false,
+ "EnableMetricReporting": false,
+ "EnableOutgoingNetworkMessageFiltering": true,
+ "EnablePingHandler": true,
+ "EnableProcessBlockStats": false,
+ "EnableProfiler": false,
+ "EnableRequestLogger": false,
+ "EnableTopAccountsReporting": false,
+ "EnableVerbosedTransactionSyncLogging": false,
+ "EndpointAddress": "127.0.0.1:0",
+ "FallbackDNSResolverAddress": "",
+ "ForceFetchTransactions": false,
+ "ForceRelayMessages": false,
+ "GossipFanout": 4,
+ "IncomingConnectionsLimit": 800,
+ "IncomingMessageFilterBucketCount": 5,
+ "IncomingMessageFilterBucketSize": 512,
+ "IsIndexerActive": false,
+ "LedgerSynchronousMode": 2,
+ "LogArchiveMaxAge": "",
+ "LogArchiveName": "node.archive.log",
+ "LogSizeLimit": 1073741824,
+ "MaxCatchpointDownloadDuration": 7200000000000,
+ "MaxConnectionsPerIP": 30,
+ "MinCatchpointFileDownloadBytesPerSecond": 20480,
+ "NetAddress": "",
+ "NetworkMessageTraceServer": "",
+ "NetworkProtocolVersion": "",
+ "NodeExporterListenAddress": ":9100",
+ "NodeExporterPath": "./node_exporter",
+ "OptimizeAccountsDatabaseOnStartup": false,
+ "OutgoingMessageFilterBucketCount": 3,
+ "OutgoingMessageFilterBucketSize": 128,
+ "ParticipationKeysRefreshInterval": 60000000000,
+ "PeerConnectionsUpdateInterval": 3600,
+ "PeerPingPeriodSeconds": 0,
+ "PriorityPeers": {},
+ "ProposalAssemblyTime": 250000000,
+ "PublicAddress": "",
+ "ReconnectTime": 60000000000,
+ "ReservedFDs": 256,
+ "RestReadTimeoutSeconds": 15,
+ "RestWriteTimeoutSeconds": 120,
+ "RunHosted": false,
+ "SuggestedFeeBlockHistory": 3,
+ "SuggestedFeeSlidingWindowSize": 50,
+ "TLSCertFile": "",
+ "TLSKeyFile": "",
+ "TelemetryToLog": true,
+ "TransactionSyncDataExchangeRate": 0,
+ "TransactionSyncSignificantMessageThreshold": 0,
+ "TxPoolExponentialIncreaseFactor": 2,
+ "TxPoolSize": 15000,
+ "TxSyncIntervalSeconds": 60,
+ "TxSyncServeResponseSize": 1000000,
+ "TxSyncTimeoutSeconds": 30,
+ "UseXForwardedForAddressField": "",
+ "VerifiedTranscationsCacheSize": 30000
+}
diff --git a/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json b/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json
index b37cf55bc..14b6c6151 100644
--- a/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json
+++ b/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json
@@ -201,12 +201,24 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-US-EAST-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "us-east-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS-US-EAST-1-c5d.9xl",
"Provider": "AWS",
"Region": "us-east-1",
"BaseConfiguration": "c5d.9xlarge"
},
{
+ "Name": "AWS-US-EAST-1-c5d.18xl",
+ "Provider": "AWS",
+ "Region": "us-east-1",
+ "BaseConfiguration": "c5d.18xlarge"
+ },
+ {
"Name": "AWS-US-EAST-2-c5.xlarge",
"Provider": "AWS",
"Region": "us-east-2",
@@ -237,12 +249,24 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-US-EAST-2-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "us-east-2",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS-US-EAST-2-c5d.9xl",
"Provider": "AWS",
"Region": "us-east-2",
"BaseConfiguration": "c5d.9xlarge"
},
{
+ "Name": "AWS-US-EAST-2-c5d.18xl",
+ "Provider": "AWS",
+ "Region": "us-east-2",
+ "BaseConfiguration": "c5d.18xlarge"
+ },
+ {
"Name": "AWS-AP-SOUTH-1-c5.xlarge",
"Provider": "AWS",
"Region": "ap-south-1",
diff --git a/test/testdata/nettemplates/DevModeOneWallet.json b/test/testdata/nettemplates/DevModeOneWallet.json
new file mode 100644
index 000000000..fd29a927a
--- /dev/null
+++ b/test/testdata/nettemplates/DevModeOneWallet.json
@@ -0,0 +1,22 @@
+{
+ "Genesis": {
+ "NetworkName": "devmodenet",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 100,
+ "Online": true
+ }
+ ],
+ "DevMode": true
+ },
+ "Nodes": [
+ {
+ "Name": "Node",
+ "IsRelay": false,
+ "Wallets": [
+ { "Name": "Wallet1", "ParticipationOnly": false }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/nettemplates/TenNodesDistributedMultiWallet.json b/test/testdata/nettemplates/TenNodesDistributedMultiWallet.json
index 987aefe9f..8c4fb39f6 100644
--- a/test/testdata/nettemplates/TenNodesDistributedMultiWallet.json
+++ b/test/testdata/nettemplates/TenNodesDistributedMultiWallet.json
@@ -1,6 +1,8 @@
{
"Genesis": {
"NetworkName": "tbd",
+ "PartKeyDilution": 50,
+ "LastPartKeyRound": 2000,
"Wallets": [
{
"Name": "20pct",
diff --git a/test/testdata/nettemplates/TwoNodesExpiredOfflineV29.json b/test/testdata/nettemplates/TwoNodesExpiredOfflineV29.json
new file mode 100644
index 000000000..198657b6c
--- /dev/null
+++ b/test/testdata/nettemplates/TwoNodesExpiredOfflineV29.json
@@ -0,0 +1,36 @@
+{
+ "Genesis": {
+ "NetworkName": "tbd",
+ "ConsensusProtocol": "https://github.com/algorandfoundation/specs/tree/abc54f79f9ad679d2d22f0fb9909fb005c16f8a1",
+ "Wallets": [
+ {
+ "Name": "Online1",
+ "Stake": 90,
+ "Online": true
+ },
+ {
+ "Name": "Online2",
+ "Stake": 10,
+ "Online": false
+ }
+ ]
+ },
+ "Nodes": [
+ {
+ "Name": "Primary",
+ "IsRelay": true,
+ "Wallets": [
+ { "Name": "Online1",
+ "ParticipationOnly": false }
+ ]
+ },
+ {
+ "Name": "Secondary",
+ "IsRelay": true,
+ "Wallets": [
+ { "Name": "Online2",
+ "ParticipationOnly": false }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/nettemplates/TwoNodesExpiredOfflineVFuture.json b/test/testdata/nettemplates/TwoNodesExpiredOfflineVFuture.json
new file mode 100644
index 000000000..8c2d2e669
--- /dev/null
+++ b/test/testdata/nettemplates/TwoNodesExpiredOfflineVFuture.json
@@ -0,0 +1,36 @@
+{
+ "Genesis": {
+ "NetworkName": "tbd",
+ "ConsensusProtocol": "future",
+ "Wallets": [
+ {
+ "Name": "Online1",
+ "Stake": 90,
+ "Online": true
+ },
+ {
+ "Name": "Online2",
+ "Stake": 10,
+ "Online": false
+ }
+ ]
+ },
+ "Nodes": [
+ {
+ "Name": "Primary",
+ "IsRelay": true,
+ "Wallets": [
+ { "Name": "Online1",
+ "ParticipationOnly": false }
+ ]
+ },
+ {
+ "Name": "Secondary",
+ "IsRelay": true,
+ "Wallets": [
+ { "Name": "Online2",
+ "ParticipationOnly": false }
+ ]
+ }
+ ]
+}
diff --git a/util/db/dbutil.go b/util/db/dbutil.go
index ed2ac8897..410bfb0c1 100644
--- a/util/db/dbutil.go
+++ b/util/db/dbutil.go
@@ -177,7 +177,7 @@ func Retry(fn func() error) (err error) {
return LoggedRetry(fn, logging.Base())
}
-// getDecoratedLogger retruns a decorated logger that includes the readonly true/false, caller and extra fields.
+// getDecoratedLogger returns a decorated logger that includes the readonly true/false, caller and extra fields.
func (db *Accessor) getDecoratedLogger(fn idemFn, extras ...interface{}) logging.Logger {
log := db.logger().With("readonly", db.readOnly)
_, file, line, ok := runtime.Caller(3)
diff --git a/util/db/initialize.go b/util/db/initialize.go
new file mode 100644
index 000000000..1662a17b4
--- /dev/null
+++ b/util/db/initialize.go
@@ -0,0 +1,123 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package db
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+)
+
+// Migration is used to upgrade a database from one version to the next.
+// The Migration slice is ordered and must contain all prior migrations
+// in order to determine which need to be called.
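+//
+// For illustration, a minimal Migration mirroring createFoo from the tests
+// below (a sketch, not part of this change):
+//
+//	func createFoo(ctx context.Context, tx *sql.Tx, newDatabase bool) error {
+//		_, err := tx.Exec(`CREATE TABLE foo (field INTEGER)`)
+//		return err
+//	}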
+type Migration func(ctx context.Context, tx *sql.Tx, newDatabase bool) error
+
+// Initialize creates or upgrades a DB accessor in a new atomic context.
+// The Migration slice is ordered and must contain all prior migrations
+// in order to determine which need to be called.
+func Initialize(accessor Accessor, migrations []Migration) error {
+ return accessor.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return InitializeWithContext(ctx, tx, migrations)
+ })
+}
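+
+// An illustrative use, mirroring the tests below (assumed names, in-memory
+// accessor; a sketch rather than prescribed usage): callers pass the full,
+// ordered migration history on every startup, so Initialize can resume from
+// whatever schema version the database currently holds.
+//
+//	acc, err := MakeAccessor("mydb.sqlite", false, true)
+//	if err != nil {
+//		return err
+//	}
+//	defer acc.Close()
+//	return Initialize(acc, []Migration{createFoo, addToFoo(1)})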
+
+// InitializeWithContext creates or upgrades a DB accessor.
+func InitializeWithContext(ctx context.Context, tx *sql.Tx, migrations []Migration) error {
+ // check current database version
+ dbVersion, err := GetUserVersion(ctx, tx)
+ if err != nil {
+ return ErrUnableToRead
+ }
+
+ version := int32(len(migrations))
+
+	// If the database version is newer than this binary supports, return an error rather
+	// than proceeding; an older binary can only be used if the newer schema happens to be
+	// backward compatible.
+ if dbVersion > version {
+ return MakeErrUnknownVersion(dbVersion, version)
+ }
+
+	// If the database is not up to date, run the outstanding migration functions.
+ if dbVersion < version {
+ var newDatabase bool
+ for i := dbVersion; i < version; i++ {
+ err = migrations[i](ctx, tx, newDatabase)
+ if err != nil && err != ErrNoOpMigration {
+ return MakeErrUpgradeFailure(dbVersion, i)
+ }
+
+		// If the first migration did real work (it did not report ErrNoOpMigration), the
+		// database was newly created; later migrations can consult newDatabase to skip
+		// steps that only apply to pre-existing databases (the account DB relies on this).
+ if i == 0 && err != ErrNoOpMigration {
+ newDatabase = true
+ }
+
+ // update version
+ _, err = SetUserVersion(ctx, tx, i+1)
+ if err != nil {
+ return MakeErrUpgradeFailure(dbVersion, i)
+ }
+ }
+ }
+
+ return nil
+}
+
+// ErrUnableToRead is returned when the accessor cannot be read.
+var ErrUnableToRead = errors.New("unable to read database")
+
+// ErrNoOpMigration is returned when there was no work for the migration to perform.
+var ErrNoOpMigration = errors.New("migration no-op")
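+
+// For illustration (this is the pattern exercised by TestNewDBFlag below): a
+// schema-0 migration may return ErrNoOpMigration to signal that an existing
+// database required no work, which leaves the newDatabase flag false for
+// every migration that follows.
+//
+//	migration0 := func(ctx context.Context, tx *sql.Tx, newDatabase bool) error {
+//		return ErrNoOpMigration // nothing to do for pre-existing databases
+//	}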
+
+// ErrUnknownVersion is returned when a migration to the current version is not available.
+type ErrUnknownVersion struct {
+ CurrentVersion int32
+ SupportedVersion int32
+}
+
+// Error implements the error interface.
+func (err *ErrUnknownVersion) Error() string {
+ return fmt.Sprintf("database schema version is %d, but algod only supports up to %d", err.CurrentVersion, err.SupportedVersion)
+}
+
+// MakeErrUnknownVersion makes an ErrUnknownVersion.
+func MakeErrUnknownVersion(currentVersion, supportedVersion int32) *ErrUnknownVersion {
+ return &ErrUnknownVersion{
+ CurrentVersion: currentVersion,
+ SupportedVersion: supportedVersion,
+ }
+}
+
+// ErrUpgradeFailure is returned when a migration returns an error.
+type ErrUpgradeFailure struct {
+ SchemaVersionFrom int32
+ SchemaVersionTo int32
+}
+
+// Error implements the error interface.
+func (err *ErrUpgradeFailure) Error() string {
+ return fmt.Sprintf("failed to upgrade database from schema %d to %d", err.SchemaVersionFrom, err.SchemaVersionTo)
+}
+
+// MakeErrUpgradeFailure makes an ErrUpgradeFailure.
+func MakeErrUpgradeFailure(from, to int32) *ErrUpgradeFailure {
+ return &ErrUpgradeFailure{
+ SchemaVersionFrom: from,
+ SchemaVersionTo: to,
+ }
+}
diff --git a/util/db/initialize_test.go b/util/db/initialize_test.go
new file mode 100644
index 000000000..46f2ec941
--- /dev/null
+++ b/util/db/initialize_test.go
@@ -0,0 +1,246 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package db
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// A few migration functions to mix and match in tests.
+var (
+ createFoo = func(ctx context.Context, tx *sql.Tx, newDatabase bool) error {
+ _, err := tx.Exec(`CREATE TABLE foo (field INTEGER)`)
+ return err
+ }
+
+ addToFoo = func(amount int) Migration {
+ return func(ctx context.Context, tx *sql.Tx, newDatabase bool) error {
+ _, err := tx.Exec(`INSERT INTO foo (field) VALUES(?)`, amount)
+ return err
+ }
+ }
+
+ returnError = func(err error) Migration {
+ return func(ctx context.Context, tx *sql.Tx, newDatabase bool) error {
+ return err
+ }
+ }
+
+ // Check the sum of the field column.
+ verifyFoo = func(expected int) func(t *testing.T, ctx context.Context, tx *sql.Tx) {
+ return func(t *testing.T, ctx context.Context, tx *sql.Tx) {
+ var field int
+ err := tx.QueryRow(`SELECT COALESCE(SUM(field), 0) FROM foo`).Scan(&field)
+ assert.NoError(t, err)
+ assert.Equal(t, expected, field)
+ }
+ }
+)
+
+func TestInitialize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ testcases := []struct {
+ name string
+ migrations []Migration
+ expectedVersion int32
+ verify func(t *testing.T, ctx context.Context, tx *sql.Tx)
+ expectedError error
+ }{
+ {
+ name: "Simple",
+ migrations: []Migration{
+ createFoo,
+ },
+ expectedVersion: 1,
+ verify: verifyFoo(0),
+ },
+ {
+ name: "Multiple",
+ migrations: []Migration{
+ createFoo,
+ addToFoo(1),
+ addToFoo(10),
+ addToFoo(100),
+ addToFoo(1000),
+ },
+ expectedVersion: 5,
+ verify: verifyFoo(1111),
+ },
+ {
+ name: "Error + rollback",
+ migrations: []Migration{
+ createFoo,
+ addToFoo(1),
+ returnError(errors.New("did not finish")),
+ addToFoo(10),
+ },
+ expectedVersion: 0,
+ verify: nil,
+ expectedError: MakeErrUpgradeFailure(0, 2),
+ },
+ }
+
+ for _, testcase := range testcases {
+ testcase := testcase
+ t.Run(testcase.name, func(t *testing.T) {
+ t.Parallel()
+
+ // Setup
+ accessor, err := MakeAccessor("test_"+testcase.name, false, true)
+ require.NoError(t, err)
+ defer accessor.Close()
+
+ err = Initialize(accessor, testcase.migrations)
+
+ // Check error.
+ if testcase.expectedError == nil {
+ require.NoError(t, err)
+ } else {
+ require.EqualError(t, err, testcase.expectedError.Error())
+ }
+
+ // Check results.
+ accessor.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ version, err := GetUserVersion(ctx, tx)
+ assert.NoError(t, err)
+ assert.Equal(t, testcase.expectedVersion, version)
+
+ if testcase.verify != nil {
+ testcase.verify(t, ctx, tx)
+ }
+ return nil
+ })
+ })
+ }
+}
+
+func TestReadOnlyError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ expiredContext, expiredContextCancelFunc := context.WithCancel(context.Background())
+ expiredContextCancelFunc()
+ err := InitializeWithContext(expiredContext, nil, []Migration{createFoo})
+
+ require.EqualError(t, err, ErrUnableToRead.Error())
+}
+
+func TestUnknownVersionError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ accessor, err := MakeAccessor("test-unknown-version", false, true)
+ require.NoError(t, err)
+ defer accessor.Close()
+
+ migrations := []Migration{
+ createFoo,
+ addToFoo(1),
+ }
+
+ // Initialize to version 2
+ err = Initialize(accessor, migrations)
+ require.NoError(t, err)
+
+ // Initialize with only version 1
+ err = Initialize(accessor, []Migration{createFoo})
+ require.EqualError(t, err, MakeErrUnknownVersion(2, 1).Error())
+}
+
+func TestNewDBFlag(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var captureNewDB bool
+ newDBCheck := func(ctx context.Context, tx *sql.Tx, newDatabase bool) error {
+ captureNewDB = newDatabase
+ return nil
+ }
+
+ testcases := []struct {
+ name string
+ migrations []Migration
+ expectedNewDB bool
+ }{
+ {
+ name: "no-op-migration-0",
+ migrations: []Migration{
+ returnError(ErrNoOpMigration),
+ newDBCheck,
+ },
+ expectedNewDB: false,
+ },
+ {
+ name: "regular-migration",
+ migrations: []Migration{
+ newDBCheck,
+ newDBCheck,
+ },
+ expectedNewDB: true,
+ },
+ }
+
+ for _, testcase := range testcases {
+ t.Run(testcase.name, func(t *testing.T) {
+ accessor, err := MakeAccessor("test_"+testcase.name, false, true)
+ require.NoError(t, err)
+ defer accessor.Close()
+
+ err = Initialize(accessor, testcase.migrations)
+ require.NoError(t, err)
+
+ require.Equal(t, testcase.expectedNewDB, captureNewDB)
+ })
+ }
+}
+
+func TestResumeUpgrading(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ accessor, err := MakeAccessor("test-resume", false, true)
+ require.NoError(t, err)
+ defer accessor.Close()
+
+ // Initialize to version 2
+ migrations := []Migration{
+ createFoo,
+ addToFoo(1),
+ }
+ err = Initialize(accessor, migrations)
+ require.NoError(t, err)
+
+ // Re-initialize and upgrade to version 4
+ migrations = []Migration{
+ createFoo,
+ addToFoo(1),
+ addToFoo(10),
+ addToFoo(100),
+ }
+ err = Initialize(accessor, migrations)
+ require.NoError(t, err)
+
+ accessor.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ verifyFoo(111)(t, ctx, tx)
+ return nil
+ })
+}
diff --git a/util/s3/s3Helper.go b/util/s3/s3Helper.go
index a997f20a1..236911370 100644
--- a/util/s3/s3Helper.go
+++ b/util/s3/s3Helper.go
@@ -199,11 +199,6 @@ func makeS3Session(credentials *credentials.Credentials, bucket string) (helper
return
}
-// GetLatestUpdateVersion returns the latest version details for the 'node' package
-func (helper *Helper) GetLatestUpdateVersion(channel string) (maxVersion uint64, maxVersionName string, err error) {
- return helper.GetUpdateVersion(channel, 0)
-}
-
// GetLatestPackageVersion returns the latest version details for a given package name (eg node, install, tools)
func (helper *Helper) GetLatestPackageVersion(channel string, packageName string) (maxVersion uint64, maxVersionName string, err error) {
return helper.GetPackageVersion(channel, packageName, 0)
@@ -214,12 +209,6 @@ func (helper *Helper) GetLatestPackageFilesVersion(channel string, packagePrefix
return helper.GetPackageFilesVersion(channel, packagePrefix, 0)
}
-// GetUpdateVersion ensures the specified version is present and returns the name of the file, if found
-// Or if specificVersion == 0, returns the name of the file with the max version
-func (helper *Helper) GetUpdateVersion(channel string, specificVersion uint64) (maxVersion uint64, maxVersionName string, err error) {
- return helper.GetPackageVersion(channel, "node", specificVersion)
-}
-
// DownloadFile downloads the specified file to the provided Writer
func (helper *Helper) DownloadFile(name string, writer io.WriterAt) error {
downloader := s3manager.NewDownloader(helper.session)