summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Lee <64482439+algojohnlee@users.noreply.github.com>2022-08-10 09:34:10 -0400
committerGitHub <noreply@github.com>2022-08-10 09:34:10 -0400
commit22c3d69c70e81ac43b78fa18718b615c64648c9a (patch)
treeb7aa55556f03548185d9e824b41621a22ee49e98
parentfca9e64fbb8b38bc3b5c41f2a042b68652a80ebc (diff)
parent24b0cf994ad7592342e90555f219f401f26d43be (diff)
Merge pull request #4383 from Algo-devops-service/relbeta3.9.0v3.9.0-beta
-rw-r--r--.circleci/config.yml132
-rw-r--r--.github/.release.yml20
-rw-r--r--.github/workflows/pr-type-category.yml31
-rw-r--r--Makefile5
-rw-r--r--agreement/msgp_gen.go538
-rw-r--r--agreement/service_test.go7
-rw-r--r--agreement/vote.go2
-rw-r--r--buildnumber.dat2
-rw-r--r--catchup/catchpointService.go29
-rw-r--r--catchup/ledgerFetcher.go17
-rw-r--r--catchup/service.go6
-rw-r--r--catchup/service_test.go43
-rw-r--r--cmd/algod/main_test.go4
-rw-r--r--cmd/algokey/part.go12
-rw-r--r--cmd/catchpointdump/net.go9
-rw-r--r--cmd/goal/account.go49
-rw-r--r--cmd/goal/application.go31
-rw-r--r--cmd/goal/asset.go24
-rw-r--r--cmd/goal/clerk.go30
-rw-r--r--cmd/goal/common.go1
-rw-r--r--cmd/goal/interact.go5
-rw-r--r--cmd/goal/logging.go9
-rw-r--r--cmd/goal/messages.go68
-rw-r--r--cmd/goal/network.go1
-rw-r--r--cmd/goal/node.go51
-rw-r--r--cmd/goal/node_test.go112
-rw-r--r--cmd/loadgenerator/config.go28
-rw-r--r--cmd/loadgenerator/main.go154
-rw-r--r--cmd/opdoc/opdoc.go16
-rw-r--r--cmd/opdoc/tmLanguage.go2
-rw-r--r--cmd/pingpong/runCmd.go34
-rw-r--r--cmd/tealdbg/README.md2
-rw-r--r--cmd/tealdbg/debugger_test.go1
-rw-r--r--cmd/tealdbg/local.go1
-rw-r--r--cmd/tealdbg/localLedger.go4
-rwxr-xr-xcmd/updater/update.sh88
-rw-r--r--compactcert/builder.go386
-rw-r--r--compactcert/signer.go175
-rw-r--r--compactcert/worker_test.go525
-rw-r--r--components/mocks/mockParticipationRegistry.go11
-rw-r--r--config/config.go6
-rw-r--r--config/consensus.go165
-rw-r--r--config/consensus_test.go8
-rw-r--r--config/localTemplate.go17
-rw-r--r--config/local_defaults.go9
-rw-r--r--config/version.go2
-rw-r--r--crypto/batchverifier.go58
-rw-r--r--crypto/batchverifier_test.go14
-rw-r--r--crypto/compactcert/bigfloat.go186
-rw-r--r--crypto/compactcert/bigfloat_test.go167
-rw-r--r--crypto/compactcert/builder.go247
-rw-r--r--crypto/compactcert/common.go117
-rw-r--r--crypto/compactcert/common_test.go131
-rw-r--r--crypto/compactcert/const.go32
-rw-r--r--crypto/compactcert/verifier.go115
-rw-r--r--crypto/crypto_test.go8
-rw-r--r--crypto/curve25519.go27
-rw-r--r--crypto/curve25519_test.go10
-rw-r--r--crypto/falconWrapper.go19
-rw-r--r--crypto/falconWrapper_test.go21
-rw-r--r--crypto/merklearray/merkle.go1
-rw-r--r--crypto/merklearray/proof.go34
-rw-r--r--crypto/merklearray/proof_test.go62
-rw-r--r--crypto/merklesignature/committablePublicKeys.go20
-rw-r--r--crypto/merklesignature/committablePublicKeys_test.go2
-rw-r--r--crypto/merklesignature/const.go29
-rw-r--r--crypto/merklesignature/kats_test.go115
-rw-r--r--crypto/merklesignature/keysBuilder_test.go2
-rw-r--r--crypto/merklesignature/merkleSignatureScheme.go170
-rw-r--r--crypto/merklesignature/merkleSignatureScheme_test.go255
-rw-r--r--crypto/merklesignature/msgp_gen.go193
-rw-r--r--crypto/merklesignature/msgp_gen_test.go60
-rw-r--r--crypto/merklesignature/persistentMerkleSignatureScheme.go8
-rw-r--r--crypto/merklesignature/persistentMerkleSignatureScheme_test.go15
-rw-r--r--crypto/merklesignature/posdivs.go22
-rw-r--r--crypto/merklesignature/posdivs_test.go71
-rw-r--r--crypto/multisig.go2
-rw-r--r--crypto/multisig_test.go12
-rw-r--r--crypto/onetimesig.go24
-rw-r--r--crypto/onetimesig_test.go34
-rw-r--r--crypto/stateproof/builder.go260
-rw-r--r--crypto/stateproof/builder_test.go (renamed from crypto/compactcert/builder_test.go)459
-rw-r--r--crypto/stateproof/coinGenerator.go125
-rw-r--r--crypto/stateproof/coinGenerator_test.go186
-rw-r--r--crypto/stateproof/committableSignatureSlot.go (renamed from crypto/compactcert/committableSignatureSlot.go)25
-rw-r--r--crypto/stateproof/committableSignatureSlot_test.go (renamed from crypto/compactcert/committableSignatureSlot_test.go)16
-rw-r--r--crypto/stateproof/const.go37
-rw-r--r--crypto/stateproof/msgp_gen.go (renamed from crypto/compactcert/msgp_gen.go)825
-rw-r--r--crypto/stateproof/msgp_gen_test.go (renamed from crypto/compactcert/msgp_gen_test.go)102
-rw-r--r--crypto/stateproof/structs.go (renamed from crypto/compactcert/structs.go)77
-rw-r--r--crypto/stateproof/verifier.go154
-rw-r--r--crypto/stateproof/verifier_test.go180
-rw-r--r--crypto/stateproof/weights.go193
-rw-r--r--crypto/stateproof/weights_test.go226
-rw-r--r--daemon/algod/api/algod.oas2.json254
-rw-r--r--daemon/algod/api/algod.oas3.yml340
-rw-r--r--daemon/algod/api/client/restClient.go10
-rw-r--r--daemon/algod/api/server/lib/bundledSpecInject.go3314
-rw-r--r--daemon/algod/api/server/v1/handlers/handlers.go21
-rw-r--r--daemon/algod/api/server/v2/dryrun.go5
-rw-r--r--daemon/algod/api/server/v2/errors.go3
-rw-r--r--daemon/algod/api/server/v2/generated/private/routes.go305
-rw-r--r--daemon/algod/api/server/v2/generated/private/types.go85
-rw-r--r--daemon/algod/api/server/v2/generated/routes.go491
-rw-r--r--daemon/algod/api/server/v2/generated/types.go89
-rw-r--r--daemon/algod/api/server/v2/handlers.go106
-rw-r--r--daemon/algod/api/server/v2/test/handlers_resources_test.go21
-rw-r--r--daemon/algod/api/server/v2/test/handlers_test.go223
-rw-r--r--daemon/algod/api/server/v2/test/helpers.go12
-rw-r--r--daemon/algod/api/spec/v1/model.go37
-rw-r--r--daemon/algod/api/swagger.json77
-rw-r--r--data/account/msgp_gen.go38
-rw-r--r--data/account/participation.go15
-rw-r--r--data/account/participationRegistry.go100
-rw-r--r--data/account/participationRegistry_test.go346
-rw-r--r--data/account/participation_test.go2
-rw-r--r--data/account/registeryDbOps.go40
-rw-r--r--data/accountManager.go24
-rw-r--r--data/accountManager_test.go77
-rw-r--r--data/basics/overflow.go21
-rw-r--r--data/basics/stateProofParticipant.go (renamed from data/basics/ccertpart.go)16
-rw-r--r--data/basics/units_test.go61
-rw-r--r--data/basics/userBalance.go74
-rw-r--r--data/basics/userBalance_test.go2
-rw-r--r--data/bookkeeping/block.go39
-rw-r--r--data/bookkeeping/block_test.go2
-rw-r--r--data/bookkeeping/lightBlockHeader.go61
-rw-r--r--data/bookkeeping/lightBlockHeader_test.go64
-rw-r--r--data/bookkeeping/msgp_gen.go835
-rw-r--r--data/bookkeeping/msgp_gen_test.go120
-rw-r--r--data/bookkeeping/txn_merkle_test.go2
-rw-r--r--data/ledger_test.go2
-rw-r--r--data/pools/transactionPool.go69
-rw-r--r--data/pools/transactionPool_test.go228
-rw-r--r--data/stateproofmsg/message.go50
-rw-r--r--data/stateproofmsg/msgp_gen.go257
-rw-r--r--data/stateproofmsg/msgp_gen_test.go75
-rw-r--r--data/transactions/keyreg.go2
-rw-r--r--data/transactions/logic/README.md84
-rw-r--r--data/transactions/logic/README_in.md41
-rw-r--r--data/transactions/logic/TEAL_opcodes.md114
-rw-r--r--data/transactions/logic/assembler.go316
-rw-r--r--data/transactions/logic/assembler_test.go191
-rw-r--r--data/transactions/logic/backwardCompat_test.go34
-rw-r--r--data/transactions/logic/doc.go18
-rw-r--r--data/transactions/logic/eval.go265
-rw-r--r--data/transactions/logic/evalAppTxn_test.go60
-rw-r--r--data/transactions/logic/evalCrypto_test.go91
-rw-r--r--data/transactions/logic/evalStateful_test.go44
-rw-r--r--data/transactions/logic/eval_test.go191
-rw-r--r--data/transactions/logic/fields.go196
-rw-r--r--data/transactions/logic/fields_string.go49
-rw-r--r--data/transactions/logic/fields_test.go4
-rw-r--r--data/transactions/logic/langspec.json176
-rw-r--r--data/transactions/logic/ledger_test.go25
-rw-r--r--data/transactions/logic/opcodes.go52
-rw-r--r--data/transactions/logic/opcodes_test.go2
-rw-r--r--data/transactions/logic/sourcemap.go17
-rw-r--r--data/transactions/logic/sourcemap_test.go12
-rw-r--r--data/transactions/logic/teal.tmLanguage.json6
-rw-r--r--data/transactions/msgp_gen.go456
-rw-r--r--data/transactions/msgp_gen_test.go120
-rw-r--r--data/transactions/stateproof.go (renamed from data/transactions/compactcert.go)41
-rw-r--r--data/transactions/transaction.go40
-rw-r--r--data/transactions/transaction_test.go136
-rw-r--r--data/transactions/verify/txn.go64
-rw-r--r--data/transactions/verify/txn_test.go54
-rw-r--r--data/transactions/verify/verifiedTxnCache.go2
-rw-r--r--data/transactions/verify/verifiedTxnCache_test.go12
-rw-r--r--data/txHandler.go8
-rw-r--r--data/txntest/txn.go17
-rw-r--r--gen/generate.go2
-rw-r--r--gen/generate_test.go10
-rw-r--r--go.mod3
-rw-r--r--go.sum6
-rw-r--r--installer/config.json.example9
-rw-r--r--ledger/accountdb.go1741
-rw-r--r--ledger/accountdb_test.go1321
-rw-r--r--ledger/acctonline.go914
-rw-r--r--ledger/acctonline_test.go1691
-rw-r--r--ledger/acctupdates.go328
-rw-r--r--ledger/acctupdates_test.go503
-rw-r--r--ledger/applications_test.go76
-rw-r--r--ledger/apply/apply.go8
-rw-r--r--ledger/apply/keyreg.go13
-rw-r--r--ledger/apply/keyreg_test.go29
-rw-r--r--ledger/apply/stateproof.go71
-rw-r--r--ledger/archival_test.go30
-rw-r--r--ledger/blockHeaderCache.go86
-rw-r--r--ledger/blockHeaderCache_test.go94
-rw-r--r--ledger/catchpointfileheader.go38
-rw-r--r--ledger/catchpointtracker.go935
-rw-r--r--ledger/catchpointtracker_test.go828
-rw-r--r--ledger/catchpointwriter.go210
-rw-r--r--ledger/catchpointwriter_test.go141
-rw-r--r--ledger/catchupaccessor.go118
-rw-r--r--ledger/evalbench_test.go3
-rw-r--r--ledger/evalindexer.go13
-rw-r--r--ledger/evalindexer_test.go5
-rw-r--r--ledger/internal/appcow_test.go8
-rw-r--r--ledger/internal/applications.go14
-rw-r--r--ledger/internal/applications_test.go10
-rw-r--r--ledger/internal/apptxn_test.go14
-rw-r--r--ledger/internal/compactcert.go179
-rw-r--r--ledger/internal/compactcert_test.go176
-rw-r--r--ledger/internal/cow.go36
-rw-r--r--ledger/internal/cow_test.go8
-rw-r--r--ledger/internal/double_test.go18
-rw-r--r--ledger/internal/eval.go228
-rw-r--r--ledger/internal/eval_blackbox_test.go366
-rw-r--r--ledger/internal/eval_test.go134
-rw-r--r--ledger/internal/prefetcher/prefetcher.go2
-rw-r--r--ledger/internal/prefetcher/prefetcher_alignment_test.go28
-rw-r--r--ledger/internal/prefetcher/prefetcher_test.go13
-rw-r--r--ledger/ledger.go126
-rw-r--r--ledger/ledger_perf_test.go8
-rw-r--r--ledger/ledger_test.go1113
-rw-r--r--ledger/ledgercore/accountdata.go35
-rw-r--r--ledger/ledgercore/msgp_gen.go160
-rw-r--r--ledger/ledgercore/msgp_gen_test.go60
-rw-r--r--ledger/ledgercore/onlineacct.go4
-rw-r--r--ledger/ledgercore/statedelta.go51
-rw-r--r--ledger/ledgercore/totals.go10
-rw-r--r--ledger/ledgercore/votersForRound.go62
-rw-r--r--ledger/lruonlineaccts.go121
-rw-r--r--ledger/lruonlineaccts_test.go196
-rw-r--r--ledger/msgp_gen.go1022
-rw-r--r--ledger/msgp_gen_test.go240
-rw-r--r--ledger/onlineaccountscache.go147
-rw-r--r--ledger/onlineaccountscache_test.go240
-rw-r--r--ledger/persistedonlineaccts_list.go144
-rw-r--r--ledger/persistedonlineaccts_list_test.go176
-rw-r--r--ledger/testing/initState.go9
-rw-r--r--ledger/testing/randomAccounts.go39
-rw-r--r--ledger/tracker.go172
-rw-r--r--ledger/tracker_test.go22
-rw-r--r--ledger/trackerdb.go145
-rw-r--r--ledger/txtail.go266
-rw-r--r--ledger/txtail_test.go201
-rw-r--r--ledger/voters.go163
-rw-r--r--ledger/voters_test.go231
-rw-r--r--libgoal/libgoal.go24
-rw-r--r--libgoal/transactions.go6
-rw-r--r--logging/collector.go2
-rw-r--r--logging/log.go5
-rw-r--r--logging/telemetryConfig_test.go5
-rw-r--r--logging/telemetryhook_test.go30
-rw-r--r--logging/telemetryspec/metric.go44
-rw-r--r--logging/telemetryspec/metric_test.go41
-rw-r--r--netdeploy/network.go9
-rw-r--r--netdeploy/networkTemplates_test.go6
-rw-r--r--netdeploy/network_test.go7
-rw-r--r--network/wsNetwork.go6
-rw-r--r--network/wsNetwork_test.go8
-rw-r--r--network/wsPeer.go2
-rw-r--r--node/netprio.go2
-rw-r--r--node/node.go86
-rw-r--r--node/node_test.go54
-rw-r--r--protocol/consensus.go15
-rw-r--r--protocol/hash.go21
-rw-r--r--protocol/msgp_gen.go108
-rw-r--r--protocol/stateproof.go (renamed from protocol/compactcerts.go)31
-rw-r--r--protocol/tags.go2
-rw-r--r--protocol/txntype.go4
-rwxr-xr-xscripts/dump_genesis.sh29
-rwxr-xr-xscripts/travis/codegen_verification.sh3
-rw-r--r--shared/pingpong/pingpong.go126
-rw-r--r--stateproof/abstractions.go (renamed from compactcert/abstractions.go)15
-rw-r--r--stateproof/builder.go463
-rw-r--r--stateproof/db.go (renamed from compactcert/db.go)24
-rw-r--r--stateproof/db_test.go (renamed from compactcert/db_test.go)2
-rw-r--r--stateproof/msgp_gen.go (renamed from compactcert/msgp_gen.go)44
-rw-r--r--stateproof/msgp_gen_test.go (renamed from compactcert/msgp_gen_test.go)2
-rw-r--r--stateproof/recovery.go42
-rw-r--r--stateproof/signer.go177
-rw-r--r--stateproof/stateproofMessageGenerator.go146
-rw-r--r--stateproof/stateproofMessageGenerator_test.go409
-rw-r--r--stateproof/verify/stateproof.go179
-rw-r--r--stateproof/verify/stateproof_test.go168
-rw-r--r--stateproof/worker.go (renamed from compactcert/worker.go)42
-rw-r--r--stateproof/worker_test.go1291
-rw-r--r--test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp9
-rw-r--r--test/e2e-go/cli/goal/expect/catchpointCatchupWebProxy/webproxy.go1
-rwxr-xr-x[-rw-r--r--]test/e2e-go/cli/goal/expect/corsTest.exp0
-rw-r--r--test/e2e-go/cli/goal/expect/goalExpectCommon.exp37
-rw-r--r--test/e2e-go/cli/goal/expect/testInfraTest.exp4
-rw-r--r--test/e2e-go/features/catchup/catchpointCatchup_test.go159
-rw-r--r--test/e2e-go/features/compactcert/compactcert_test.go165
-rw-r--r--test/e2e-go/features/participation/accountParticipationTransitions_test.go8
-rw-r--r--test/e2e-go/features/participation/onlineOfflineParticipation_test.go4
-rw-r--r--test/e2e-go/features/stateproofs/stateproofs_test.go1127
-rw-r--r--test/e2e-go/features/transactions/application_test.go5
-rw-r--r--test/e2e-go/features/transactions/asset_test.go38
-rw-r--r--test/e2e-go/features/transactions/onlineStatusChange_test.go11
-rw-r--r--test/e2e-go/features/transactions/proof_test.go26
-rw-r--r--test/e2e-go/features/transactions/sendReceive_test.go37
-rw-r--r--test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go2
-rw-r--r--test/e2e-go/restAPI/restClient_test.go7
-rw-r--r--test/e2e-go/upgrades/stateproof_participation_test.go (renamed from test/e2e-go/upgrades/stateproof_test.go)0
-rw-r--r--test/framework/fixtures/expectFixture.go6
-rw-r--r--test/framework/fixtures/libgoalFixture.go34
-rw-r--r--test/framework/fixtures/restClientFixture.go2
-rw-r--r--test/heapwatch/client_ram_report.py43
-rwxr-xr-xtest/heapwatch/start.sh2
-rw-r--r--test/muleCI/mule.yaml18
-rwxr-xr-xtest/scripts/e2e.sh29
-rwxr-xr-xtest/scripts/e2e_client_runner.py107
-rwxr-xr-xtest/scripts/e2e_subs/asset-misc.sh54
-rwxr-xr-xtest/scripts/e2e_subs/assets-app-b.sh2
-rwxr-xr-xtest/scripts/e2e_subs/assets-app.sh2
-rwxr-xr-xtest/scripts/e2e_subs/hdr-access-logicsig.sh63
-rwxr-xr-xtest/scripts/e2e_subs/hdr-access.py94
-rwxr-xr-xtest/scripts/e2e_subs/rekey.sh39
-rwxr-xr-xtest/scripts/e2e_subs/sectok-app.sh2
-rw-r--r--test/scripts/e2e_subs/tealprogs/quine.map2
-rw-r--r--test/scripts/tps.py81
-rw-r--r--test/testdata/configs/config-v23.json106
-rw-r--r--test/testdata/consensus/catchpointtestingprotocol.json2
-rw-r--r--test/testdata/deployednettemplates/hosttemplates/hosttemplates.json182
-rw-r--r--test/testdata/deployednettemplates/recipes/mmnet/Makefile15
-rw-r--r--test/testdata/deployednettemplates/recipes/mmnet/README.md29
-rw-r--r--test/testdata/deployednettemplates/recipes/mmnet/configs/node.json10
-rw-r--r--test/testdata/deployednettemplates/recipes/mmnet/configs/nonPartNode.json5
-rw-r--r--test/testdata/deployednettemplates/recipes/mmnet/configs/relay.json11
-rw-r--r--test/testdata/deployednettemplates/recipes/mmnet/gen_topology.py142
-rw-r--r--test/testdata/deployednettemplates/recipes/mmnet/genesis.json744
-rw-r--r--test/testdata/deployednettemplates/recipes/mmnet/net.json6196
-rw-r--r--test/testdata/deployednettemplates/recipes/mmnet/recipe.json7
-rw-r--r--test/testdata/deployednettemplates/recipes/mmnet/topology.json1164
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/Makefile13
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/gen_topology.py27
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/genesis.json164
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/net.json864
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/node.json23
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/nonPartNode.json5
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/recipe.json7
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/relay.json11
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/topology.json156
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3s/Makefile18
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3s/gen_topology.py33
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3s/genesis.json2589
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3s/net.json4749
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3s/node.json23
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3s/nonPartNode.json7
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3s/recipe.json7
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3s/relay.json11
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3s/topology.json544
-rw-r--r--test/testdata/nettemplates/CompactCert.json41
-rw-r--r--test/testdata/nettemplates/RichAccountStateProof.json31
-rw-r--r--test/testdata/nettemplates/StateProof.json31
-rw-r--r--test/testdata/nettemplates/StateProofMultiWallets.json63
-rw-r--r--tools/debug/algodump/main.go2
-rw-r--r--util/db/dbutil.go13
-rw-r--r--util/db/dbutil_test.go109
-rw-r--r--util/db/initialize.go12
-rw-r--r--util/db/interfaces.go (renamed from util/db/queryable.go)10
356 files changed, 47730 insertions, 10942 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 56b1c9d80..5cfc869fc 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -15,6 +15,9 @@ parameters:
result_path:
type: string
default: "/tmp/build_test_results"
+ valid_nightly_branch:
+ type: string
+ default: /hotfix\/.*/
executors:
amd64_medium:
@@ -35,16 +38,27 @@ executors:
resource_class: arm.large
mac_amd64_medium:
macos:
- xcode: 12.0.1
+ xcode: 13.4.1
resource_class: medium
environment:
HOMEBREW_NO_AUTO_UPDATE: "true"
mac_amd64_large:
macos:
- xcode: 12.0.1
+ xcode: 13.4.1
resource_class: large
environment:
HOMEBREW_NO_AUTO_UPDATE: "true"
+ mac_arm64: &executor-mac-arm64
+ machine: true
+ resource_class: algorand/macstadium-m1
+ environment:
+ HOMEBREW_NO_AUTO_UPDATE: "true"
+ # these are required b/c jobs explicitly assign sizes to the executors
+ # for `mac_arm64` there is only one size
+ mac_arm64_medium:
+ <<: *executor-mac-arm64
+ mac_arm64_large:
+ <<: *executor-mac-arm64
workflows:
version: 2
@@ -57,6 +71,23 @@ workflows:
matrix: &matrix-default
parameters:
platform: ["amd64", "arm64", "mac_amd64"]
+ filters: &filters-default
+ branches:
+ ignore:
+ - /rel\/.*/
+ - << pipeline.parameters.valid_nightly_branch >>
+
+ - build_nightly:
+ name: << matrix.platform >>_build_nightly
+ matrix: &matrix-nightly
+ parameters:
+ platform: ["amd64", "arm64", "mac_amd64", "mac_arm64"]
+ filters: &filters-nightly
+ branches:
+ only:
+ - /rel\/.*/
+ - << pipeline.parameters.valid_nightly_branch >>
+ context: slack-secrets
- test:
name: << matrix.platform >>_test
@@ -64,23 +95,13 @@ workflows:
<<: *matrix-default
requires:
- << matrix.platform >>_build
- filters: &filters-default
- branches:
- ignore:
- - /rel\/.*/
- - /hotfix\/.*/
- test_nightly:
name: << matrix.platform >>_test_nightly
matrix:
- <<: *matrix-default
+ <<: *matrix-nightly
requires:
- - << matrix.platform >>_build
- filters: &filters-nightly
- branches:
- only:
- - /rel\/.*/
- - /hotfix\/.*/
+ - << matrix.platform >>_build_nightly
context: slack-secrets
- integration:
@@ -89,17 +110,13 @@ workflows:
<<: *matrix-default
requires:
- << matrix.platform >>_build
- filters:
- <<: *filters-default
- integration_nightly:
name: << matrix.platform >>_integration_nightly
matrix:
- <<: *matrix-default
+ <<: *matrix-nightly
requires:
- - << matrix.platform >>_build
- filters:
- <<: *filters-nightly
+ - << matrix.platform >>_build_nightly
context: slack-secrets
- e2e_expect:
@@ -108,17 +125,13 @@ workflows:
<<: *matrix-default
requires:
- << matrix.platform >>_build
- filters:
- <<: *filters-default
- e2e_expect_nightly:
name: << matrix.platform >>_e2e_expect_nightly
matrix:
- <<: *matrix-default
+ <<: *matrix-nightly
requires:
- - << matrix.platform >>_build
- filters:
- <<: *filters-nightly
+ - << matrix.platform >>_build_nightly
context: slack-secrets
- e2e_subs:
@@ -127,17 +140,13 @@ workflows:
<<: *matrix-default
requires:
- << matrix.platform >>_build
- filters:
- <<: *filters-default
- e2e_subs_nightly:
name: << matrix.platform >>_e2e_subs_nightly
matrix:
- <<: *matrix-default
+ <<: *matrix-nightly
requires:
- - << matrix.platform >>_build
- filters:
- <<: *filters-nightly
+ - << matrix.platform >>_build_nightly
context:
- slack-secrets
- aws-secrets
@@ -147,14 +156,24 @@ workflows:
matrix:
parameters:
platform: ["amd64", "arm64", "mac_amd64"]
- job_type: ["test", "test_nightly", "integration", "integration_nightly", "e2e_expect", "e2e_expect_nightly"]
+ job_type: ["test", "integration", "e2e_expect"]
+ requires:
+ - << matrix.platform >>_<< matrix.job_type >>
+
+ - tests_verification_job_nightly:
+ name: << matrix.platform >>_<< matrix.job_type >>_verification
+ matrix:
+ parameters:
+ platform: ["amd64", "arm64", "mac_amd64", "mac_arm64"]
+ job_type: ["test_nightly", "integration_nightly", "e2e_expect_nightly"]
requires:
- << matrix.platform >>_<< matrix.job_type >>
+ context: slack-secrets
- upload_binaries:
name: << matrix.platform >>_upload_binaries
matrix:
- <<: *matrix-default
+ <<: *matrix-nightly
requires:
- << matrix.platform >>_test_nightly_verification
- << matrix.platform >>_integration_nightly_verification
@@ -168,6 +187,7 @@ workflows:
context:
- slack-secrets
- aws-secrets
+
#- windows_x64_build
commands:
@@ -187,6 +207,7 @@ commands:
- run:
working_directory: /tmp
command: |
+ sudo rm -rf << parameters.build_dir >>
sudo mkdir -p << parameters.build_dir >>
sudo chown -R $USER:$GROUP << parameters.build_dir >>
@@ -378,6 +399,7 @@ commands:
no_output_timeout: << parameters.no_output_timeout >>
command: |
set -x
+ export CI_E2E_FILENAME="${CIRCLE_BRANCH/\//-}"
export PATH=$(echo "$PATH" | sed -e "s|:${HOME}/\.go_workspace/bin||g" | sed -e 's|:/usr/local/go/bin||g')
export KMD_NOUSB=True
export GOPATH="<< parameters.build_dir >>/go"
@@ -483,6 +505,21 @@ jobs:
- prepare_go
- generic_build
+ build_nightly:
+ parameters:
+ platform:
+ type: string
+ executor: << parameters.platform >>_medium
+ working_directory: << pipeline.parameters.build_dir >>/project
+ steps:
+ - prepare_build_dir
+ - checkout
+ - prepare_go
+ - generic_build
+ - slack/notify: &slack-fail-event
+ event: fail
+ template: basic_fail_1
+
test:
parameters:
platform:
@@ -514,9 +551,8 @@ jobs:
result_subdir: << parameters.platform >>_test_nightly
no_output_timeout: 45m
- upload_coverage
- - slack/notify: &slack-fail-event
- event: fail
- template: basic_fail_1
+ - slack/notify:
+ <<: *slack-fail-event
integration:
parameters:
@@ -614,7 +650,10 @@ jobs:
working_directory: << pipeline.parameters.build_dir >>/project
environment:
E2E_TEST_FILTER: "SCRIPTS"
- E2E_PLATFORM: << parameters.platform >>
+ CI_PLATFORM: << parameters.platform >>
+ # This platform is arbitrary, basically we just want to keep temps for
+ # one of the platforms in the matrix.
+ CI_KEEP_TEMP_PLATFORM: "amd64"
steps:
- prepare_build_dir
- prepare_go
@@ -659,6 +698,23 @@ jobs:
- tests_verification_command:
result_subdir: << parameters.platform >>_<< parameters.job_type >>
+ tests_verification_job_nightly:
+ docker:
+ - image: python:3.9.6-alpine
+ resource_class: small
+ working_directory: << pipeline.parameters.build_dir >>/project
+ parameters:
+ platform: # platform: ["amd64", "arm64", "mac_amd64"]
+ type: string
+ job_type: # job_type: ["test", "test_nightly", "integration", "integration_nightly", "e2e_expect", "e2e_expect_nightly"]
+ type: string
+ steps:
+ - checkout
+ - tests_verification_command:
+ result_subdir: << parameters.platform >>_<< parameters.job_type >>
+ - slack/notify:
+ <<: *slack-fail-event
+
upload_binaries:
working_directory: << pipeline.parameters.build_dir >>/project
parameters:
diff --git a/.github/.release.yml b/.github/.release.yml
new file mode 100644
index 000000000..1ea816c33
--- /dev/null
+++ b/.github/.release.yml
@@ -0,0 +1,20 @@
+changelog:
+ exclude:
+ labels:
+ - Skip-Release-Notes
+ categories:
+ - title: Bugfixes
+ labels:
+ - Bug-Fix
+ - title: New Features
+ labels:
+ - New Feature
+ - title: Enhancements
+ labels:
+ - Enhancement
+ - title: Not Yet Enabled
+ labels:
+ - Not-Yet-Enabled
+ - title: Other
+ labels:
+ - "*"
diff --git a/.github/workflows/pr-type-category.yml b/.github/workflows/pr-type-category.yml
index 678499ed3..8dd4cfcd2 100644
--- a/.github/workflows/pr-type-category.yml
+++ b/.github/workflows/pr-type-category.yml
@@ -9,29 +9,16 @@ jobs:
runs-on: ubuntu-latest
name: Check PR Category and Type
steps:
- - name: "Failed to find proper PR Type label. Please add one of the following: 'New Feature', 'Enhancement', or 'Bug-Fix'"
- run: exit 1
- if: |
- !contains(github.event.pull_request.labels.*.name, 'New Feature') &&
- !contains(github.event.pull_request.labels.*.name, 'Enhancement') &&
- !contains(github.event.pull_request.labels.*.name, 'Bug-Fix')
- - name: "Found more than one PR Type label. Please add only one of the following: 'New Feature', 'Enhancement', or 'Bug-Fix'"
- run: exit 1
- if: |
- (
- contains(github.event.pull_request.labels.*.name, 'New Feature') &&
- contains(github.event.pull_request.labels.*.name, 'Enhancement')
- ) || (
- contains(github.event.pull_request.labels.*.name, 'New Feature') &&
- contains(github.event.pull_request.labels.*.name, 'Bug-Fix')
- ) || (
- contains(github.event.pull_request.labels.*.name, 'Enhancement') &&
- contains(github.event.pull_request.labels.*.name, 'Bug-Fix')
- )
- - name: "PR Category is missing from PR title. Please add it like '<category>: <pr title>'"
+ - name: Checking for correct number of required github pr labels
+ uses: mheap/github-action-required-labels@v2
+ with:
+ mode: exactly
+ count: 1
+ labels: "New Feature, Enhancement, Bug-Fix, Not-Yet-Enabled, Skip-Release-Notes"
+
+ - name: "Checking for PR Category in PR title. Should be like '<category>: <pr title>'."
run: |
if [[ ! "${{ github.event.pull_request.title }}" =~ ^.{2,}\:.{2,} ]]; then
+ echo "## PR Category is missing from PR title. Please add it like '<category>: <pr title>'." >> GITHUB_STEP_SUMMARY
exit 1
fi
- - name: "Found at least one PR Type label and Category in the title. Good job!"
- run: exit 0
diff --git a/Makefile b/Makefile
index f55089890..0860d71f7 100644
--- a/Makefile
+++ b/Makefile
@@ -86,7 +86,7 @@ GOLDFLAGS := $(GOLDFLAGS_BASE) \
UNIT_TEST_SOURCES := $(sort $(shell GOPATH=$(GOPATH) && GO111MODULE=off && go list ./... | grep -v /go-algorand/test/ ))
ALGOD_API_PACKAGES := $(sort $(shell GOPATH=$(GOPATH) && GO111MODULE=off && cd daemon/algod/api; go list ./... ))
-MSGP_GENERATE := ./protocol ./protocol/test ./crypto ./crypto/merklearray ./crypto/merklesignature ./crypto/compactcert ./data/basics ./data/transactions ./data/committee ./data/bookkeeping ./data/hashable ./agreement ./rpcs ./node ./ledger ./ledger/ledgercore ./compactcert ./data/account ./daemon/algod/api/spec/v2
+MSGP_GENERATE := ./protocol ./protocol/test ./crypto ./crypto/merklearray ./crypto/merklesignature ./crypto/stateproof ./data/basics ./data/transactions ./data/stateproofmsg ./data/committee ./data/bookkeeping ./data/hashable ./agreement ./rpcs ./node ./ledger ./ledger/ledgercore ./stateproof ./data/account ./daemon/algod/api/spec/v2
default: build
@@ -248,6 +248,9 @@ $(GOPATH1)/bin/%:
test: build
$(GOTESTCOMMAND) $(GOTAGS) -race $(UNIT_TEST_SOURCES) -timeout 1h -coverprofile=coverage.txt -covermode=atomic
+benchcheck: build
+ $(GOTESTCOMMAND) $(GOTAGS) -race $(UNIT_TEST_SOURCES) -run ^NOTHING -bench Benchmark -benchtime 1x -timeout 1h
+
fulltest: build-race
$(GOTESTCOMMAND) $(GOTAGS) -race $(UNIT_TEST_SOURCES) -timeout 1h -coverprofile=coverage.txt -covermode=atomic
diff --git a/agreement/msgp_gen.go b/agreement/msgp_gen.go
index 4f2252b87..e5508cd47 100644
--- a/agreement/msgp_gen.go
+++ b/agreement/msgp_gen.go
@@ -1358,87 +1358,87 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) {
// omitempty: check for empty values
zb0004Len := uint32(29)
var zb0004Mask uint64 /* 37 bits */
- if len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert) == 0 {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0 {
zb0004Len--
zb0004Mask |= 0x40
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0 {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x80
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0 {
zb0004Len--
zb0004Mask |= 0x100
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0 {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "" {
zb0004Len--
zb0004Mask |= 0x200
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "" {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x400
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x800
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x1000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x2000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0 {
zb0004Len--
zb0004Mask |= 0x4000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0 {
+ if (*z).unauthenticatedProposal.OriginalPeriod == 0 {
zb0004Len--
zb0004Mask |= 0x8000
}
- if (*z).unauthenticatedProposal.OriginalPeriod == 0 {
+ if (*z).unauthenticatedProposal.OriginalProposer.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x10000
}
- if (*z).unauthenticatedProposal.OriginalProposer.MsgIsZero() {
+ if len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
zb0004Len--
zb0004Mask |= 0x20000
}
- if len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x40000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x80000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0 {
zb0004Len--
zb0004Mask |= 0x100000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0 {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x200000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x400000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x800000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
+ if (*z).unauthenticatedProposal.SeedProof.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x1000000
}
- if (*z).unauthenticatedProposal.SeedProof.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x2000000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero() {
+ if len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking) == 0 {
zb0004Len--
zb0004Mask |= 0x4000000
}
@@ -1478,81 +1478,61 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendMapHeader(o, zb0004Len)
if zb0004Len != 0 {
if (zb0004Mask & 0x40) == 0 { // if not empty
- // string "cc"
- o = append(o, 0xa2, 0x63, 0x63)
- if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendMapHeader(o, uint32(len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert)))
- }
- zb0001_keys := make([]protocol.CompactCertType, 0, len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert))
- for zb0001 := range (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert {
- zb0001_keys = append(zb0001_keys, zb0001)
- }
- sort.Sort(protocol.SortCompactCertType(zb0001_keys))
- for _, zb0001 := range zb0001_keys {
- zb0002 := (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert[zb0001]
- _ = zb0002
- o = zb0001.MarshalMsg(o)
- o = zb0002.MarshalMsg(o)
- }
- }
- if (zb0004Mask & 0x80) == 0 { // if not empty
// string "earn"
o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel)
}
- if (zb0004Mask & 0x100) == 0 { // if not empty
+ if (zb0004Mask & 0x80) == 0 { // if not empty
// string "fees"
o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MarshalMsg(o)
}
- if (zb0004Mask & 0x200) == 0 { // if not empty
+ if (zb0004Mask & 0x100) == 0 { // if not empty
// string "frac"
o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue)
}
- if (zb0004Mask & 0x400) == 0 { // if not empty
+ if (zb0004Mask & 0x200) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID)
}
- if (zb0004Mask & 0x800) == 0 { // if not empty
+ if (zb0004Mask & 0x400) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MarshalMsg(o)
}
- if (zb0004Mask & 0x1000) == 0 { // if not empty
+ if (zb0004Mask & 0x800) == 0 { // if not empty
// string "nextbefore"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MarshalMsg(o)
}
- if (zb0004Mask & 0x2000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000) == 0 { // if not empty
// string "nextproto"
o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MarshalMsg(o)
}
- if (zb0004Mask & 0x4000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000) == 0 { // if not empty
// string "nextswitch"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MarshalMsg(o)
}
- if (zb0004Mask & 0x8000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000) == 0 { // if not empty
// string "nextyes"
o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals)
}
- if (zb0004Mask & 0x10000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000) == 0 { // if not empty
// string "oper"
o = append(o, 0xa4, 0x6f, 0x70, 0x65, 0x72)
o = msgp.AppendUint64(o, uint64((*z).unauthenticatedProposal.OriginalPeriod))
}
- if (zb0004Mask & 0x20000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000) == 0 { // if not empty
// string "oprop"
o = append(o, 0xa5, 0x6f, 0x70, 0x72, 0x6f, 0x70)
o = (*z).unauthenticatedProposal.OriginalProposer.MarshalMsg(o)
}
- if (zb0004Mask & 0x40000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000) == 0 { // if not empty
// string "partupdrmv"
o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts == nil {
@@ -1564,46 +1544,66 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) {
o = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].MarshalMsg(o)
}
}
- if (zb0004Mask & 0x80000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000) == 0 { // if not empty
// string "prev"
o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Branch.MarshalMsg(o)
}
- if (zb0004Mask & 0x100000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000) == 0 { // if not empty
// string "proto"
o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o)
}
- if (zb0004Mask & 0x200000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000) == 0 { // if not empty
// string "rate"
o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate)
}
- if (zb0004Mask & 0x400000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000) == 0 { // if not empty
// string "rnd"
o = append(o, 0xa3, 0x72, 0x6e, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Round.MarshalMsg(o)
}
- if (zb0004Mask & 0x800000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000) == 0 { // if not empty
// string "rwcalr"
o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o)
}
- if (zb0004Mask & 0x1000000) == 0 { // if not empty
+ if (zb0004Mask & 0x800000) == 0 { // if not empty
// string "rwd"
o = append(o, 0xa3, 0x72, 0x77, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MarshalMsg(o)
}
- if (zb0004Mask & 0x2000000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000000) == 0 { // if not empty
// string "sdpf"
o = append(o, 0xa4, 0x73, 0x64, 0x70, 0x66)
o = (*z).unauthenticatedProposal.SeedProof.MarshalMsg(o)
}
- if (zb0004Mask & 0x4000000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000000) == 0 { // if not empty
// string "seed"
o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MarshalMsg(o)
}
+ if (zb0004Mask & 0x4000000) == 0 { // if not empty
+ // string "spt"
+ o = append(o, 0xa3, 0x73, 0x70, 0x74)
+ if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking)))
+ }
+ zb0001_keys := make([]protocol.StateProofType, 0, len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking))
+ for zb0001 := range (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(protocol.SortStateProofType(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ o = zb0002.MarshalMsg(o)
+ }
+ }
if (zb0004Mask & 0x8000000) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
@@ -1856,34 +1856,34 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0007 bool
zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCert")
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
- if zb0006 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumCompactCertTypes))
- err = msgp.WrapError(err, "struct-from-array", "CompactCert")
+ if zb0006 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumStateProofTypes))
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
if zb0007 {
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = nil
- } else if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0006)
+ (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = nil
+ } else if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking == nil {
+ (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0006)
}
for zb0006 > 0 {
- var zb0001 protocol.CompactCertType
- var zb0002 bookkeeping.CompactCertState
+ var zb0001 protocol.StateProofType
+ var zb0002 bookkeeping.StateProofTrackingData
zb0006--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCert")
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
bts, err = zb0002.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCert", zb0001)
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking", zb0001)
return
}
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert[zb0001] = zb0002
+ (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking[zb0001] = zb0002
}
}
if zb0004 > 0 {
@@ -2112,39 +2112,39 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "TxnCounter")
return
}
- case "cc":
+ case "spt":
var zb0011 int
var zb0012 bool
zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "CompactCert")
+ err = msgp.WrapError(err, "StateProofTracking")
return
}
- if zb0011 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(protocol.NumCompactCertTypes))
- err = msgp.WrapError(err, "CompactCert")
+ if zb0011 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(protocol.NumStateProofTypes))
+ err = msgp.WrapError(err, "StateProofTracking")
return
}
if zb0012 {
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = nil
- } else if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0011)
+ (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = nil
+ } else if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking == nil {
+ (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0011)
}
for zb0011 > 0 {
- var zb0001 protocol.CompactCertType
- var zb0002 bookkeeping.CompactCertState
+ var zb0001 protocol.StateProofType
+ var zb0002 bookkeeping.StateProofTrackingData
zb0011--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "CompactCert")
+ err = msgp.WrapError(err, "StateProofTracking")
return
}
bts, err = zb0002.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "CompactCert", zb0001)
+ err = msgp.WrapError(err, "StateProofTracking", zb0001)
return
}
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert[zb0001] = zb0002
+ (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking[zb0001] = zb0002
}
case "partupdrmv":
var zb0013 int
@@ -2221,9 +2221,9 @@ func (_ *proposal) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *proposal) Msgsize() (s int) {
- s = 3 + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.Round.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.Branch.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.Seed.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID) + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 3 + msgp.MapHeaderSize
- if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert != nil {
- for zb0001, zb0002 := range (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert {
+ s = 3 + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.Round.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.Branch.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.Seed.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID) + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + msgp.MapHeaderSize
+ if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking != nil {
+ for zb0001, zb0002 := range (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking {
_ = zb0001
_ = zb0002
s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
@@ -2239,7 +2239,7 @@ func (z *proposal) Msgsize() (s int) {
// MsgIsZero returns whether this is a zero value
func (z *proposal) MsgIsZero() bool {
- return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero())
+ return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
@@ -3120,91 +3120,91 @@ func (z *transmittedPayload) MarshalMsg(b []byte) (o []byte) {
// omitempty: check for empty values
zb0004Len := uint32(30)
var zb0004Mask uint64 /* 37 bits */
- if len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert) == 0 {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0 {
zb0004Len--
zb0004Mask |= 0x80
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0 {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x100
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0 {
zb0004Len--
zb0004Mask |= 0x200
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0 {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "" {
zb0004Len--
zb0004Mask |= 0x400
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "" {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x800
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x1000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x2000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x4000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0 {
zb0004Len--
zb0004Mask |= 0x8000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0 {
+ if (*z).unauthenticatedProposal.OriginalPeriod == 0 {
zb0004Len--
zb0004Mask |= 0x10000
}
- if (*z).unauthenticatedProposal.OriginalPeriod == 0 {
+ if (*z).unauthenticatedProposal.OriginalProposer.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x20000
}
- if (*z).unauthenticatedProposal.OriginalProposer.MsgIsZero() {
+ if len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
zb0004Len--
zb0004Mask |= 0x40000
}
- if len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x80000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x100000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() {
+ if (*z).PriorVote.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x200000
}
- if (*z).PriorVote.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0 {
zb0004Len--
zb0004Mask |= 0x400000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0 {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x800000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x1000000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x2000000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
+ if (*z).unauthenticatedProposal.SeedProof.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x4000000
}
- if (*z).unauthenticatedProposal.SeedProof.MsgIsZero() {
+ if (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x8000000
}
- if (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero() {
+ if len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking) == 0 {
zb0004Len--
zb0004Mask |= 0x10000000
}
@@ -3244,81 +3244,61 @@ func (z *transmittedPayload) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendMapHeader(o, zb0004Len)
if zb0004Len != 0 {
if (zb0004Mask & 0x80) == 0 { // if not empty
- // string "cc"
- o = append(o, 0xa2, 0x63, 0x63)
- if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendMapHeader(o, uint32(len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert)))
- }
- zb0001_keys := make([]protocol.CompactCertType, 0, len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert))
- for zb0001 := range (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert {
- zb0001_keys = append(zb0001_keys, zb0001)
- }
- sort.Sort(protocol.SortCompactCertType(zb0001_keys))
- for _, zb0001 := range zb0001_keys {
- zb0002 := (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert[zb0001]
- _ = zb0002
- o = zb0001.MarshalMsg(o)
- o = zb0002.MarshalMsg(o)
- }
- }
- if (zb0004Mask & 0x100) == 0 { // if not empty
// string "earn"
o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel)
}
- if (zb0004Mask & 0x200) == 0 { // if not empty
+ if (zb0004Mask & 0x100) == 0 { // if not empty
// string "fees"
o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MarshalMsg(o)
}
- if (zb0004Mask & 0x400) == 0 { // if not empty
+ if (zb0004Mask & 0x200) == 0 { // if not empty
// string "frac"
o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue)
}
- if (zb0004Mask & 0x800) == 0 { // if not empty
+ if (zb0004Mask & 0x400) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID)
}
- if (zb0004Mask & 0x1000) == 0 { // if not empty
+ if (zb0004Mask & 0x800) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MarshalMsg(o)
}
- if (zb0004Mask & 0x2000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000) == 0 { // if not empty
// string "nextbefore"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MarshalMsg(o)
}
- if (zb0004Mask & 0x4000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000) == 0 { // if not empty
// string "nextproto"
o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MarshalMsg(o)
}
- if (zb0004Mask & 0x8000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000) == 0 { // if not empty
// string "nextswitch"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MarshalMsg(o)
}
- if (zb0004Mask & 0x10000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000) == 0 { // if not empty
// string "nextyes"
o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals)
}
- if (zb0004Mask & 0x20000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000) == 0 { // if not empty
// string "oper"
o = append(o, 0xa4, 0x6f, 0x70, 0x65, 0x72)
o = msgp.AppendUint64(o, uint64((*z).unauthenticatedProposal.OriginalPeriod))
}
- if (zb0004Mask & 0x40000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000) == 0 { // if not empty
// string "oprop"
o = append(o, 0xa5, 0x6f, 0x70, 0x72, 0x6f, 0x70)
o = (*z).unauthenticatedProposal.OriginalProposer.MarshalMsg(o)
}
- if (zb0004Mask & 0x80000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000) == 0 { // if not empty
// string "partupdrmv"
o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts == nil {
@@ -3330,51 +3310,71 @@ func (z *transmittedPayload) MarshalMsg(b []byte) (o []byte) {
o = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].MarshalMsg(o)
}
}
- if (zb0004Mask & 0x100000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000) == 0 { // if not empty
// string "prev"
o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Branch.MarshalMsg(o)
}
- if (zb0004Mask & 0x200000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000) == 0 { // if not empty
// string "proto"
o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o)
}
- if (zb0004Mask & 0x400000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000) == 0 { // if not empty
// string "pv"
o = append(o, 0xa2, 0x70, 0x76)
o = (*z).PriorVote.MarshalMsg(o)
}
- if (zb0004Mask & 0x800000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000) == 0 { // if not empty
// string "rate"
o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate)
}
- if (zb0004Mask & 0x1000000) == 0 { // if not empty
+ if (zb0004Mask & 0x800000) == 0 { // if not empty
// string "rnd"
o = append(o, 0xa3, 0x72, 0x6e, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Round.MarshalMsg(o)
}
- if (zb0004Mask & 0x2000000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000000) == 0 { // if not empty
// string "rwcalr"
o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o)
}
- if (zb0004Mask & 0x4000000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000000) == 0 { // if not empty
// string "rwd"
o = append(o, 0xa3, 0x72, 0x77, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MarshalMsg(o)
}
- if (zb0004Mask & 0x8000000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000000) == 0 { // if not empty
// string "sdpf"
o = append(o, 0xa4, 0x73, 0x64, 0x70, 0x66)
o = (*z).unauthenticatedProposal.SeedProof.MarshalMsg(o)
}
- if (zb0004Mask & 0x10000000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000000) == 0 { // if not empty
// string "seed"
o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MarshalMsg(o)
}
+ if (zb0004Mask & 0x10000000) == 0 { // if not empty
+ // string "spt"
+ o = append(o, 0xa3, 0x73, 0x70, 0x74)
+ if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking)))
+ }
+ zb0001_keys := make([]protocol.StateProofType, 0, len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking))
+ for zb0001 := range (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(protocol.SortStateProofType(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ o = zb0002.MarshalMsg(o)
+ }
+ }
if (zb0004Mask & 0x20000000) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
@@ -3627,34 +3627,34 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0007 bool
zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCert")
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
- if zb0006 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumCompactCertTypes))
- err = msgp.WrapError(err, "struct-from-array", "CompactCert")
+ if zb0006 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumStateProofTypes))
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
if zb0007 {
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = nil
- } else if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0006)
+ (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = nil
+ } else if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking == nil {
+ (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0006)
}
for zb0006 > 0 {
- var zb0001 protocol.CompactCertType
- var zb0002 bookkeeping.CompactCertState
+ var zb0001 protocol.StateProofType
+ var zb0002 bookkeeping.StateProofTrackingData
zb0006--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCert")
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
bts, err = zb0002.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCert", zb0001)
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking", zb0001)
return
}
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert[zb0001] = zb0002
+ (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking[zb0001] = zb0002
}
}
if zb0004 > 0 {
@@ -3891,39 +3891,39 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "TxnCounter")
return
}
- case "cc":
+ case "spt":
var zb0011 int
var zb0012 bool
zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "CompactCert")
+ err = msgp.WrapError(err, "StateProofTracking")
return
}
- if zb0011 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(protocol.NumCompactCertTypes))
- err = msgp.WrapError(err, "CompactCert")
+ if zb0011 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(protocol.NumStateProofTypes))
+ err = msgp.WrapError(err, "StateProofTracking")
return
}
if zb0012 {
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = nil
- } else if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0011)
+ (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = nil
+ } else if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking == nil {
+ (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0011)
}
for zb0011 > 0 {
- var zb0001 protocol.CompactCertType
- var zb0002 bookkeeping.CompactCertState
+ var zb0001 protocol.StateProofType
+ var zb0002 bookkeeping.StateProofTrackingData
zb0011--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "CompactCert")
+ err = msgp.WrapError(err, "StateProofTracking")
return
}
bts, err = zb0002.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "CompactCert", zb0001)
+ err = msgp.WrapError(err, "StateProofTracking", zb0001)
return
}
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert[zb0001] = zb0002
+ (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking[zb0001] = zb0002
}
case "partupdrmv":
var zb0013 int
@@ -4006,9 +4006,9 @@ func (_ *transmittedPayload) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *transmittedPayload) Msgsize() (s int) {
- s = 3 + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.Round.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.Branch.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.Seed.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID) + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 3 + msgp.MapHeaderSize
- if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert != nil {
- for zb0001, zb0002 := range (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert {
+ s = 3 + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.Round.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.Branch.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.Seed.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID) + 3 + (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.Msgsize() + 5 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + msgp.MapHeaderSize
+ if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking != nil {
+ for zb0001, zb0002 := range (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking {
_ = zb0001
_ = zb0002
s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
@@ -4024,7 +4024,7 @@ func (z *transmittedPayload) Msgsize() (s int) {
// MsgIsZero returns whether this is a zero value
func (z *transmittedPayload) MsgIsZero() bool {
- return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero()) && ((*z).PriorVote.MsgIsZero())
+ return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero()) && ((*z).PriorVote.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
@@ -4696,87 +4696,87 @@ func (z *unauthenticatedProposal) MarshalMsg(b []byte) (o []byte) {
// omitempty: check for empty values
zb0004Len := uint32(29)
var zb0004Mask uint64 /* 35 bits */
- if len((*z).Block.BlockHeader.CompactCert) == 0 {
+ if (*z).Block.BlockHeader.RewardsState.RewardsLevel == 0 {
zb0004Len--
zb0004Mask |= 0x40
}
- if (*z).Block.BlockHeader.RewardsState.RewardsLevel == 0 {
+ if (*z).Block.BlockHeader.RewardsState.FeeSink.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x80
}
- if (*z).Block.BlockHeader.RewardsState.FeeSink.MsgIsZero() {
+ if (*z).Block.BlockHeader.RewardsState.RewardsResidue == 0 {
zb0004Len--
zb0004Mask |= 0x100
}
- if (*z).Block.BlockHeader.RewardsState.RewardsResidue == 0 {
+ if (*z).Block.BlockHeader.GenesisID == "" {
zb0004Len--
zb0004Mask |= 0x200
}
- if (*z).Block.BlockHeader.GenesisID == "" {
+ if (*z).Block.BlockHeader.GenesisHash.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x400
}
- if (*z).Block.BlockHeader.GenesisHash.MsgIsZero() {
+ if (*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x800
}
- if (*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
+ if (*z).Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x1000
}
- if (*z).Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero() {
+ if (*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x2000
}
- if (*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
+ if (*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0 {
zb0004Len--
zb0004Mask |= 0x4000
}
- if (*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0 {
+ if (*z).OriginalPeriod == 0 {
zb0004Len--
zb0004Mask |= 0x8000
}
- if (*z).OriginalPeriod == 0 {
+ if (*z).OriginalProposer.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x10000
}
- if (*z).OriginalProposer.MsgIsZero() {
+ if len((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
zb0004Len--
zb0004Mask |= 0x20000
}
- if len((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
+ if (*z).Block.BlockHeader.Branch.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x40000
}
- if (*z).Block.BlockHeader.Branch.MsgIsZero() {
+ if (*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x80000
}
- if (*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() {
+ if (*z).Block.BlockHeader.RewardsState.RewardsRate == 0 {
zb0004Len--
zb0004Mask |= 0x100000
}
- if (*z).Block.BlockHeader.RewardsState.RewardsRate == 0 {
+ if (*z).Block.BlockHeader.Round.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x200000
}
- if (*z).Block.BlockHeader.Round.MsgIsZero() {
+ if (*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x400000
}
- if (*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
+ if (*z).Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x800000
}
- if (*z).Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
+ if (*z).SeedProof.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x1000000
}
- if (*z).SeedProof.MsgIsZero() {
+ if (*z).Block.BlockHeader.Seed.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x2000000
}
- if (*z).Block.BlockHeader.Seed.MsgIsZero() {
+ if len((*z).Block.BlockHeader.StateProofTracking) == 0 {
zb0004Len--
zb0004Mask |= 0x4000000
}
@@ -4816,81 +4816,61 @@ func (z *unauthenticatedProposal) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendMapHeader(o, zb0004Len)
if zb0004Len != 0 {
if (zb0004Mask & 0x40) == 0 { // if not empty
- // string "cc"
- o = append(o, 0xa2, 0x63, 0x63)
- if (*z).Block.BlockHeader.CompactCert == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendMapHeader(o, uint32(len((*z).Block.BlockHeader.CompactCert)))
- }
- zb0001_keys := make([]protocol.CompactCertType, 0, len((*z).Block.BlockHeader.CompactCert))
- for zb0001 := range (*z).Block.BlockHeader.CompactCert {
- zb0001_keys = append(zb0001_keys, zb0001)
- }
- sort.Sort(protocol.SortCompactCertType(zb0001_keys))
- for _, zb0001 := range zb0001_keys {
- zb0002 := (*z).Block.BlockHeader.CompactCert[zb0001]
- _ = zb0002
- o = zb0001.MarshalMsg(o)
- o = zb0002.MarshalMsg(o)
- }
- }
- if (zb0004Mask & 0x80) == 0 { // if not empty
// string "earn"
o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e)
o = msgp.AppendUint64(o, (*z).Block.BlockHeader.RewardsState.RewardsLevel)
}
- if (zb0004Mask & 0x100) == 0 { // if not empty
+ if (zb0004Mask & 0x80) == 0 { // if not empty
// string "fees"
o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73)
o = (*z).Block.BlockHeader.RewardsState.FeeSink.MarshalMsg(o)
}
- if (zb0004Mask & 0x200) == 0 { // if not empty
+ if (zb0004Mask & 0x100) == 0 { // if not empty
// string "frac"
o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63)
o = msgp.AppendUint64(o, (*z).Block.BlockHeader.RewardsState.RewardsResidue)
}
- if (zb0004Mask & 0x400) == 0 { // if not empty
+ if (zb0004Mask & 0x200) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).Block.BlockHeader.GenesisID)
}
- if (zb0004Mask & 0x800) == 0 { // if not empty
+ if (zb0004Mask & 0x400) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).Block.BlockHeader.GenesisHash.MarshalMsg(o)
}
- if (zb0004Mask & 0x1000) == 0 { // if not empty
+ if (zb0004Mask & 0x800) == 0 { // if not empty
// string "nextbefore"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
o = (*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MarshalMsg(o)
}
- if (zb0004Mask & 0x2000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000) == 0 { // if not empty
// string "nextproto"
o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).Block.BlockHeader.UpgradeState.NextProtocol.MarshalMsg(o)
}
- if (zb0004Mask & 0x4000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000) == 0 { // if not empty
// string "nextswitch"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68)
o = (*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MarshalMsg(o)
}
- if (zb0004Mask & 0x8000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000) == 0 { // if not empty
// string "nextyes"
o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73)
o = msgp.AppendUint64(o, (*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals)
}
- if (zb0004Mask & 0x10000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000) == 0 { // if not empty
// string "oper"
o = append(o, 0xa4, 0x6f, 0x70, 0x65, 0x72)
o = msgp.AppendUint64(o, uint64((*z).OriginalPeriod))
}
- if (zb0004Mask & 0x20000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000) == 0 { // if not empty
// string "oprop"
o = append(o, 0xa5, 0x6f, 0x70, 0x72, 0x6f, 0x70)
o = (*z).OriginalProposer.MarshalMsg(o)
}
- if (zb0004Mask & 0x40000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000) == 0 { // if not empty
// string "partupdrmv"
o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
if (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts == nil {
@@ -4902,46 +4882,66 @@ func (z *unauthenticatedProposal) MarshalMsg(b []byte) (o []byte) {
o = (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].MarshalMsg(o)
}
}
- if (zb0004Mask & 0x80000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000) == 0 { // if not empty
// string "prev"
o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76)
o = (*z).Block.BlockHeader.Branch.MarshalMsg(o)
}
- if (zb0004Mask & 0x100000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000) == 0 { // if not empty
// string "proto"
o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o)
}
- if (zb0004Mask & 0x200000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000) == 0 { // if not empty
// string "rate"
o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65)
o = msgp.AppendUint64(o, (*z).Block.BlockHeader.RewardsState.RewardsRate)
}
- if (zb0004Mask & 0x400000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000) == 0 { // if not empty
// string "rnd"
o = append(o, 0xa3, 0x72, 0x6e, 0x64)
o = (*z).Block.BlockHeader.Round.MarshalMsg(o)
}
- if (zb0004Mask & 0x800000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000) == 0 { // if not empty
// string "rwcalr"
o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72)
o = (*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o)
}
- if (zb0004Mask & 0x1000000) == 0 { // if not empty
+ if (zb0004Mask & 0x800000) == 0 { // if not empty
// string "rwd"
o = append(o, 0xa3, 0x72, 0x77, 0x64)
o = (*z).Block.BlockHeader.RewardsState.RewardsPool.MarshalMsg(o)
}
- if (zb0004Mask & 0x2000000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000000) == 0 { // if not empty
// string "sdpf"
o = append(o, 0xa4, 0x73, 0x64, 0x70, 0x66)
o = (*z).SeedProof.MarshalMsg(o)
}
- if (zb0004Mask & 0x4000000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000000) == 0 { // if not empty
// string "seed"
o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64)
o = (*z).Block.BlockHeader.Seed.MarshalMsg(o)
}
+ if (zb0004Mask & 0x4000000) == 0 { // if not empty
+ // string "spt"
+ o = append(o, 0xa3, 0x73, 0x70, 0x74)
+ if (*z).Block.BlockHeader.StateProofTracking == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Block.BlockHeader.StateProofTracking)))
+ }
+ zb0001_keys := make([]protocol.StateProofType, 0, len((*z).Block.BlockHeader.StateProofTracking))
+ for zb0001 := range (*z).Block.BlockHeader.StateProofTracking {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(protocol.SortStateProofType(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Block.BlockHeader.StateProofTracking[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ o = zb0002.MarshalMsg(o)
+ }
+ }
if (zb0004Mask & 0x8000000) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
@@ -5194,34 +5194,34 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
var zb0007 bool
zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCert")
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
- if zb0006 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumCompactCertTypes))
- err = msgp.WrapError(err, "struct-from-array", "CompactCert")
+ if zb0006 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumStateProofTypes))
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
if zb0007 {
- (*z).Block.BlockHeader.CompactCert = nil
- } else if (*z).Block.BlockHeader.CompactCert == nil {
- (*z).Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0006)
+ (*z).Block.BlockHeader.StateProofTracking = nil
+ } else if (*z).Block.BlockHeader.StateProofTracking == nil {
+ (*z).Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0006)
}
for zb0006 > 0 {
- var zb0001 protocol.CompactCertType
- var zb0002 bookkeeping.CompactCertState
+ var zb0001 protocol.StateProofType
+ var zb0002 bookkeeping.StateProofTrackingData
zb0006--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCert")
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
bts, err = zb0002.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCert", zb0001)
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking", zb0001)
return
}
- (*z).Block.BlockHeader.CompactCert[zb0001] = zb0002
+ (*z).Block.BlockHeader.StateProofTracking[zb0001] = zb0002
}
}
if zb0004 > 0 {
@@ -5450,39 +5450,39 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
err = msgp.WrapError(err, "TxnCounter")
return
}
- case "cc":
+ case "spt":
var zb0011 int
var zb0012 bool
zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "CompactCert")
+ err = msgp.WrapError(err, "StateProofTracking")
return
}
- if zb0011 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(protocol.NumCompactCertTypes))
- err = msgp.WrapError(err, "CompactCert")
+ if zb0011 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(protocol.NumStateProofTypes))
+ err = msgp.WrapError(err, "StateProofTracking")
return
}
if zb0012 {
- (*z).Block.BlockHeader.CompactCert = nil
- } else if (*z).Block.BlockHeader.CompactCert == nil {
- (*z).Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0011)
+ (*z).Block.BlockHeader.StateProofTracking = nil
+ } else if (*z).Block.BlockHeader.StateProofTracking == nil {
+ (*z).Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0011)
}
for zb0011 > 0 {
- var zb0001 protocol.CompactCertType
- var zb0002 bookkeeping.CompactCertState
+ var zb0001 protocol.StateProofType
+ var zb0002 bookkeeping.StateProofTrackingData
zb0011--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "CompactCert")
+ err = msgp.WrapError(err, "StateProofTracking")
return
}
bts, err = zb0002.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "CompactCert", zb0001)
+ err = msgp.WrapError(err, "StateProofTracking", zb0001)
return
}
- (*z).Block.BlockHeader.CompactCert[zb0001] = zb0002
+ (*z).Block.BlockHeader.StateProofTracking[zb0001] = zb0002
}
case "partupdrmv":
var zb0013 int
@@ -5559,9 +5559,9 @@ func (_ *unauthenticatedProposal) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *unauthenticatedProposal) Msgsize() (s int) {
- s = 3 + 4 + (*z).Block.BlockHeader.Round.Msgsize() + 5 + (*z).Block.BlockHeader.Branch.Msgsize() + 5 + (*z).Block.BlockHeader.Seed.Msgsize() + 4 + (*z).Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).Block.BlockHeader.TxnCommitments.Sha256Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).Block.BlockHeader.GenesisID) + 3 + (*z).Block.BlockHeader.GenesisHash.Msgsize() + 5 + (*z).Block.BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).Block.BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).Block.BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).Block.BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).Block.BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 3 + msgp.MapHeaderSize
- if (*z).Block.BlockHeader.CompactCert != nil {
- for zb0001, zb0002 := range (*z).Block.BlockHeader.CompactCert {
+ s = 3 + 4 + (*z).Block.BlockHeader.Round.Msgsize() + 5 + (*z).Block.BlockHeader.Branch.Msgsize() + 5 + (*z).Block.BlockHeader.Seed.Msgsize() + 4 + (*z).Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).Block.BlockHeader.TxnCommitments.Sha256Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).Block.BlockHeader.GenesisID) + 3 + (*z).Block.BlockHeader.GenesisHash.Msgsize() + 5 + (*z).Block.BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).Block.BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).Block.BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).Block.BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).Block.BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + msgp.MapHeaderSize
+ if (*z).Block.BlockHeader.StateProofTracking != nil {
+ for zb0001, zb0002 := range (*z).Block.BlockHeader.StateProofTracking {
_ = zb0001
_ = zb0002
s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
@@ -5577,7 +5577,7 @@ func (z *unauthenticatedProposal) Msgsize() (s int) {
// MsgIsZero returns whether this is a zero value
func (z *unauthenticatedProposal) MsgIsZero() bool {
- return ((*z).Block.BlockHeader.Round.MsgIsZero()) && ((*z).Block.BlockHeader.Branch.MsgIsZero()) && ((*z).Block.BlockHeader.Seed.MsgIsZero()) && ((*z).Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).Block.BlockHeader.TimeStamp == 0) && ((*z).Block.BlockHeader.GenesisID == "") && ((*z).Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).Block.BlockHeader.TxnCounter == 0) && (len((*z).Block.BlockHeader.CompactCert) == 0) && (len((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).Block.Payset.MsgIsZero()) && ((*z).SeedProof.MsgIsZero()) && ((*z).OriginalPeriod == 0) && ((*z).OriginalProposer.MsgIsZero())
+ return ((*z).Block.BlockHeader.Round.MsgIsZero()) && ((*z).Block.BlockHeader.Branch.MsgIsZero()) && ((*z).Block.BlockHeader.Seed.MsgIsZero()) && ((*z).Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).Block.BlockHeader.TimeStamp == 0) && ((*z).Block.BlockHeader.GenesisID == "") && ((*z).Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).Block.BlockHeader.TxnCounter == 0) && (len((*z).Block.BlockHeader.StateProofTracking) == 0) && (len((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).Block.Payset.MsgIsZero()) && ((*z).SeedProof.MsgIsZero()) && ((*z).OriginalPeriod == 0) && ((*z).OriginalProposer.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
diff --git a/agreement/service_test.go b/agreement/service_test.go
index ff1d19d87..b5ec2ce61 100644
--- a/agreement/service_test.go
+++ b/agreement/service_test.go
@@ -882,8 +882,9 @@ func simulateAgreementWithLedgerFactory(t *testing.T, numNodes int, numRounds in
activityMonitor.waitForQuiet()
zeroes := expectNewPeriod(clocks, 0)
- // run round with current consensus version first
- zeroes = runRound(clocks, activityMonitor, zeroes, FilterTimeout(0, protocol.ConsensusCurrentVersion))
+ // run round with round-specific consensus version first (since fix in #1896)
+ version, _ := baseLedger.ConsensusVersion(ParamsRound(startRound))
+ zeroes = runRound(clocks, activityMonitor, zeroes, FilterTimeout(0, version))
for j := 1; j < numRounds; j++ {
version, _ := baseLedger.ConsensusVersion(ParamsRound(baseLedger.NextRound() + basics.Round(j-1)))
zeroes = runRound(clocks, activityMonitor, zeroes, FilterTimeout(0, version))
@@ -1967,7 +1968,7 @@ func TestAgreementSlowPayloadsPostDeadline(t *testing.T) {
activityMonitor.waitForQuiet()
zeroes = expectNoNewPeriod(clocks, zeroes)
- triggerGlobalTimeout(FilterTimeout(0, version), clocks, activityMonitor)
+ triggerGlobalTimeout(FilterTimeout(1, version), clocks, activityMonitor)
zeroes = expectNewPeriod(clocks, zeroes)
}
diff --git a/agreement/vote.go b/agreement/vote.go
index ea2920c44..e39edd7b7 100644
--- a/agreement/vote.go
+++ b/agreement/vote.go
@@ -129,7 +129,7 @@ func (uv unauthenticatedVote) verify(l LedgerReader) (vote, error) {
ephID := basics.OneTimeIDForRound(rv.Round, m.Record.KeyDilution(proto))
voteID := m.Record.VoteID
- if !voteID.Verify(ephID, rv, uv.Sig, proto.EnableBatchVerification) {
+ if !voteID.Verify(ephID, rv, uv.Sig) {
return vote{}, fmt.Errorf("unauthenticatedVote.verify: could not verify FS signature on vote by %v given %v: %+v", rv.Sender, voteID, uv)
}
diff --git a/buildnumber.dat b/buildnumber.dat
index d00491fd7..573541ac9 100644
--- a/buildnumber.dat
+++ b/buildnumber.dat
@@ -1 +1 @@
-1
+0
diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go
index 87bd19b12..5d01fa960 100644
--- a/catchup/catchpointService.go
+++ b/catchup/catchpointService.go
@@ -19,6 +19,7 @@ package catchup
import (
"context"
"fmt"
+ "github.com/algorand/go-algorand/stateproof"
"sync"
"time"
@@ -464,6 +465,19 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
return nil
}
+// lookbackForStateproofsSupport calculates the lookback (from topblock round) needed to be downloaded
+// in order to support state proofs verification.
+func lookbackForStateproofsSupport(topBlock *bookkeeping.Block) uint64 {
+ proto := config.Consensus[topBlock.CurrentProtocol]
+ if proto.StateProofInterval == 0 {
+ return 0
+ }
+ lowestStateProofRound := stateproof.GetOldestExpectedStateProof(&topBlock.BlockHeader)
+ // in order to be able to confirm lowestStateProofRound we need to have round number: (lowestStateProofRound - stateproofInterval)
+ lowestStateProofRound = lowestStateProofRound.SubSaturate(basics.Round(proto.StateProofInterval))
+ return uint64(topBlock.Round().SubSaturate(lowestStateProofRound))
+}
+
// processStageBlocksDownload is the fourth catchpoint catchup stage. It downloads all the reminder of the blocks, verifying each one of them against it's predecessor.
func (cs *CatchpointCatchupService) processStageBlocksDownload() (err error) {
topBlock, err := cs.ledgerAccessor.EnsureFirstBlock(cs.ctx)
@@ -471,10 +485,17 @@ func (cs *CatchpointCatchupService) processStageBlocksDownload() (err error) {
return cs.abort(fmt.Errorf("processStageBlocksDownload failed, unable to ensure first block : %v", err))
}
- // pick the lookback with the greater of either MaxTxnLife or MaxBalLookback
- lookback := config.Consensus[topBlock.CurrentProtocol].MaxTxnLife
- if lookback < config.Consensus[topBlock.CurrentProtocol].MaxBalLookback {
- lookback = config.Consensus[topBlock.CurrentProtocol].MaxBalLookback
+ // pick the lookback with the greater of either (MaxTxnLife+DeeperBlockHeaderHistory)
+ // or MaxBalLookback
+ proto := config.Consensus[topBlock.CurrentProtocol]
+ lookback := proto.MaxTxnLife + proto.DeeperBlockHeaderHistory
+ if lookback < proto.MaxBalLookback {
+ lookback = proto.MaxBalLookback
+ }
+
+ lookbackForStateProofSupport := lookbackForStateproofsSupport(&topBlock)
+ if lookback < lookbackForStateProofSupport {
+ lookback = lookbackForStateProofSupport
}
// in case the effective lookback is going before our rounds count, trim it there.
// ( a catchpoint is generated starting round MaxBalLookback, and this is a possible in any round in the range of MaxBalLookback..MaxTxnLife)
diff --git a/catchup/ledgerFetcher.go b/catchup/ledgerFetcher.go
index 0b22c66a2..afc39414d 100644
--- a/catchup/ledgerFetcher.go
+++ b/catchup/ledgerFetcher.go
@@ -166,20 +166,9 @@ func (lf *ledgerFetcher) getPeerLedger(ctx context.Context, peer network.HTTPPee
return fmt.Errorf("getPeerLedger received a tar header with data size of %d", header.Size)
}
balancesBlockBytes := make([]byte, header.Size)
- readComplete := int64(0)
-
- for readComplete < header.Size {
- bytesRead, err := tarReader.Read(balancesBlockBytes[readComplete:])
- readComplete += int64(bytesRead)
- if err != nil {
- if err == io.EOF {
- if readComplete == header.Size {
- break
- }
- err = fmt.Errorf("getPeerLedger received io.EOF while reading from tar file stream prior of reaching chunk size %d / %d", readComplete, header.Size)
- }
- return err
- }
+ _, err = io.ReadFull(tarReader, balancesBlockBytes)
+ if err != nil {
+ return err
}
start := time.Now()
err = lf.processBalancesBlock(ctx, header.Name, balancesBlockBytes, &downloadProgress)
diff --git a/catchup/service.go b/catchup/service.go
index f022a3dcc..adc313db6 100644
--- a/catchup/service.go
+++ b/catchup/service.go
@@ -57,7 +57,7 @@ type Ledger interface {
EnsureBlock(block *bookkeeping.Block, c agreement.Certificate)
LastRound() basics.Round
Block(basics.Round) (bookkeeping.Block, error)
- IsWritingCatchpointFile() bool
+ IsWritingCatchpointDataFile() bool
Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledgercore.ValidatedBlock, error)
AddValidatedBlock(vb ledgercore.ValidatedBlock, cert agreement.Certificate) error
}
@@ -493,7 +493,7 @@ func (s *Service) pipelinedFetch(seedLookback uint64) {
}
// if we're writing a catchpoint file, stop catching up to reduce the memory pressure. Once we finish writing the file we
// could resume with the catchup.
- if s.ledger.IsWritingCatchpointFile() {
+ if s.ledger.IsWritingCatchpointDataFile() {
s.log.Info("Catchup is stopping due to catchpoint file being written")
s.suspendForCatchpointWriting = true
return
@@ -554,7 +554,7 @@ func (s *Service) periodicSync() {
continue
}
// check to see if we're currently writing a catchpoint file. If so, wait longer before attempting again.
- if s.ledger.IsWritingCatchpointFile() {
+ if s.ledger.IsWritingCatchpointDataFile() {
// keep the existing sleep duration and try again later.
continue
}
diff --git a/catchup/service_test.go b/catchup/service_test.go
index 6ab2f2217..679ee3239 100644
--- a/catchup/service_test.go
+++ b/catchup/service_test.go
@@ -26,6 +26,7 @@ import (
"time"
"github.com/algorand/go-deadlock"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/agreement"
@@ -726,7 +727,7 @@ func (m *mockedLedger) LookupAgreement(basics.Round, basics.Address) (basics.Onl
return basics.OnlineAccountData{}, errors.New("not needed for mockedLedger")
}
-func (m *mockedLedger) IsWritingCatchpointFile() bool {
+func (m *mockedLedger) IsWritingCatchpointDataFile() bool {
return false
}
@@ -791,6 +792,7 @@ func TestCatchupUnmatchedCertificate(t *testing.T) {
t.Fatal(err)
return
}
+ defer remote.Close()
addBlocks(t, remote, blk, numBlocks-1)
// Create a network and block service
@@ -956,7 +958,7 @@ func TestServiceStartStop(t *testing.T) {
s := MakeService(logging.Base(), cfg, &httpTestPeerSource{}, ledger, &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
s.Start()
s.Stop()
- _, ok := (<-s.done)
+ _, ok := <-s.done
require.False(t, ok)
}
@@ -972,3 +974,40 @@ func TestSynchronizingTime(t *testing.T) {
atomic.StoreInt64(&s.syncStartNS, 1000000)
require.NotEqual(t, time.Duration(0), s.SynchronizingTime())
}
+
+func TestDownloadBlocksToSupportStateProofs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // make sure we download enough blocks to verify state proof 512
+ topBlk := bookkeeping.Block{}
+ topBlk.BlockHeader.Round = 1500
+ topBlk.BlockHeader.CurrentProtocol = protocol.ConsensusCurrentVersion
+ trackingData := bookkeeping.StateProofTrackingData{StateProofNextRound: 512}
+ topBlk.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
+ topBlk.BlockHeader.StateProofTracking[protocol.StateProofBasic] = trackingData
+
+ lookback := lookbackForStateproofsSupport(&topBlk)
+ oldestRound := topBlk.BlockHeader.Round.SubSaturate(basics.Round(lookback))
+ assert.Equal(t, uint64(oldestRound), 512-config.Consensus[protocol.ConsensusFuture].StateProofInterval)
+
+ // the network has made progress and now it is on round 8000. in this case we would not download blocks to cover 512.
+ // instead, we will download blocks to confirm only the recovery period lookback.
+ topBlk = bookkeeping.Block{}
+ topBlk.BlockHeader.Round = 8000
+ topBlk.BlockHeader.CurrentProtocol = protocol.ConsensusCurrentVersion
+ trackingData = bookkeeping.StateProofTrackingData{StateProofNextRound: 512}
+ topBlk.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
+ topBlk.BlockHeader.StateProofTracking[protocol.StateProofBasic] = trackingData
+
+ lookback = lookbackForStateproofsSupport(&topBlk)
+ oldestRound = topBlk.BlockHeader.Round.SubSaturate(basics.Round(lookback))
+
+ lowestRoundToRetain := 8000 - (8000 % 256) - (config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval * (config.Consensus[protocol.ConsensusCurrentVersion].StateProofMaxRecoveryIntervals + 1))
+ assert.Equal(t, uint64(oldestRound), lowestRoundToRetain)
+
+ topBlk = bookkeeping.Block{}
+ topBlk.BlockHeader.Round = 8000
+ topBlk.BlockHeader.CurrentProtocol = protocol.ConsensusV32
+ lookback = lookbackForStateproofsSupport(&topBlk)
+ assert.Equal(t, uint64(0), lookback)
+}
diff --git a/cmd/algod/main_test.go b/cmd/algod/main_test.go
index 3b0097962..13fa72092 100644
--- a/cmd/algod/main_test.go
+++ b/cmd/algod/main_test.go
@@ -29,9 +29,7 @@ import (
)
func BenchmarkAlgodStartup(b *testing.B) {
- tmpDir, err := ioutil.TempDir(os.TempDir(), "BenchmarkAlgodStartup")
- require.NoError(b, err)
- defer os.RemoveAll(tmpDir)
+ tmpDir := b.TempDir()
genesisFile, err := ioutil.ReadFile("../../installer/genesis/devnet/genesis.json")
require.NoError(b, err)
diff --git a/cmd/algokey/part.go b/cmd/algokey/part.go
index 41383cedb..3fb7e49c2 100644
--- a/cmd/algokey/part.go
+++ b/cmd/algokey/part.go
@@ -24,6 +24,7 @@ import (
"github.com/spf13/cobra"
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/util"
@@ -75,6 +76,7 @@ var partGenerateCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Cannot open partkey database %s: %v\n", partKeyfile, err)
os.Exit(1)
}
+ defer partdb.Close()
fmt.Println("Please stand by while generating keys. This might take a few minutes...")
@@ -97,6 +99,9 @@ var partGenerateCmd = &cobra.Command{
fmt.Println("Participation key generation successful")
printPartkey(partkey.Participation)
+
+ version := config.GetCurrentVersion()
+ fmt.Println("\nGenerated with algokey v" + version.String())
},
}
@@ -112,6 +117,7 @@ var partInfoCmd = &cobra.Command{
}
partkey, err := account.RestoreParticipation(partdb)
+ partdb.Close()
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot load partkey database %s: %v\n", partKeyfile, err)
os.Exit(1)
@@ -138,6 +144,7 @@ var partReparentCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Cannot open partkey database %s: %v\n", partKeyfile, err)
os.Exit(1)
}
+ defer partdb.Close()
partkey, err := account.RestoreParticipation(partdb)
if err != nil {
@@ -161,8 +168,9 @@ func printPartkey(partkey account.Participation) {
fmt.Printf("Parent address: %s\n", partkey.Parent.String())
fmt.Printf("VRF public key: %s\n", base64.StdEncoding.EncodeToString(partkey.VRF.PK[:]))
fmt.Printf("Voting public key: %s\n", base64.StdEncoding.EncodeToString(partkey.Voting.OneTimeSignatureVerifier[:]))
- if partkey.StateProofSecrets != nil && !partkey.StateProofSecrets.GetVerifier().IsEmpty() {
- fmt.Printf("State proof key: %s\n", base64.StdEncoding.EncodeToString(partkey.StateProofSecrets.GetVerifier()[:]))
+ if partkey.StateProofSecrets != nil && !partkey.StateProofSecrets.GetVerifier().MsgIsZero() {
+ fmt.Printf("State proof key: %s\n", base64.StdEncoding.EncodeToString(partkey.StateProofSecrets.GetVerifier().Commitment[:]))
+ fmt.Printf("State proof key lifetime: %d\n", partkey.StateProofSecrets.GetVerifier().KeyLifetime)
}
fmt.Printf("First round: %d\n", partkey.FirstValid)
fmt.Printf("Last round: %d\n", partkey.LastValid)
diff --git a/cmd/catchpointdump/net.go b/cmd/catchpointdump/net.go
index 113ab7899..9e8dc4561 100644
--- a/cmd/catchpointdump/net.go
+++ b/cmd/catchpointdump/net.go
@@ -203,15 +203,12 @@ func doDownloadCatchpoint(url string, wdReader util.WatchdogStreamReader, out io
for {
n, err := wdReader.Read(tempBytes)
- if err == io.EOF {
- return nil
- }
- if err != nil {
+ if err != nil && err != io.EOF {
return err
}
totalBytes += n
- _, err = out.Write(tempBytes[:n])
- if err != nil {
+ writtenBytes, err := out.Write(tempBytes[:n])
+ if err != nil || n != writtenBytes {
return err
}
diff --git a/cmd/goal/account.go b/cmd/goal/account.go
index c78ed0872..6cd3b9a87 100644
--- a/cmd/goal/account.go
+++ b/cmd/goal/account.go
@@ -139,8 +139,9 @@ func init() {
rewardsCmd.MarkFlagRequired("address")
// changeOnlineStatus flags
- changeOnlineCmd.Flags().StringVarP(&accountAddress, "address", "a", "", "Account address to change (required if no -partkeyfile)")
- changeOnlineCmd.Flags().StringVarP(&partKeyFile, "partkeyfile", "", "", "Participation key file (required if no -account)")
+ changeOnlineCmd.Flags().StringVarP(&accountAddress, "address", "a", "", "Account address to change (required if no --partkeyfile)")
+ changeOnlineCmd.Flags().StringVarP(&partKeyFile, "partkeyfile", "", "", "Participation key file (required if no --address)")
+ changeOnlineCmd.Flags().StringVarP(&signerAddress, "signer", "S", "", "Address of key to sign with, if different due to rekeying")
changeOnlineCmd.Flags().BoolVarP(&online, "online", "o", true, "Set this account to online or offline")
changeOnlineCmd.Flags().Uint64VarP(&transactionFee, "fee", "f", 0, "The Fee to set on the status change transaction (defaults to suggested fee)")
changeOnlineCmd.Flags().Uint64VarP(&firstValid, "firstRound", "", 0, "")
@@ -203,6 +204,7 @@ func init() {
// markNonparticipatingCmd flags
markNonparticipatingCmd.Flags().StringVarP(&accountAddress, "address", "a", "", "Account address to change")
markNonparticipatingCmd.MarkFlagRequired("address")
+ markNonparticipatingCmd.Flags().StringVarP(&signerAddress, "signer", "S", "", "Address of key to sign with, if different from address due to rekeying")
markNonparticipatingCmd.Flags().Uint64VarP(&transactionFee, "fee", "f", 0, "The Fee to set on the status change transaction (defaults to suggested fee)")
markNonparticipatingCmd.Flags().Uint64VarP(&firstValid, "firstRound", "", 0, "")
markNonparticipatingCmd.Flags().Uint64VarP(&firstValid, "firstvalid", "", 0, "FirstValid for the status change transaction (0 for current)")
@@ -783,13 +785,11 @@ var changeOnlineCmd = &cobra.Command{
checkTxValidityPeriodCmdFlags(cmd)
if accountAddress == "" && partKeyFile == "" {
- fmt.Printf("Must specify one of --address or --partkeyfile\n")
- os.Exit(1)
+ reportErrorf("Must specify one of --address or --partkeyfile\n")
}
if partKeyFile != "" && !online {
- fmt.Printf("Going offline does not support --partkeyfile\n")
- os.Exit(1)
+ reportErrorf("Going offline does not support --partkeyfile\n")
}
dataDir := ensureSingleDataDir()
@@ -805,14 +805,12 @@ var changeOnlineCmd = &cobra.Command{
if partKeyFile != "" {
partdb, err := db.MakeErasableAccessor(partKeyFile)
if err != nil {
- fmt.Printf("Cannot open partkey %s: %v\n", partKeyFile, err)
- os.Exit(1)
+ reportErrorf("Cannot open partkey %s: %v\n", partKeyFile, err)
}
partkey, err := algodAcct.RestoreParticipation(partdb)
if err != nil {
- fmt.Printf("Cannot load partkey %s: %v\n", partKeyFile, err)
- os.Exit(1)
+ reportErrorf("Cannot load partkey %s: %v\n", partKeyFile, err)
}
part = &partkey.Participation
@@ -821,7 +819,7 @@ var changeOnlineCmd = &cobra.Command{
}
}
- firstTxRound, lastTxRound, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ firstTxRound, lastTxRound, _, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf(err.Error())
}
@@ -858,9 +856,14 @@ func changeAccountOnlineStatus(
// Sign & broadcast the transaction
wh, pw := ensureWalletHandleMaybePassword(dataDir, wallet, true)
- txid, err := client.SignAndBroadcastTransaction(wh, pw, utx)
+ signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, utx)
+ if err != nil {
+ return fmt.Errorf(errorSigningTX, err)
+ }
+
+ txid, err := client.BroadcastTransaction(signedTxn)
if err != nil {
- return fmt.Errorf(errorOnlineTX, err)
+ return fmt.Errorf(errorBroadcastingTX, err)
}
fmt.Printf("Transaction id for status change transaction: %s\n", txid)
@@ -902,6 +905,9 @@ var addParticipationKeyCmd = &cobra.Command{
}
reportInfof("Participation key generation successful. Participation ID: %s\n", part.ID())
+
+ version := config.GetCurrentVersion()
+ fmt.Println("\nGenerated with goal v" + version.String())
},
}
@@ -912,7 +918,7 @@ var installParticipationKeyCmd = &cobra.Command{
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
if !partKeyDeleteInput {
- fmt.Println(
+ reportErrorf(
`The installpartkey command deletes the input participation file on
successful installation. Please acknowledge this by passing the
"--delete-input" flag to the installpartkey command. You can make
@@ -922,7 +928,6 @@ forward security. Storing old participation keys compromises overall
system security.
No --delete-input flag specified, exiting without installing key.`)
- os.Exit(1)
}
dataDir := ensureSingleDataDir()
@@ -990,6 +995,9 @@ var renewParticipationKeyCmd = &cobra.Command{
if err != nil {
reportErrorf(err.Error())
}
+
+ version := config.GetCurrentVersion()
+ fmt.Println("\nGenerated with goal v" + version.String())
},
}
@@ -1422,7 +1430,7 @@ var markNonparticipatingCmd = &cobra.Command{
dataDir := ensureSingleDataDir()
client := ensureFullClient(dataDir)
- firstTxRound, lastTxRound, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ firstTxRound, lastTxRound, _, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf(errorConstructingTX, err)
}
@@ -1441,9 +1449,14 @@ var markNonparticipatingCmd = &cobra.Command{
// Sign & broadcast the transaction
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- txid, err := client.SignAndBroadcastTransaction(wh, pw, utx)
+ signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, utx)
+ if err != nil {
+ reportErrorf(errorSigningTX, err)
+ }
+
+ txid, err := client.BroadcastTransaction(signedTxn)
if err != nil {
- reportErrorf(errorOnlineTX, err)
+ reportErrorf(errorBroadcastingTX, err)
}
fmt.Printf("Transaction id for mark-nonparticipating transaction: %s\n", txid)
diff --git a/cmd/goal/application.go b/cmd/goal/application.go
index 907a7b291..884c3d6e6 100644
--- a/cmd/goal/application.go
+++ b/cmd/goal/application.go
@@ -461,7 +461,7 @@ var createAppCmd = &cobra.Command{
tx.Lease = parseLease(cmd)
// Fill in rounds, fee, etc.
- fv, lv, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ fv, lv, _, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf("Cannot determine last valid round: %s", err)
}
@@ -478,7 +478,7 @@ var createAppCmd = &cobra.Command{
if outFilename == "" {
// Broadcast
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWallet(wh, pw, tx)
+ signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
if err != nil {
reportErrorf(errorSigningTX, err)
}
@@ -536,7 +536,7 @@ var updateAppCmd = &cobra.Command{
tx.Lease = parseLease(cmd)
// Fill in rounds, fee, etc.
- fv, lv, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ fv, lv, _, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf("Cannot determine last valid round: %s", err)
}
@@ -553,7 +553,7 @@ var updateAppCmd = &cobra.Command{
// Broadcast or write transaction to file
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWallet(wh, pw, tx)
+ signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
if err != nil {
reportErrorf(errorSigningTX, err)
}
@@ -606,7 +606,7 @@ var optInAppCmd = &cobra.Command{
tx.Lease = parseLease(cmd)
// Fill in rounds, fee, etc.
- fv, lv, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ fv, lv, _, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf("Cannot determine last valid round: %s", err)
}
@@ -623,7 +623,7 @@ var optInAppCmd = &cobra.Command{
// Broadcast or write transaction to file
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWallet(wh, pw, tx)
+ signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
if err != nil {
reportErrorf(errorSigningTX, err)
}
@@ -676,7 +676,7 @@ var closeOutAppCmd = &cobra.Command{
tx.Lease = parseLease(cmd)
// Fill in rounds, fee, etc.
- fv, lv, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ fv, lv, _, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf("Cannot determine last valid round: %s", err)
}
@@ -693,7 +693,7 @@ var closeOutAppCmd = &cobra.Command{
// Broadcast or write transaction to file
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWallet(wh, pw, tx)
+ signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
if err != nil {
reportErrorf(errorSigningTX, err)
}
@@ -746,7 +746,7 @@ var clearAppCmd = &cobra.Command{
tx.Lease = parseLease(cmd)
// Fill in rounds, fee, etc.
- fv, lv, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ fv, lv, _, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf("Cannot determine last valid round: %s", err)
}
@@ -763,7 +763,7 @@ var clearAppCmd = &cobra.Command{
// Broadcast or write transaction to file
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWallet(wh, pw, tx)
+ signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
if err != nil {
reportErrorf(errorSigningTX, err)
}
@@ -816,7 +816,7 @@ var callAppCmd = &cobra.Command{
tx.Lease = parseLease(cmd)
// Fill in rounds, fee, etc.
- fv, lv, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ fv, lv, _, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf("Cannot determine last valid round: %s", err)
}
@@ -833,7 +833,7 @@ var callAppCmd = &cobra.Command{
// Broadcast or write transaction to file
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWallet(wh, pw, tx)
+ signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
if err != nil {
reportErrorf(errorSigningTX, err)
}
@@ -886,7 +886,7 @@ var deleteAppCmd = &cobra.Command{
tx.Lease = parseLease(cmd)
// Fill in rounds, fee, etc.
- fv, lv, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ fv, lv, _, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf("Cannot determine last valid round: %s", err)
}
@@ -903,7 +903,7 @@ var deleteAppCmd = &cobra.Command{
// Broadcast or write transaction to file
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWallet(wh, pw, tx)
+ signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
if err != nil {
reportErrorf(errorSigningTX, err)
}
@@ -927,7 +927,6 @@ var deleteAppCmd = &cobra.Command{
err = writeDryrunReqToFile(client, tx, outFilename)
} else {
err = writeTxnToFile(client, sign, dataDir, walletName, tx, outFilename)
-
}
if err != nil {
reportErrorf(err.Error())
@@ -1308,7 +1307,7 @@ var methodAppCmd = &cobra.Command{
appCallTxn.Lease = parseLease(cmd)
// Fill in rounds, fee, etc.
- fv, lv, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ fv, lv, _, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf("Cannot determine last valid round: %s", err)
}
diff --git a/cmd/goal/asset.go b/cmd/goal/asset.go
index 9befa127d..5826fd083 100644
--- a/cmd/goal/asset.go
+++ b/cmd/goal/asset.go
@@ -283,7 +283,7 @@ var createAssetCmd = &cobra.Command{
tx.Note = parseNoteField(cmd)
tx.Lease = parseLease(cmd)
- fv, lv, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ fv, lv, _, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf("Cannot determine last valid round: %s", err)
}
@@ -298,7 +298,7 @@ var createAssetCmd = &cobra.Command{
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWallet(wh, pw, tx)
+ signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
if err != nil {
reportErrorf(errorSigningTX, err)
}
@@ -362,7 +362,7 @@ var destroyAssetCmd = &cobra.Command{
tx.Note = parseNoteField(cmd)
tx.Lease = parseLease(cmd)
- firstValid, lastValid, err = client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ firstValid, lastValid, _, err = client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf("Cannot determine last valid round: %s", err)
}
@@ -377,7 +377,7 @@ var destroyAssetCmd = &cobra.Command{
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWallet(wh, pw, tx)
+ signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
if err != nil {
reportErrorf(errorSigningTX, err)
}
@@ -455,7 +455,7 @@ var configAssetCmd = &cobra.Command{
tx.Note = parseNoteField(cmd)
tx.Lease = parseLease(cmd)
- firstValid, lastValid, err = client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ firstValid, lastValid, _, err = client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf("Cannot determine last valid round: %s", err)
}
@@ -470,7 +470,7 @@ var configAssetCmd = &cobra.Command{
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWallet(wh, pw, tx)
+ signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
if err != nil {
reportErrorf(errorSigningTX, err)
}
@@ -540,7 +540,7 @@ var sendAssetCmd = &cobra.Command{
tx.Note = parseNoteField(cmd)
tx.Lease = parseLease(cmd)
- firstValid, lastValid, err = client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ firstValid, lastValid, _, err = client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf("Cannot determine last valid round: %s", err)
}
@@ -557,7 +557,7 @@ var sendAssetCmd = &cobra.Command{
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWallet(wh, pw, tx)
+ signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
if err != nil {
reportErrorf(errorSigningTX, err)
}
@@ -611,7 +611,7 @@ var freezeAssetCmd = &cobra.Command{
tx.Note = parseNoteField(cmd)
tx.Lease = parseLease(cmd)
- firstValid, lastValid, err = client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ firstValid, lastValid, _, err = client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf("Cannot determine last valid round: %s", err)
}
@@ -626,7 +626,7 @@ var freezeAssetCmd = &cobra.Command{
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWallet(wh, pw, tx)
+ signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
if err != nil {
reportErrorf(errorSigningTX, err)
}
@@ -698,7 +698,7 @@ var optinAssetCmd = &cobra.Command{
tx.Note = parseNoteField(cmd)
tx.Lease = parseLease(cmd)
- firstValid, lastValid, err = client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ firstValid, lastValid, _, err = client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf("Cannot determine last valid round: %s", err)
}
@@ -715,7 +715,7 @@ var optinAssetCmd = &cobra.Command{
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWallet(wh, pw, tx)
+ signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
if err != nil {
reportErrorf(errorSigningTX, err)
}
diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go
index 000304fa3..2be5ff332 100644
--- a/cmd/goal/clerk.go
+++ b/cmd/goal/clerk.go
@@ -230,7 +230,16 @@ func writeSignedTxnsToFile(stxns []transactions.SignedTxn, filename string) erro
}
func writeTxnToFile(client libgoal.Client, signTx bool, dataDir string, walletName string, tx transactions.Transaction, filename string) error {
- stxn, err := createSignedTransaction(client, signTx, dataDir, walletName, tx, basics.Address{})
+ var authAddr basics.Address
+ var err error
+ if signerAddress != "" {
+ authAddr, err = basics.UnmarshalChecksumAddress(signerAddress)
+ if err != nil {
+ reportErrorf("Signer invalid (%s): %v", signerAddress, err)
+ }
+ }
+
+ stxn, err := createSignedTransaction(client, signTx, dataDir, walletName, tx, authAddr)
if err != nil {
return err
}
@@ -375,7 +384,7 @@ var sendCmd = &cobra.Command{
}
}
client := ensureFullClient(dataDir)
- firstValid, lastValid, err = client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ firstValid, lastValid, _, err = client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf(err.Error())
}
@@ -415,7 +424,7 @@ var sendCmd = &cobra.Command{
CurrentProtocol: proto,
},
}
- groupCtx, err := verify.PrepareGroupContext([]transactions.SignedTxn{uncheckedTxn}, blockHeader)
+ groupCtx, err := verify.PrepareGroupContext([]transactions.SignedTxn{uncheckedTxn}, blockHeader, nil)
if err == nil {
err = verify.LogicSigSanityCheck(&uncheckedTxn, 0, groupCtx)
}
@@ -433,7 +442,17 @@ var sendCmd = &cobra.Command{
}
} else {
signTx := sign || (outFilename == "")
- stx, err = createSignedTransaction(client, signTx, dataDir, walletName, payment, basics.Address{})
+ var authAddr basics.Address
+ if signerAddress != "" {
+ if !signTx {
+ reportErrorf("Signer specified when txn won't be signed")
+ }
+ authAddr, err = basics.UnmarshalChecksumAddress(signerAddress)
+ if err != nil {
+ reportErrorf("Signer invalid (%s): %v", signerAddress, err)
+ }
+ }
+ stx, err = createSignedTransaction(client, signTx, dataDir, walletName, payment, authAddr)
if err != nil {
reportErrorf(errorSigningTX, err)
}
@@ -806,7 +825,7 @@ var signCmd = &cobra.Command{
}
var groupCtx *verify.GroupContext
if lsig.Logic != nil {
- groupCtx, err = verify.PrepareGroupContext(txnGroup, contextHdr)
+ groupCtx, err = verify.PrepareGroupContext(txnGroup, contextHdr, nil)
if err != nil {
// this error has to be unsupported protocol
reportErrorf("%s: %v", txFilename, err)
@@ -1143,6 +1162,7 @@ var dryrunCmd = &cobra.Command{
reportErrorf("program size too large: %d > %d", len(txn.Lsig.Logic), params.LogicSigMaxSize)
}
ep := logic.NewEvalParams(txgroup, &params, nil)
+ ep.SigLedger = logic.NoHeaderLedger{}
err := logic.CheckSignature(i, ep)
if err != nil {
reportErrorf("program failed Check: %s", err)
diff --git a/cmd/goal/common.go b/cmd/goal/common.go
index 56dde3af7..893fcd176 100644
--- a/cmd/goal/common.go
+++ b/cmd/goal/common.go
@@ -67,4 +67,5 @@ func addTxnFlags(cmd *cobra.Command) {
cmd.Flags().BoolVar(&dumpForDryrun, "dryrun-dump", false, "Dump in dryrun format acceptable by dryrun REST api")
cmd.Flags().Var(&dumpForDryrunFormat, "dryrun-dump-format", "Dryrun dump format: "+dumpForDryrunFormat.AllowedString())
cmd.Flags().StringSliceVar(&dumpForDryrunAccts, "dryrun-accounts", nil, "additional accounts to include into dryrun request obj")
+ cmd.Flags().StringVarP(&signerAddress, "signer", "S", "", "Address of key to sign with, if different from transaction \"from\" address due to rekeying")
}
diff --git a/cmd/goal/interact.go b/cmd/goal/interact.go
index 6b157a50d..45fb5bf8a 100644
--- a/cmd/goal/interact.go
+++ b/cmd/goal/interact.go
@@ -54,6 +54,7 @@ func init() {
appExecuteCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID (if omitted, zero, which creates an application)")
appExecuteCmd.Flags().StringVarP(&account, "from", "f", "", "Account to execute interaction from")
+ appExecuteCmd.Flags().StringVarP(&signerAddress, "signer", "S", "", "Address of key to sign with, if different from \"from\" address due to rekeying")
appExecuteCmd.Flags().SetInterspersed(false)
appExecuteCmd.MarkFlagRequired("from")
}
@@ -595,7 +596,7 @@ var appExecuteCmd = &cobra.Command{
tx.Lease = parseLease(cmd)
// Fill in rounds, fee, etc.
- fv, lv, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ fv, lv, _, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf("Cannot determine last valid round: %s", err)
}
@@ -611,7 +612,7 @@ var appExecuteCmd = &cobra.Command{
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWallet(wh, pw, tx)
+ signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
if err != nil {
reportErrorf(errorSigningTX, err)
}
diff --git a/cmd/goal/logging.go b/cmd/goal/logging.go
index ace99abbf..a54b46031 100644
--- a/cmd/goal/logging.go
+++ b/cmd/goal/logging.go
@@ -123,6 +123,8 @@ var loggingSendCmd = &cobra.Command{
modifier := ""
counter := uint(1)
+ errcount := 0
+ var firsterr error = nil
onDataDirs(func(dataDir string) {
cfg, err := logging.EnsureTelemetryConfig(&dataDir, "")
if err != nil {
@@ -138,9 +140,16 @@ var loggingSendCmd = &cobra.Command{
for err := range logging.CollectAndUploadData(dataDir, name, targetFolder) {
fmt.Fprintf(os.Stderr, "%v\n", err)
+ if firsterr == nil {
+ firsterr = err
+ }
+ errcount++
}
modifier = fmt.Sprintf("-%d", counter)
counter++
})
+ if errcount != 0 {
+ reportErrorf("had %d errors, first: %v", errcount, firsterr)
+ }
},
}
diff --git a/cmd/goal/messages.go b/cmd/goal/messages.go
index 6a772e137..35ec43efa 100644
--- a/cmd/goal/messages.go
+++ b/cmd/goal/messages.go
@@ -37,8 +37,7 @@ const (
errorNameAlreadyTaken = "The account name '%s' is already taken, please choose another."
errorNameDoesntExist = "An account named '%s' does not exist."
infoSetAccountToDefault = "Set account '%s' to be the default account"
- errorSigningTX = "Couldn't sign tx with kmd: %s"
- errorOnlineTX = "Couldn't sign tx: %s (for multisig accounts, write tx to file and sign manually)"
+ errorSigningTX = "Couldn't sign tx with kmd: %s (for multisig accounts, write tx to file and sign manually)"
errorConstructingTX = "Couldn't construct tx: %s"
errorBroadcastingTX = "Couldn't broadcast tx with algod: %s"
warnMultisigDuplicatesDetected = "Warning: one or more duplicate addresses detected in multisig account creation. This will effectively give the duplicated address(es) extra signature weight. Continuing multisig account creation."
@@ -56,38 +55,39 @@ const (
errorKMDFailedToStop = "Failed to stop kmd: %s"
// Node
- infoNodeStart = "Algorand node successfully started!"
- infoNodeAlreadyStarted = "Algorand node was already started!"
- infoNodeDidNotRestart = "Algorand node did not restart. The node is still running!"
- infoTryingToStopNode = "Trying to stop the node..."
- infoNodeShuttingDown = "Algorand node is shutting down..."
- infoNodeSuccessfullyStopped = "The node was successfully stopped."
- infoNodeStatus = "Last committed block: %d\nTime since last block: %s\nSync Time: %s\nLast consensus protocol: %s\nNext consensus protocol: %s\nRound for next consensus protocol: %d\nNext consensus protocol supported: %v"
- catchupStoppedOnUnsupported = "Last supported block (%d) is committed. The next block consensus protocol is not supported. Catchup service is stopped."
- infoNodeCatchpointCatchupStatus = "Last committed block: %d\nSync Time: %s\nCatchpoint: %s"
- infoNodeCatchpointCatchupAccounts = "Catchpoint total accounts: %d\nCatchpoint accounts processed: %d\nCatchpoint accounts verified: %d"
- infoNodeCatchpointCatchupBlocks = "Catchpoint total blocks: %d\nCatchpoint downloaded blocks: %d"
- nodeLastCatchpoint = "Last Catchpoint: %s"
- errorNodeCreationIPFailure = "Parsing passed IP %v failed: need a valid IPv4 or IPv6 address with a specified port number"
- errorNodeNotDetected = "Algorand node does not appear to be running: %s"
- errorNodeStatus = "Cannot contact Algorand node: %s"
- errorNodeFailedToStart = "Algorand node failed to start: %s"
- errorNodeRunning = "Node must be stopped before writing APIToken"
- errorNodeFailGenToken = "Cannot generate API token: %s"
- errorNodeCreation = "Error during node creation: %v"
- errorNodeManagedBySystemd = "This node is using systemd and should be managed with systemctl. For additional information refer to https://developer.algorand.org/docs/run-a-node/setup/install/#installing-algod-as-a-systemd-service"
- errorKill = "Cannot kill node: %s"
- errorCloningNode = "Error cloning the node: %s"
- infoNodeCloned = "Node cloned successfully to: %s"
- infoNodeWroteToken = "Successfully wrote new API token: %s"
- infoNodePendingTxnsDescription = "Pending Transactions (Truncated max=%d, Total in pool=%d): "
- infoNodeNoPendingTxnsDescription = "None"
- infoDataDir = "[Data Directory: %s]"
- errLoadingConfig = "Error loading Config file from '%s': %v"
- errorNodeFailedToShutdown = "Unable to shut down node: %v"
- errorCatchpointLabelParsingFailed = "The provided catchpoint is not a valid one"
- errorCatchpointLabelMissing = "A catchpoint argument is needed"
- errorTooManyCatchpointLabels = "The catchup command expect a single catchpoint"
+ infoNodeStart = "Algorand node successfully started!"
+ infoNodeAlreadyStarted = "Algorand node was already started!"
+ infoNodeDidNotRestart = "Algorand node did not restart. The node is still running!"
+ infoTryingToStopNode = "Trying to stop the node..."
+ infoNodeShuttingDown = "Algorand node is shutting down..."
+ infoNodeSuccessfullyStopped = "The node was successfully stopped."
+ infoNodeStatus = "Last committed block: %d\nTime since last block: %s\nSync Time: %s\nLast consensus protocol: %s\nNext consensus protocol: %s\nRound for next consensus protocol: %d\nNext consensus protocol supported: %v"
+ catchupStoppedOnUnsupported = "Last supported block (%d) is committed. The next block consensus protocol is not supported. Catchup service is stopped."
+ infoNodeCatchpointCatchupStatus = "Last committed block: %d\nSync Time: %s\nCatchpoint: %s"
+ infoNodeCatchpointCatchupAccounts = "Catchpoint total accounts: %d\nCatchpoint accounts processed: %d\nCatchpoint accounts verified: %d"
+ infoNodeCatchpointCatchupBlocks = "Catchpoint total blocks: %d\nCatchpoint downloaded blocks: %d"
+ nodeLastCatchpoint = "Last Catchpoint: %s"
+ errorNodeCreationIPFailure = "Parsing passed IP %v failed: need a valid IPv4 or IPv6 address with a specified port number"
+ errorNodeNotDetected = "Algorand node does not appear to be running: %s"
+ errorNodeStatus = "Cannot contact Algorand node: %s"
+ errorNodeFailedToStart = "Algorand node failed to start: %s"
+ errorNodeRunning = "Node must be stopped before writing APIToken"
+ errorNodeFailGenToken = "Cannot generate API token: %s"
+ errorNodeCreation = "Error during node creation: %v"
+ errorNodeManagedBySystemd = "This node is using systemd and should be managed with systemctl. For additional information refer to https://developer.algorand.org/docs/run-a-node/setup/install/#installing-algod-as-a-systemd-service"
+ errorKill = "Cannot kill node: %s"
+ errorCloningNode = "Error cloning the node: %s"
+ infoNodeCloned = "Node cloned successfully to: %s"
+ infoNodeWroteToken = "Successfully wrote new API token: %s"
+ infoNodePendingTxnsDescription = "Pending Transactions (Truncated max=%d, Total in pool=%d): "
+ infoNodeNoPendingTxnsDescription = "None"
+ infoDataDir = "[Data Directory: %s]"
+ errLoadingConfig = "Error loading Config file from '%s': %v"
+ errorNodeFailedToShutdown = "Unable to shut down node: %v"
+ errorCatchpointLabelParsingFailed = "The provided catchpoint is not a valid one"
+ errorCatchpointLabelMissing = "A catchpoint argument is needed: %s"
+ errorUnableToLookupCatchpointLabel = "Unable to fetch catchpoint label"
+ errorTooManyCatchpointLabels = "The catchup command expect a single catchpoint"
// Asset
malformedMetadataHash = "Cannot base64-decode metadata hash %s: %s"
diff --git a/cmd/goal/network.go b/cmd/goal/network.go
index bea7d5ff4..ff4b7005b 100644
--- a/cmd/goal/network.go
+++ b/cmd/goal/network.go
@@ -42,7 +42,6 @@ func init() {
networkCmd.MarkPersistentFlagRequired("rootdir")
networkCreateCmd.Flags().StringVarP(&networkName, "network", "n", "", "Specify the name to use for the private network")
- networkCreateCmd.MarkFlagRequired("network")
networkCreateCmd.Flags().StringVarP(&networkTemplateFile, "template", "t", "", "Specify the path to the template file for the network")
networkCreateCmd.MarkFlagRequired("template")
networkCreateCmd.Flags().BoolVarP(&noImportKeys, "noimportkeys", "K", false, "Do not import root keys when creating the network (by default will import)")
diff --git a/cmd/goal/node.go b/cmd/goal/node.go
index 67fccbf41..1624603e3 100644
--- a/cmd/goal/node.go
+++ b/cmd/goal/node.go
@@ -25,6 +25,7 @@ import (
"fmt"
"io/ioutil"
"net"
+ "net/http"
"os"
"path/filepath"
"strings"
@@ -60,6 +61,8 @@ var newNodeFullConfig bool
var watchMillisecond uint64
var abortCatchup bool
+const catchpointURL = "https://algorand-catchpoints.s3.us-east-2.amazonaws.com/channel/%s/latest.catchpoint"
+
func init() {
nodeCmd.AddCommand(startCmd)
nodeCmd.AddCommand(stopCmd)
@@ -119,18 +122,54 @@ var nodeCmd = &cobra.Command{
},
}
+func getMissingCatchpointLabel(URL string) (label string, err error) {
+ resp, err := http.Get(URL)
+ if err != nil {
+ return
+ }
+ if resp.StatusCode != 200 {
+ err = errors.New(resp.Status)
+ return
+ }
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return
+ }
+ label = string(body)
+ label = strings.TrimSuffix(label, "\n")
+
+ // check if label is a valid catchpoint label
+ _, _, err = ledgercore.ParseCatchpointLabel(label)
+ if err != nil {
+ return
+ }
+ return
+}
+
var catchupCmd = &cobra.Command{
Use: "catchup",
Short: "Catchup the Algorand node to a specific catchpoint",
- Long: "Catchup allows making large jumps over round ranges without the need to incrementally validate each individual round.",
+ Long: "Catchup allows making large jumps over round ranges without the need to incrementally validate each individual round. If no catchpoint is provided, this command attempts to lookup the latest catchpoint from algorand-catchpoints.s3.us-east-2.amazonaws.com.",
Example: "goal node catchup 6500000#1234567890ABCDEF01234567890ABCDEF0\tStart catching up to round 6500000 with the provided catchpoint\ngoal node catchup --abort\t\t\t\t\tAbort the current catchup",
Args: catchpointCmdArgument,
Run: func(cmd *cobra.Command, args []string) {
- if abortCatchup == false && len(args) == 0 {
- fmt.Println(errorCatchpointLabelMissing)
- os.Exit(1)
- }
- onDataDirs(func(datadir string) { catchup(datadir, args) })
+ onDataDirs(func(dataDir string) {
+ if !abortCatchup && len(args) == 0 {
+ client := ensureAlgodClient(dataDir)
+ vers, err := client.AlgodVersions()
+ if err != nil {
+ reportErrorf(errorNodeStatus, err)
+ }
+ genesis := strings.Split(vers.GenesisID, "-")[0]
+ URL := fmt.Sprintf(catchpointURL, genesis)
+ label, err := getMissingCatchpointLabel(URL)
+ if err != nil {
+ reportErrorf(errorCatchpointLabelMissing, errorUnableToLookupCatchpointLabel)
+ }
+ args = append(args, label)
+ }
+ catchup(dataDir, args)
+ })
},
}
diff --git a/cmd/goal/node_test.go b/cmd/goal/node_test.go
new file mode 100644
index 000000000..3c35485bf
--- /dev/null
+++ b/cmd/goal/node_test.go
@@ -0,0 +1,112 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+package main
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "regexp"
+ "strings"
+ "testing"
+
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var isNum = regexp.MustCompile(`^[0-9]+$`)
+var isAlnum = regexp.MustCompile(`^[a-zA-Z0-9_]*$`)
+
+func TestGetMissingCatchpointLabel(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ tests := []struct {
+ name string
+ URL string
+ expectedErr string
+ statusCode int
+ }{
+ {
+ "bad request",
+ "",
+ "400 Bad Request",
+ http.StatusBadRequest,
+ },
+ {
+ "forbidden request",
+ "",
+ "403 Forbidden",
+ http.StatusForbidden,
+ },
+ {
+ "page not found",
+ "",
+ "404 Not Found",
+ http.StatusNotFound,
+ },
+ {
+ "bad gateway",
+ "",
+ "502 Bad Gateway",
+ http.StatusBadGateway,
+ },
+ {
+ "mainnet catchpoint",
+ "https://algorand-catchpoints.s3.us-east-2.amazonaws.com/channel/mainnet/latest.catchpoint",
+ "",
+ http.StatusAccepted,
+ },
+ {
+ "betanet catchpoint",
+ "https://algorand-catchpoints.s3.us-east-2.amazonaws.com/channel/betanet/latest.catchpoint",
+ "",
+ http.StatusAccepted,
+ },
+ {
+ "testnet catchpoint",
+ "https://algorand-catchpoints.s3.us-east-2.amazonaws.com/channel/testnet/latest.catchpoint",
+ "",
+ http.StatusAccepted,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, test.expectedErr, test.statusCode)
+ }))
+ defer ts.Close()
+
+ if test.expectedErr != "" {
+ test.URL = ts.URL
+ }
+
+ label, err := getMissingCatchpointLabel(test.URL)
+
+ if test.expectedErr != "" {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), test.expectedErr)
+ } else {
+ _, _, err = ledgercore.ParseCatchpointLabel(label)
+ assert.Equal(t, err, nil)
+ splittedLabel := strings.Split(label, "#")
+ assert.Equal(t, len(splittedLabel), 2)
+ assert.True(t, isNum.MatchString(splittedLabel[0]))
+ assert.True(t, isAlnum.MatchString(splittedLabel[1]))
+ }
+ })
+ }
+}
diff --git a/cmd/loadgenerator/config.go b/cmd/loadgenerator/config.go
index ec9576634..8a3a13cdb 100644
--- a/cmd/loadgenerator/config.go
+++ b/cmd/loadgenerator/config.go
@@ -18,15 +18,17 @@ package main
import (
"encoding/json"
+ "io"
"net/url"
"os"
+ "strings"
)
-const configFileName = "loadgenerator.config"
-
type config struct {
// AccountMnemonic is the mnemonic of the account from which we would like to spend Algos.
AccountMnemonic string
+ // AccountMnemonicList, if provided, is a series of mnemonics for accounts from which to spend Algos.
+ AccountMnemonicList []string
// ClientURL is the url ( such as http://127.0.0.1:8080 ) that would be used to communicate with a node REST endpoint
ClientURL *url.URL `json:"-"`
// APIToken is the API token used to communicate with the node.
@@ -37,6 +39,8 @@ type config struct {
RoundOffset uint64
// Fee is the amount of algos that would be specified in the transaction fee field.
Fee uint64
+ // TxnsToSend is the number of transactions to send in the round where (((round + RoundOffset) % RoundModulator) == 0)
+ TxnsToSend int
}
type fileConfig struct {
@@ -44,13 +48,21 @@ type fileConfig struct {
ClientURL string `json:"ClientURL"`
}
-func loadConfig() (cfg config, err error) {
- var fd *os.File
- fd, err = os.Open(configFileName)
- if err != nil {
- return config{}, err
+func loadConfig(configFileName string) (cfg config, err error) {
+ var fin io.Reader
+ if len(configFileName) > 0 && configFileName[0] == '{' {
+ // read -config "{json literal}"
+ fin = strings.NewReader(configFileName)
+ } else {
+ var fd *os.File
+ fd, err = os.Open(configFileName)
+ if err != nil {
+ return config{}, err
+ }
+ defer fd.Close()
+ fin = fd
}
- jsonDecoder := json.NewDecoder(fd)
+ jsonDecoder := json.NewDecoder(fin)
var fileCfg fileConfig
err = jsonDecoder.Decode(&fileCfg)
if err == nil {
diff --git a/cmd/loadgenerator/main.go b/cmd/loadgenerator/main.go
index 8c2417a2c..25026f7f9 100644
--- a/cmd/loadgenerator/main.go
+++ b/cmd/loadgenerator/main.go
@@ -19,7 +19,11 @@ package main
import (
"flag"
"fmt"
+ "io/fs"
+ "io/ioutil"
+ "net/url"
"os"
+ "path/filepath"
"runtime"
"strings"
"sync"
@@ -30,19 +34,21 @@ import (
"github.com/algorand/go-algorand/daemon/algod/api/client"
generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/algorand/go-algorand/daemon/algod/api/spec/common"
+ algodAcct "github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/db"
)
-const transactionBlockSize = 800
-
-var runOnce = flag.Bool("once", false, "Terminate after first spend loop")
-
var nroutines = runtime.NumCPU() * 2
-func init() {
- flag.Parse()
+func maybefail(err error, msg string, args ...interface{}) {
+ if err == nil {
+ return
+ }
+ fmt.Fprintf(os.Stderr, msg, args...)
+ os.Exit(1)
}
func loadMnemonic(mnemonic string) crypto.Seed {
@@ -57,22 +63,98 @@ func loadMnemonic(mnemonic string) crypto.Seed {
return seed
}
+// Like shared/pingpong/accounts.go
+func findRootKeys(algodDir string) []*crypto.SignatureSecrets {
+ keylist := make([]*crypto.SignatureSecrets, 0, 5)
+ err := filepath.Walk(algodDir, func(path string, info fs.FileInfo, err error) error {
+ var handle db.Accessor
+ handle, err = db.MakeErasableAccessor(path)
+ if err != nil {
+ return nil // don't care, move on
+ }
+ defer handle.Close()
+
+ // Fetch an account.Participation from the database
+ root, err := algodAcct.RestoreRoot(handle)
+ if err != nil {
+ return nil // don't care, move on
+ }
+ keylist = append(keylist, root.Secrets())
+ return nil
+ })
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%s: warning, %v\n", algodDir, err)
+ }
+ return keylist
+}
+
+var runOnce = flag.Bool("once", false, "Terminate after first spend loop")
+
func main() {
+ var algodDir string
+ flag.StringVar(&algodDir, "d", "", "algorand data dir")
+ var configArg string
+ flag.StringVar(&configArg, "config", "loadgenerator.config", "path to json or json literal")
+
var cfg config
var err error
- if cfg, err = loadConfig(); err != nil {
+ flag.Parse()
+ if cfg, err = loadConfig(configArg); err != nil {
fmt.Fprintf(os.Stderr, "unable to load config : %v\n", err)
os.Exit(1)
}
+
+ if (cfg.ClientURL == nil || cfg.ClientURL.String() == "") || cfg.APIToken == "" {
+ if algodDir != "" {
+ path := filepath.Join(algodDir, "algod.net")
+ net, err := ioutil.ReadFile(path)
+ maybefail(err, "%s: %v\n", path, err)
+ path = filepath.Join(algodDir, "algod.token")
+ token, err := ioutil.ReadFile(path)
+ maybefail(err, "%s: %v\n", path, err)
+ cfg.ClientURL, err = url.Parse(fmt.Sprintf("http://%s", string(strings.TrimSpace(string(net)))))
+ maybefail(err, "bad net url %v\n", err)
+ cfg.APIToken = string(token)
+ } else {
+ fmt.Fprintf(os.Stderr, "need (config.ClientURL and config.APIToken) or (-d ALGORAND_DATA)\n")
+ os.Exit(1)
+ }
+ }
fmt.Printf("Configuration file loaded successfully.\n")
- seed := loadMnemonic(cfg.AccountMnemonic)
- privateKey := crypto.GenerateSignatureSecrets(seed)
- publicKey := basics.Address(privateKey.SignatureVerifier)
+ var privateKeys []*crypto.SignatureSecrets
+ var publicKeys []basics.Address
+ addKey := func(mnemonic string) {
+ seed := loadMnemonic(mnemonic)
+ privateKeys = append(privateKeys, crypto.GenerateSignatureSecrets(seed))
+ publicKeys = append(publicKeys, basics.Address(privateKeys[0].SignatureVerifier))
+ }
+ if cfg.AccountMnemonic != "" { // one mnemonic provided
+ addKey(cfg.AccountMnemonic)
+ } else if len(cfg.AccountMnemonicList) > 0 {
+ for _, mnemonic := range cfg.AccountMnemonicList {
+ addKey(mnemonic)
+ }
+ } else if len(algodDir) > 0 {
+ // get test cluster local unlocked wallet
+ privateKeys = findRootKeys(algodDir)
+ if len(privateKeys) == 0 {
+ fmt.Fprintf(os.Stderr, "%s: found no root keys\n", algodDir)
+ os.Exit(1)
+ }
+ publicKeys = make([]basics.Address, len(privateKeys))
+ for i, sk := range privateKeys {
+ publicKeys[i] = basics.Address(sk.SignatureVerifier)
+ }
+ } else {
+ fmt.Fprintf(os.Stderr, "no keys specified in config files or -d algod dir")
+ }
- fmt.Printf("Spending account public key : %v\n", publicKey.String())
+ for i, publicKey := range publicKeys {
+ fmt.Printf("Spending account public key %d: %v\n", i, publicKey.String())
+ }
- err = spendLoop(cfg, privateKey, publicKey)
+ err = spendLoop(cfg, privateKeys, publicKeys)
if err != nil {
fmt.Fprintf(os.Stderr, "spend loop error : %v\n", err)
os.Exit(1)
@@ -92,12 +174,13 @@ func nextSpendRound(cfg config, round uint64) uint64 {
return ((round+cfg.RoundOffset)/cfg.RoundModulator)*cfg.RoundModulator + cfg.RoundModulator
}
-func spendLoop(cfg config, privateKey *crypto.SignatureSecrets, publicKey basics.Address) (err error) {
+func spendLoop(cfg config, privateKey []*crypto.SignatureSecrets, publicKey []basics.Address) (err error) {
restClient := client.MakeRestClient(*cfg.ClientURL, cfg.APIToken)
for {
- waitForRound(restClient, cfg, true)
- queueFull := generateTransactions(restClient, cfg, privateKey, publicKey)
+ nodeStatus := waitForRound(restClient, cfg, true)
+ queueFull := generateTransactions(restClient, cfg, privateKey, publicKey, nodeStatus)
if queueFull {
+ // done for this round, wait for a non-send round
waitForRound(restClient, cfg, false)
if *runOnce {
fmt.Fprintf(os.Stdout, "Once flag set, terminating.\n")
@@ -108,8 +191,7 @@ func spendLoop(cfg config, privateKey *crypto.SignatureSecrets, publicKey basics
return nil
}
-func waitForRound(restClient client.RestClient, cfg config, spendingRound bool) {
- var nodeStatus generatedV2.NodeStatusResponse
+func waitForRound(restClient client.RestClient, cfg config, spendingRound bool) (nodeStatus generatedV2.NodeStatusResponse) {
var err error
for {
nodeStatus, err = restClient.Status()
@@ -123,7 +205,7 @@ func waitForRound(restClient client.RestClient, cfg config, spendingRound bool)
return
}
if spendingRound {
- fmt.Printf("Current round %d, waiting for spending round %d\n", nodeStatus.LastRound, nextSpendRound(cfg, nodeStatus.LastRound))
+ fmt.Printf("Last round %d, waiting for spending round %d\n", nodeStatus.LastRound, nextSpendRound(cfg, nodeStatus.LastRound))
}
for {
// wait for the next round.
@@ -141,14 +223,11 @@ func waitForRound(restClient client.RestClient, cfg config, spendingRound bool)
}
}
-func generateTransactions(restClient client.RestClient, cfg config, privateKey *crypto.SignatureSecrets, publicKey basics.Address) (queueFull bool) {
- var nodeStatus generatedV2.NodeStatusResponse
+const transactionBlockSize = 800
+
+func generateTransactions(restClient client.RestClient, cfg config, privateKeys []*crypto.SignatureSecrets, publicKeys []basics.Address, nodeStatus generatedV2.NodeStatusResponse) (queueFull bool) {
+ start := time.Now()
var err error
- nodeStatus, err = restClient.Status()
- if err != nil {
- fmt.Fprintf(os.Stderr, "unable to check status : %v", err)
- return false
- }
var vers common.Version
vers, err = restClient.Versions()
if err != nil {
@@ -157,12 +236,16 @@ func generateTransactions(restClient client.RestClient, cfg config, privateKey *
}
var genesisHash crypto.Digest
copy(genesisHash[:], vers.GenesisHash)
- // create transactionBlockSize transaction to send.
- txns := make([]transactions.SignedTxn, transactionBlockSize, transactionBlockSize)
+ sendSize := cfg.TxnsToSend
+ if cfg.TxnsToSend == 0 {
+ sendSize = transactionBlockSize
+ }
+ // create sendSize transaction to send.
+ txns := make([]transactions.SignedTxn, sendSize, sendSize)
for i := range txns {
tx := transactions.Transaction{
Header: transactions.Header{
- Sender: publicKey,
+ Sender: publicKeys[i%len(publicKeys)],
Fee: basics.MicroAlgos{Raw: cfg.Fee},
FirstValid: basics.Round(nodeStatus.LastRound),
LastValid: basics.Round(nodeStatus.LastRound + 2),
@@ -171,23 +254,24 @@ func generateTransactions(restClient client.RestClient, cfg config, privateKey *
GenesisHash: genesisHash,
},
PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: publicKey,
+ Receiver: publicKeys[i%len(publicKeys)],
Amount: basics.MicroAlgos{Raw: 0},
},
Type: protocol.PaymentTx,
}
crypto.RandBytes(tx.Note[:])
- txns[i] = tx.Sign(privateKey)
+ txns[i] = tx.Sign(privateKeys[i%len(privateKeys)])
}
// create multiple go-routines to send all these requests.
+ // each thread makes new HTTP connections per API call
var sendWaitGroup sync.WaitGroup
sendWaitGroup.Add(nroutines)
sent := make([]int, nroutines, nroutines)
for i := 0; i < nroutines; i++ {
go func(base int) {
defer sendWaitGroup.Done()
- for x := base; x < transactionBlockSize; x += nroutines {
+ for x := base; x < sendSize; x += nroutines {
_, err2 := restClient.SendRawTransaction(txns[x])
if err2 != nil {
if strings.Contains(err2.Error(), "txn dead") || strings.Contains(err2.Error(), "below threshold") {
@@ -205,5 +289,11 @@ func generateTransactions(restClient client.RestClient, cfg config, privateKey *
for i := 0; i < nroutines; i++ {
totalSent += sent[i]
}
- return totalSent != transactionBlockSize
+ dt := time.Now().Sub(start)
+ fmt.Fprintf(os.Stdout, "sent %d/%d in %s (%.1f/s)\n", totalSent, sendSize, dt.String(), float64(totalSent)/dt.Seconds())
+ if cfg.TxnsToSend != 0 {
+ // We attempted what we were asked. We're done.
+ return true
+ }
+ return totalSent != sendSize
}
diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go
index 46d3365cb..94a394bbc 100644
--- a/cmd/opdoc/opdoc.go
+++ b/cmd/opdoc/opdoc.go
@@ -28,11 +28,13 @@ import (
"github.com/algorand/go-algorand/protocol"
)
+var docVersion = 7
+
func opGroupMarkdownTable(names []string, out io.Writer) {
fmt.Fprint(out, `| Opcode | Description |
| - | -- |
`)
- opSpecs := logic.OpsByName[logic.LogicVersion]
+ opSpecs := logic.OpsByName[docVersion]
for _, opname := range names {
spec, ok := opSpecs[opname]
if !ok {
@@ -184,7 +186,7 @@ func opToMarkdown(out io.Writer, op *logic.OpSpec, groupDocWritten map[string]bo
if cost.From == cost.To {
fmt.Fprintf(out, " - %s (v%d)\n", cost.Cost, cost.To)
} else {
- if cost.To < logic.LogicVersion {
+ if cost.To < docVersion {
fmt.Fprintf(out, " - %s (v%d - v%d)\n", cost.Cost, cost.From, cost.To)
} else {
fmt.Fprintf(out, " - %s (since v%d)\n", cost.Cost, cost.From)
@@ -221,7 +223,7 @@ func opToMarkdown(out io.Writer, op *logic.OpSpec, groupDocWritten map[string]bo
func opsToMarkdown(out io.Writer) (err error) {
out.Write([]byte("# Opcodes\n\nOps have a 'cost' of 1 unless otherwise specified.\n\n"))
- opSpecs := logic.OpcodesByVersion(logic.LogicVersion)
+ opSpecs := logic.OpcodesByVersion(uint64(docVersion))
written := make(map[string]bool)
for _, spec := range opSpecs {
err = opToMarkdown(out, &spec, written)
@@ -317,7 +319,7 @@ func argEnums(name string) ([]string, string) {
}
func buildLanguageSpec(opGroups map[string][]string) *LanguageSpec {
- opSpecs := logic.OpcodesByVersion(logic.LogicVersion)
+ opSpecs := logic.OpcodesByVersion(uint64(docVersion))
records := make([]OpRecord, len(opSpecs))
for i, spec := range opSpecs {
records[i].Opcode = spec.Opcode
@@ -326,13 +328,13 @@ func buildLanguageSpec(opGroups map[string][]string) *LanguageSpec {
records[i].Returns = typeString(spec.Return.Types)
records[i].Size = spec.OpDetails.Size
records[i].ArgEnum, records[i].ArgEnumTypes = argEnums(spec.Name)
- records[i].Doc = logic.OpDoc(spec.Name)
+ records[i].Doc = strings.ReplaceAll(logic.OpDoc(spec.Name), "<br />", "\n")
records[i].DocExtra = logic.OpDocExtra(spec.Name)
records[i].ImmediateNote = logic.OpImmediateNote(spec.Name)
records[i].Groups = opGroups[spec.Name]
}
return &LanguageSpec{
- EvalMaxVersion: logic.LogicVersion,
+ EvalMaxVersion: docVersion,
LogicSigVersion: config.Consensus[protocol.ConsensusCurrentVersion].LogicSigVersion,
Ops: records,
}
@@ -367,7 +369,7 @@ func main() {
constants.Close()
written := make(map[string]bool)
- opSpecs := logic.OpcodesByVersion(logic.LogicVersion)
+ opSpecs := logic.OpcodesByVersion(uint64(docVersion))
for _, spec := range opSpecs {
for _, imm := range spec.OpDetails.Immediates {
if imm.Group != nil && !written[imm.Group.Name] {
diff --git a/cmd/opdoc/tmLanguage.go b/cmd/opdoc/tmLanguage.go
index 9866ae504..7c193d01e 100644
--- a/cmd/opdoc/tmLanguage.go
+++ b/cmd/opdoc/tmLanguage.go
@@ -126,7 +126,7 @@ func buildSyntaxHighlight() *tmLanguage {
allNamedFields = append(allNamedFields, logic.TxnTypeNames[:]...)
allNamedFields = append(allNamedFields, logic.OnCompletionNames[:]...)
accumulated := make(map[string]bool)
- opSpecs := logic.OpcodesByVersion(logic.LogicVersion)
+ opSpecs := logic.OpcodesByVersion(uint64(docVersion))
for _, spec := range opSpecs {
for _, imm := range spec.OpDetails.Immediates {
if imm.Group != nil && !accumulated[imm.Group.Name] {
diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go
index 21bd8c1aa..779ed3f29 100644
--- a/cmd/pingpong/runCmd.go
+++ b/cmd/pingpong/runCmd.go
@@ -23,6 +23,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "runtime/pprof"
"strconv"
"time"
@@ -65,6 +66,8 @@ var appProgLocalKeys uint32
var duration uint32
var rekey bool
var nftAsaPerSecond uint32
+var pidFile string
+var cpuprofile string
func init() {
rootCmd.AddCommand(runCmd)
@@ -103,7 +106,8 @@ func init() {
runCmd.Flags().BoolVar(&rekey, "rekey", false, "Create RekeyTo transactions. Requires groupsize=2 and any of random flags exc random dst")
runCmd.Flags().Uint32Var(&duration, "duration", 0, "The number of seconds to run the pingpong test, forever if 0")
runCmd.Flags().Uint32Var(&nftAsaPerSecond, "nftasapersecond", 0, "The number of NFT-style ASAs to create per second")
-
+ runCmd.Flags().StringVar(&pidFile, "pidfile", "", "path to write process id of this pingpong")
+ runCmd.Flags().StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to `file`")
}
var runCmd = &cobra.Command{
@@ -116,6 +120,18 @@ var runCmd = &cobra.Command{
if err != nil {
reportErrorf("Cannot make temp dir: %v\n", err)
}
+ if cpuprofile != "" {
+ proff, err := os.Create(cpuprofile)
+ if err != nil {
+ reportErrorf("%s: %v\n", cpuprofile, err)
+ }
+ defer proff.Close()
+ err = pprof.StartCPUProfile(proff)
+ if err != nil {
+ reportErrorf("%s: StartCPUProfile %v\n", cpuprofile, err)
+ }
+ defer pprof.StopCPUProfile()
+ }
// Get libgoal Client
ac, err := libgoal.MakeClient(dataDir, cacheDir, libgoal.FullClient)
@@ -123,6 +139,22 @@ var runCmd = &cobra.Command{
panic(err)
}
+ if pidFile != "" {
+ pidf, err := os.Create(pidFile)
+ if err != nil {
+ reportErrorf("%s: %v\n", pidFile, err)
+ }
+ defer os.Remove(pidFile)
+ _, err = fmt.Fprintf(pidf, "%d", os.Getpid())
+ if err != nil {
+ reportErrorf("%s: %v\n", pidFile, err)
+ }
+ err = pidf.Close()
+ if err != nil {
+ reportErrorf("%s: %v\n", pidFile, err)
+ }
+ }
+
// Prepare configuration
var cfg pingpong.PpConfig
cfgPath := filepath.Join(ac.DataDir(), pingpong.ConfigFilename)
diff --git a/cmd/tealdbg/README.md b/cmd/tealdbg/README.md
index a5619cd86..b5a7bebbf 100644
--- a/cmd/tealdbg/README.md
+++ b/cmd/tealdbg/README.md
@@ -61,7 +61,7 @@ Local debugger supports setting the execution context: consensus protocol, trans
### Protocol
-Used to determine execution parameters and limits such as TEAL version, max program size and cost and so on.
+Used to determine execution parameters and limits such as program version, max program size and cost and so on.
```
$ tealdbg debug --proto https://github.com/algorandfoundation/specs/tree/e5f565421d720c6f75cdd186f7098495caf9101f
$ tealdbg debug --proto future
diff --git a/cmd/tealdbg/debugger_test.go b/cmd/tealdbg/debugger_test.go
index 4a390d461..4f4b35ea2 100644
--- a/cmd/tealdbg/debugger_test.go
+++ b/cmd/tealdbg/debugger_test.go
@@ -103,6 +103,7 @@ func TestDebuggerSimple(t *testing.T) {
ep := logic.NewEvalParams(make([]transactions.SignedTxnWithAD, 1), &proto, nil)
ep.Debugger = debugger
+ ep.SigLedger = logic.NoHeaderLedger{}
source := `int 0
int 1
diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go
index 8e92f66fd..c9cba4de3 100644
--- a/cmd/tealdbg/local.go
+++ b/cmd/tealdbg/local.go
@@ -545,6 +545,7 @@ func (r *LocalRunner) RunAll() error {
start := time.Now()
ep := logic.NewEvalParams(txngroup, &r.proto, &transactions.SpecialAddresses{})
+ ep.SigLedger = logic.NoHeaderLedger{}
configureDebugger(ep)
var last error
diff --git a/cmd/tealdbg/localLedger.go b/cmd/tealdbg/localLedger.go
index 50d3f6aae..cce2ae75d 100644
--- a/cmd/tealdbg/localLedger.go
+++ b/cmd/tealdbg/localLedger.go
@@ -281,6 +281,10 @@ func (l *localLedger) BlockHdr(basics.Round) (bookkeeping.BlockHeader, error) {
return bookkeeping.BlockHeader{}, nil
}
+func (l *localLedger) BlockHdrCached(basics.Round) (bookkeeping.BlockHeader, error) {
+ return bookkeeping.BlockHeader{}, nil
+}
+
func (l *localLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
return nil
}
diff --git a/cmd/updater/update.sh b/cmd/updater/update.sh
index 3ed4622ba..b613cfc32 100755
--- a/cmd/updater/update.sh
+++ b/cmd/updater/update.sh
@@ -1,6 +1,8 @@
#!/bin/bash
# shellcheck disable=2009,2093,2164
+UPDATER_MIN_VERSION="3.8.0"
+UPDATER_CHANNEL="stable"
FILENAME=$(basename -- "$0")
SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
UPDATETYPE="update"
@@ -159,7 +161,7 @@ function validate_channel_specified() {
function determine_current_version() {
CURRENTVER="$(( ${BINDIR}/algod -v 2>/dev/null || echo 0 ) | head -n 1)"
- echo Current Version = ${CURRENTVER}
+ echo "Current Version = ${CURRENTVER}"
}
function get_updater_url() {
@@ -172,6 +174,8 @@ function get_updater_url() {
UNAME=$(uname -m)
if [[ "${UNAME}" = "x86_64" ]]; then
ARCH="amd64"
+ elif [[ "${UNAME}" = "arm64" ]]; then
+ ARCH="arm64"
else
echo "This platform ${UNAME} is not supported by updater."
exit 1
@@ -192,45 +196,41 @@ function get_updater_url() {
exit 1
fi
else
- echo "This operation system ${UNAME} is not supported by updater."
+ echo "This operating system ${UNAME} is not supported by updater."
exit 1
fi
# the updater will auto-update itself to the latest version, this means that the version of updater that is downloaded
# can be arbitrary as long as the self-updating functionality is working, hence the hard-coded version
- UPDATER_URL="http://algorand-dev-deb-repo.s3-website-us-east-1.amazonaws.com/releases/stable/f9d842778_3.6.2/install_stable_${OS}-${ARCH}_3.6.2.tar.gz"
- UPDATER_FILENAME="install_stable_${OS}-${ARCH}_3.6.2.tar.gz"
+ UPDATER_FILENAME="install_${UPDATER_CHANNEL}_${OS}-${ARCH}_${UPDATER_MIN_VERSION}.tar.gz"
+ UPDATER_URL="https://algorand-releases.s3.amazonaws.com/channel/${UPDATER_CHANNEL}/${UPDATER_FILENAME}"
- # if on linux, also set variables for signature and checksum validation
- if [ "$OS" = "linux" ] && [ "$VERIFY_UPDATER_ARCHIVE" = "1" ]; then
+ # also set variables for signature and checksum validation
+ if [ "$VERIFY_UPDATER_ARCHIVE" = "1" ]; then
UPDATER_PUBKEYURL="https://releases.algorand.com/key.pub"
- UPDATER_SIGURL="http://algorand-dev-deb-repo.s3-website-us-east-1.amazonaws.com/releases/stable/f9d842778_3.6.2/install_stable_${OS}-${ARCH}_3.6.2.tar.gz.sig"
- UPDATER_CHECKSUMURL="https://algorand-releases.s3.amazonaws.com/channel/stable/hashes_stable_${OS}_${ARCH}_3.6.2"
+ UPDATER_SIGURL="https://algorand-releases.s3.amazonaws.com/channel/${UPDATER_CHANNEL}/${UPDATER_FILENAME}.sig"
+ UPDATER_CHECKSUMURL="https://algorand-releases.s3.amazonaws.com/channel/${UPDATER_CHANNEL}/hashes_${UPDATER_CHANNEL}_${OS}_${ARCH}_${UPDATER_MIN_VERSION}"
fi
}
# check to see if the binary updater exists. if not, it will automatically the correct updater binary for the current platform
function check_for_updater() {
- local UNAME
- UNAME="$(uname)"
-
# check if the updater binary exist and is not empty.
if [[ -s "${SCRIPTPATH}/updater" && -f "${SCRIPTPATH}/updater" ]]; then
return 0
fi
# set UPDATER_URL and UPDATER_ARCHIVE as a global that can be referenced here
- # if linux, UPDATER_PUBKEYURL, UPDATER_SIGURL, UPDATER_CHECKSUMURL will be set to try verification
+ # UPDATER_PUBKEYURL, UPDATER_SIGURL, UPDATER_CHECKSUMURL will be set to try verification
get_updater_url
# check if curl is available
if ! type curl &>/dev/null; then
# no curl is installed.
echo "updater binary is missing and cannot be downloaded since curl is missing."
- if [ "$UNAME" = "Linux" ]; then
- echo "To install curl, run the following command:"
- echo "apt-get update; apt-get install -y curl"
- fi
+ echo "To install curl, run the following command:"
+ echo "On Linux: apt-get update; apt-get install -y curl"
+ echo "On Mac: brew install curl"
exit 1
fi
@@ -240,6 +240,7 @@ function check_for_updater() {
UPDATER_ARCHIVE="${UPDATER_TEMPDIR}/${UPDATER_FILENAME}"
# download updater archive
+ echo "Downloading $UPDATER_URL"
if ! curl -sSL "$UPDATER_URL" -o "$UPDATER_ARCHIVE"; then
echo "failed to download updater archive from ${UPDATER_URL} using curl."
exit 1
@@ -248,24 +249,25 @@ function check_for_updater() {
if [ ! -f "$UPDATER_ARCHIVE" ]; then
echo "downloaded file ${UPDATER_ARCHIVE} is missing."
exit
+ else
+ echo "Downloaded into file ${UPDATER_ARCHIVE}"
fi
# if -verify command line flag is set, try verifying updater archive
if [ "$VERIFY_UPDATER_ARCHIVE" = "1" ]; then
- # if linux, check for checksum and signature validation dependencies
+ echo "Starting to verify the updater archive"
+ # check for checksum and signature validation dependencies
local GPG_VERIFY="0" CHECKSUM_VERIFY="0"
- if [ "$UNAME" = "Linux" ]; then
- if type gpg >&/dev/null; then
- GPG_VERIFY="1"
- else
- echo "gpg is not available to perform signature validation."
- fi
+ if type gpg >&/dev/null; then
+ GPG_VERIFY="1"
+ else
+ echo "gpg is not available to perform signature validation."
+ fi
- if type sha256sum &>/dev/null; then
- CHECKSUM_VERIFY="1"
- else
- echo "sha256sum is not available to perform checksum validation."
- fi
+ if type sha256sum &>/dev/null; then
+ CHECKSUM_VERIFY="1"
+ else
+ echo "sha256sum is not available to perform checksum validation."
fi
# try signature validation
@@ -279,6 +281,8 @@ function check_for_updater() {
if ! gpg --verify "$UPDATER_SIGFILE" "$UPDATER_ARCHIVE"; then
echo "failed to verify signature of updater archive."
exit 1
+ else
+ echo "Verified signature of updater archive"
fi
else
echo "failed download signature file, cannot perform signature validation."
@@ -304,6 +308,8 @@ function check_for_updater() {
echo "failed to verify checksum of updater archive."
popd
exit 1
+ else
+ echo "Verified checksum of updater archive"
fi
popd
else
@@ -343,14 +349,14 @@ function check_for_update() {
if [ ${CURRENTVER} -ge ${LATEST} ]; then
if [ "${UPDATETYPE}" = "install" ]; then
- echo No new version found - forcing install anyway
+ echo "No new version found - forcing install anyway"
else
- echo No new version found
+ echo "No new version found"
return 1
fi
fi
- echo New version found
+ echo "New version found"
return 0
}
@@ -399,10 +405,10 @@ function download_update() {
${SCRIPTPATH}/updater ver get -c ${CHANNEL} -o ${TARFILE} ${BUCKET} ${SPECIFIC_VERSION}
if [ $? -ne 0 ]; then
- echo Error downloading update file
+ echo "Error downloading update file"
exit 1
fi
- echo Update Downloaded to ${TARFILE}
+ echo "Update Downloaded to ${TARFILE}"
}
function check_and_download_update() {
@@ -420,7 +426,7 @@ function download_update_for_current_version() {
}
function expand_update() {
- echo Expanding update...
+ echo "Expanding update..."
if ! tar -zxof "${TARFILE}" -C "${UPDATESRCDIR}"; then
return 1
fi
@@ -428,7 +434,7 @@ function expand_update() {
}
function validate_update() {
- echo Validating update...
+ echo "Validating update..."
# We should consider including a version.info file
# that we can compare against the expected version
return 0
@@ -488,7 +494,7 @@ function run_systemd_action() {
}
function backup_binaries() {
- echo Backing up current binary files...
+ echo "Backing up current binary files..."
mkdir -p "${BINDIR}/backup"
BACKUPFILES="algod kmd carpenter doberman goal update.sh updater diagcfg"
# add node_exporter to the files list we're going to backup, but only we if had it previously deployed.
@@ -531,7 +537,7 @@ function install_new_binaries() {
if [ ! -d ${UPDATESRCDIR}/bin ]; then
return 0
else
- echo Installing new binary files...
+ echo "Installing new binary files..."
ROLLBACKBIN=1
rm -rf ${BINDIR}/new
mkdir ${BINDIR}/new
@@ -550,7 +556,7 @@ function reset_wallets_for_new_ledger() {
for file in *.partkey *.rootkey; do
if [ -e "${file}" ]; then
cp "${file}" "${NEW_VER}/${file}"
- echo 'Installed genesis account file: ' "${file}"
+ echo "Installed genesis account file: ${file}"
fi
done
popd >/dev/null
@@ -633,12 +639,12 @@ function clean_legacy_logs() {
function startup_node() {
if [ "${NOSTART}" != "" ]; then
- echo Auto-start node disabled - not starting
+ echo "Auto-start node disabled - not starting"
return
fi
CURDATADIR=$1
- echo Restarting node in ${CURDATADIR}...
+ echo "Restarting node in ${CURDATADIR}..."
check_install_valid
if [ $? -ne 0 ]; then
@@ -658,7 +664,7 @@ function startup_nodes() {
}
function rollback() {
- echo Rolling back from failed update...
+ echo "Rolling back from failed update..."
if [ ${ROLLBACKBIN} -ne 0 ]; then
rollback_binaries
fi
diff --git a/compactcert/builder.go b/compactcert/builder.go
deleted file mode 100644
index 27302ed49..000000000
--- a/compactcert/builder.go
+++ /dev/null
@@ -1,386 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package compactcert
-
-import (
- "context"
- "database/sql"
- "encoding/binary"
- "fmt"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto/compactcert"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/ledger"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/network"
- "github.com/algorand/go-algorand/protocol"
-)
-
-func (ccw *Worker) builderForRound(rnd basics.Round) (builder, error) {
- hdr, err := ccw.ledger.BlockHdr(rnd)
- if err != nil {
- return builder{}, err
- }
-
- hdrProto := config.Consensus[hdr.CurrentProtocol]
- votersRnd := rnd.SubSaturate(basics.Round(hdrProto.CompactCertRounds))
- votersHdr, err := ccw.ledger.BlockHdr(votersRnd)
- if err != nil {
- return builder{}, err
- }
-
- lookback := votersRnd.SubSaturate(basics.Round(hdrProto.CompactCertVotersLookback))
- voters, err := ccw.ledger.CompactCertVoters(lookback)
- if err != nil {
- return builder{}, err
- }
-
- if voters == nil {
- // Voters not tracked for that round. Might not be a valid
- // compact cert round; compact certs might not be enabled; etc.
- return builder{}, fmt.Errorf("voters not tracked for lookback round %d", lookback)
- }
-
- p, err := ledger.CompactCertParams(votersHdr, hdr)
- if err != nil {
- return builder{}, err
- }
-
- var res builder
- res.votersHdr = votersHdr
- res.voters = voters
- res.Builder, err = compactcert.MkBuilder(p, voters.Participants, voters.Tree)
- if err != nil {
- return builder{}, err
- }
-
- ccw.builders[rnd] = res
- return res, nil
-}
-
-func (ccw *Worker) initBuilders() {
- ccw.mu.Lock()
- defer ccw.mu.Unlock()
-
- var roundSigs map[basics.Round][]pendingSig
- err := ccw.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- roundSigs, err = getPendingSigs(tx)
- return
- })
- if err != nil {
- ccw.log.Warnf("initBuilders: getPendingSigs: %v", err)
- return
- }
-
- for rnd, sigs := range roundSigs {
- _, ok := ccw.builders[rnd]
- if ok {
- ccw.log.Warnf("initBuilders: round %d already present", rnd)
- continue
- }
-
- builder, err := ccw.builderForRound(rnd)
- if err != nil {
- ccw.log.Warnf("initBuilders: builderForRound(%d): %v", rnd, err)
- continue
- }
-
- for _, sig := range sigs {
- pos, ok := builder.voters.AddrToPos[sig.signer]
- if !ok {
- ccw.log.Warnf("initBuilders: cannot find %v in round %d", sig.signer, rnd)
- continue
- }
-
- err = builder.Add(pos, sig.sig, false)
- if err != nil {
- ccw.log.Warnf("initBuilders: cannot add %v in round %d: %v", sig.signer, rnd, err)
- continue
- }
- }
- }
-}
-
-func (ccw *Worker) handleSigMessage(msg network.IncomingMessage) network.OutgoingMessage {
- var ssig sigFromAddr
- err := protocol.Decode(msg.Data, &ssig)
- if err != nil {
- ccw.log.Warnf("ccw.handleSigMessage(): decode: %v", err)
- return network.OutgoingMessage{Action: network.Disconnect}
- }
-
- fwd, err := ccw.handleSig(ssig, msg.Sender)
- if err != nil {
- ccw.log.Warnf("ccw.handleSigMessage(): %v", err)
- }
-
- return network.OutgoingMessage{Action: fwd}
-}
-
-func (ccw *Worker) handleSig(sfa sigFromAddr, sender network.Peer) (network.ForwardingPolicy, error) {
- ccw.mu.Lock()
- defer ccw.mu.Unlock()
-
- builder, ok := ccw.builders[sfa.Round]
- if !ok {
- latest := ccw.ledger.Latest()
- latestHdr, err := ccw.ledger.BlockHdr(latest)
- if err != nil {
- return network.Disconnect, err
- }
-
- if sfa.Round < latestHdr.CompactCert[protocol.CompactCertBasic].CompactCertNextRound {
- // Already have a complete compact cert in ledger.
- // Ignore this sig.
- return network.Ignore, nil
- }
-
- builder, err = ccw.builderForRound(sfa.Round)
- if err != nil {
- return network.Disconnect, err
- }
- }
-
- pos, ok := builder.voters.AddrToPos[sfa.Signer]
- if !ok {
- return network.Disconnect, fmt.Errorf("handleSig: %v not in participants for %d", sfa.Signer, sfa.Round)
- }
-
- if builder.Present(pos) {
- // Signature already part of the builder, ignore.
- return network.Ignore, nil
- }
-
- err := builder.Add(pos, sfa.Sig, true)
- if err != nil {
- return network.Disconnect, err
- }
-
- err = ccw.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- return addPendingSig(tx, sfa.Round, pendingSig{
- signer: sfa.Signer,
- sig: sfa.Sig,
- fromThisNode: sender == nil,
- })
- })
- if err != nil {
- return network.Ignore, err
- }
-
- return network.Broadcast, nil
-}
-
-func (ccw *Worker) builder(latest basics.Round) {
- // We clock the building of compact certificates based on new
- // blocks. This is because the acceptable compact certificate
- // size grows over time, so that we aim to construct an extremely
- // compact certificate upfront, but if that doesn't work out, we
- // will settle for a larger certificate. New blocks also tell us
- // if a compact cert has been committed, so that we can stop trying
- // to build it.
- for {
- ccw.tryBuilding()
-
- nextrnd := latest + 1
- select {
- case <-ccw.ctx.Done():
- ccw.wg.Done()
- return
-
- case <-ccw.ledger.Wait(nextrnd):
- // Continue on
- }
-
- // See if any new compact certificates were formed, according to
- // the new block, which would mean we can clean up some builders.
- hdr, err := ccw.ledger.BlockHdr(nextrnd)
- if err != nil {
- ccw.log.Warnf("ccw.builder: BlockHdr(%d): %v", nextrnd, err)
- continue
- } else {
- ccw.deleteOldSigs(hdr.CompactCert[protocol.CompactCertBasic].CompactCertNextRound)
- }
-
- // Broadcast signatures based on the previous block(s) that
- // were agreed upon. This ensures that, if we send a signature
- // for block R, nodes will have already verified block R, because
- // block R+1 has been formed.
- proto := config.Consensus[hdr.CurrentProtocol]
- newLatest := ccw.ledger.Latest()
- for r := latest; r < newLatest; r++ {
- // Wait for the signer to catch up; mostly relevant in tests.
- ccw.waitForSignedBlock(r)
-
- ccw.broadcastSigs(r, proto)
- }
- latest = newLatest
- }
-}
-
-// broadcastSigs periodically broadcasts pending signatures for rounds
-// that have not been able to form a compact certificate.
-//
-// Signature re-broadcasting happens in periods of proto.CompactCertRounds
-// rounds.
-//
-// In the first half of each such period, signers of a block broadcast their
-// own signatures; this is the expected common path.
-//
-// In the second half of each such period, any signatures seen by this node
-// are broadcast.
-//
-// The broadcast schedule is randomized by the address of the block signer,
-// for load-balancing over time.
-func (ccw *Worker) broadcastSigs(brnd basics.Round, proto config.ConsensusParams) {
- if proto.CompactCertRounds == 0 {
- return
- }
-
- ccw.mu.Lock()
- defer ccw.mu.Unlock()
-
- var roundSigs map[basics.Round][]pendingSig
- err := ccw.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- if brnd%basics.Round(proto.CompactCertRounds) < basics.Round(proto.CompactCertRounds/2) {
- roundSigs, err = getPendingSigsFromThisNode(tx)
- } else {
- roundSigs, err = getPendingSigs(tx)
- }
- return
- })
- if err != nil {
- ccw.log.Warnf("broadcastSigs: getPendingSigs: %v", err)
- return
- }
-
- for rnd, sigs := range roundSigs {
- if rnd > brnd {
- // Signature is for later block than brnd. This could happen
- // during catchup or testing. The caller's loop will eventually
- // invoke this function with a suitably high brnd.
- continue
- }
-
- for _, sig := range sigs {
- // Randomize which sigs get broadcast over time.
- addr64 := binary.LittleEndian.Uint64(sig.signer[:])
- if addr64%(proto.CompactCertRounds/2) != uint64(brnd)%(proto.CompactCertRounds/2) {
- continue
- }
-
- sfa := sigFromAddr{
- Signer: sig.signer,
- Round: rnd,
- Sig: sig.sig,
- }
- err = ccw.net.Broadcast(context.Background(), protocol.CompactCertSigTag,
- protocol.Encode(&sfa), false, nil)
- if err != nil {
- ccw.log.Warnf("broadcastSigs: Broadcast for %d: %v", rnd, err)
- }
- }
- }
-}
-
-func (ccw *Worker) deleteOldSigs(nextCert basics.Round) {
- ccw.mu.Lock()
- defer ccw.mu.Unlock()
-
- err := ccw.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- return deletePendingSigsBeforeRound(tx, nextCert)
- })
- if err != nil {
- ccw.log.Warnf("deletePendingSigsBeforeRound(%d): %v", nextCert, err)
- }
-
- for rnd := range ccw.builders {
- if rnd < nextCert {
- delete(ccw.builders, rnd)
- }
- }
-}
-
-func (ccw *Worker) tryBuilding() {
- ccw.mu.Lock()
- defer ccw.mu.Unlock()
-
- for rnd, b := range ccw.builders {
- firstValid := ccw.ledger.Latest() + 1
- acceptableWeight := ledger.AcceptableCompactCertWeight(b.votersHdr, firstValid, logging.Base())
- if b.SignedWeight() < acceptableWeight {
- // Haven't signed enough to build the cert at this time..
- continue
- }
-
- if !b.Ready() {
- // Haven't gotten enough signatures to get past ProvenWeight
- continue
- }
-
- cert, err := b.Build()
- if err != nil {
- ccw.log.Warnf("ccw.tryBuilding: building compact cert for %d: %v", rnd, err)
- continue
- }
-
- var stxn transactions.SignedTxn
- stxn.Txn.Type = protocol.CompactCertTx
- stxn.Txn.Sender = transactions.CompactCertSender
- stxn.Txn.FirstValid = firstValid
- stxn.Txn.LastValid = firstValid + basics.Round(b.voters.Proto.MaxTxnLife)
- stxn.Txn.GenesisHash = ccw.ledger.GenesisHash()
- stxn.Txn.CertRound = rnd
- stxn.Txn.Cert = *cert
- err = ccw.txnSender.BroadcastInternalSignedTxGroup([]transactions.SignedTxn{stxn})
- if err != nil {
- ccw.log.Warnf("ccw.tryBuilding: broadcasting compact cert txn for %d: %v", rnd, err)
- }
- }
-}
-
-func (ccw *Worker) signedBlock(r basics.Round) {
- ccw.mu.Lock()
- ccw.signed = r
- ccw.mu.Unlock()
-
- select {
- case ccw.signedCh <- struct{}{}:
- default:
- }
-}
-
-func (ccw *Worker) lastSignedBlock() basics.Round {
- ccw.mu.Lock()
- defer ccw.mu.Unlock()
- return ccw.signed
-}
-
-func (ccw *Worker) waitForSignedBlock(r basics.Round) {
- for {
- if r <= ccw.lastSignedBlock() {
- return
- }
-
- select {
- case <-ccw.ctx.Done():
- return
- case <-ccw.signedCh:
- }
- }
-}
diff --git a/compactcert/signer.go b/compactcert/signer.go
deleted file mode 100644
index 915ac6040..000000000
--- a/compactcert/signer.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package compactcert
-
-import (
- "context"
- "database/sql"
- "time"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto/merklesignature"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/protocol"
-)
-
-// sigFromAddr encapsulates a signature on a block header, which
-// will eventually be used to form a compact certificate for that
-// block.
-type sigFromAddr struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
-
- Signer basics.Address `codec:"signer"`
- Round basics.Round `codec:"rnd"`
- Sig merklesignature.Signature `codec:"sig"`
-}
-
-func (ccw *Worker) signer(latest basics.Round) {
- var nextrnd basics.Round
-
-restart:
- for {
- latestHdr, err := ccw.ledger.BlockHdr(latest)
- if err != nil {
- ccw.log.Warnf("ccw.signer(): BlockHdr(latest %d): %v", latest, err)
- time.Sleep(1 * time.Second)
- latest = ccw.ledger.Latest()
- continue
- }
-
- nextrnd = latestHdr.CompactCert[protocol.CompactCertBasic].CompactCertNextRound
- if nextrnd == 0 {
- // Compact certs not enabled yet. Keep monitoring new blocks.
- nextrnd = latest + 1
- }
- break
- }
-
- for {
- select {
- case <-ccw.ledger.Wait(nextrnd):
- hdr, err := ccw.ledger.BlockHdr(nextrnd)
- if err != nil {
- ccw.log.Warnf("ccw.signer(): BlockHdr(next %d): %v", nextrnd, err)
- time.Sleep(1 * time.Second)
- latest = ccw.ledger.Latest()
- goto restart
- }
-
- ccw.signBlock(hdr)
- ccw.signedBlock(nextrnd)
- nextrnd++
-
- case <-ccw.ctx.Done():
- ccw.wg.Done()
- return
- }
- }
-}
-
-func (ccw *Worker) signBlock(hdr bookkeeping.BlockHeader) {
- proto := config.Consensus[hdr.CurrentProtocol]
- if proto.CompactCertRounds == 0 {
- return
- }
-
- // Only sign blocks that are a multiple of CompactCertRounds.
- if hdr.Round%basics.Round(proto.CompactCertRounds) != 0 {
- return
- }
-
- keys := ccw.accts.StateProofKeys(hdr.Round)
- if len(keys) == 0 {
- // No keys, nothing to do.
- return
- }
-
- // votersRound is the round containing the merkle root commitment
- // for the voters that are going to sign this block.
- votersRound := hdr.Round.SubSaturate(basics.Round(proto.CompactCertRounds))
- votersHdr, err := ccw.ledger.BlockHdr(votersRound)
- if err != nil {
- ccw.log.Warnf("ccw.signBlock(%d): BlockHdr(%d): %v", hdr.Round, votersRound, err)
- return
- }
-
- if votersHdr.CompactCert[protocol.CompactCertBasic].CompactCertVoters.IsEmpty() {
- // No voter commitment, perhaps because compact certs were
- // just enabled.
- return
- }
-
- sigs := make([]sigFromAddr, 0, len(keys))
-
- for _, key := range keys {
- if key.FirstValid > hdr.Round || hdr.Round > key.LastValid {
- continue
- }
-
- if key.StateProofSecrets == nil {
- ccw.log.Warnf("ccw.signBlock(%d): empty state proof secrets for round", hdr.Round)
- continue
- }
-
- sig, err := key.StateProofSecrets.Sign(hdr)
- if err != nil {
- ccw.log.Warnf("ccw.signBlock(%d): StateProofSecrets.Sign: %v", hdr.Round, err)
- continue
- }
-
- sigs = append(sigs, sigFromAddr{
- Signer: key.Account,
- Round: hdr.Round,
- Sig: sig,
- })
- }
-
- for _, sfa := range sigs {
- _, err = ccw.handleSig(sfa, nil)
- if err != nil {
- ccw.log.Warnf("ccw.signBlock(%d): handleSig: %v", hdr.Round, err)
- }
- }
-}
-
-// LatestSigsFromThisNode returns information about compact cert signatures from
-// this node's participation keys that are already stored durably on disk. In
-// particular, we return the round nunmber of the latest block signed with each
-// account's participation key. This is intended for use by the ephemeral key
-// logic: since we already have these signatures stored on disk, it is safe to
-// delete the corresponding ephemeral private keys.
-func (ccw *Worker) LatestSigsFromThisNode() (map[basics.Address]basics.Round, error) {
- res := make(map[basics.Address]basics.Round)
- err := ccw.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- sigs, err := getPendingSigsFromThisNode(tx)
- if err != nil {
- return err
- }
-
- for rnd, psigs := range sigs {
- for _, psig := range psigs {
- if res[psig.signer] < rnd {
- res[psig.signer] = rnd
- }
- }
- }
-
- return nil
- })
- return res, err
-}
diff --git a/compactcert/worker_test.go b/compactcert/worker_test.go
deleted file mode 100644
index aaf1a4e2b..000000000
--- a/compactcert/worker_test.go
+++ /dev/null
@@ -1,525 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package compactcert
-
-import (
- "context"
- "fmt"
- "strings"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/compactcert"
- "github.com/algorand/go-algorand/crypto/merklearray"
- "github.com/algorand/go-algorand/data/account"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/ledger/ledgercore"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/network"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/algorand/go-algorand/util/db"
- "github.com/algorand/go-deadlock"
-)
-
-type testWorkerStubs struct {
- t testing.TB
- mu deadlock.Mutex
- latest basics.Round
- waiters map[basics.Round]chan struct{}
- blocks map[basics.Round]bookkeeping.BlockHeader
- keys []account.Participation
- keysForVoters []account.Participation
- sigmsg chan []byte
- txmsg chan transactions.SignedTxn
- totalWeight int
-}
-
-func newWorkerStubs(t testing.TB, keys []account.Participation, totalWeight int) *testWorkerStubs {
- s := &testWorkerStubs{
- waiters: make(map[basics.Round]chan struct{}),
- blocks: make(map[basics.Round]bookkeeping.BlockHeader),
- sigmsg: make(chan []byte, 1024),
- txmsg: make(chan transactions.SignedTxn, 1024),
- keys: keys,
- keysForVoters: keys,
- totalWeight: totalWeight,
- }
- s.latest--
- s.addBlock(2 * basics.Round(config.Consensus[protocol.ConsensusFuture].CompactCertRounds))
- return s
-}
-
-func (s *testWorkerStubs) addBlock(ccNextRound basics.Round) {
- s.latest++
-
- hdr := bookkeeping.BlockHeader{}
- hdr.Round = s.latest
- hdr.CurrentProtocol = protocol.ConsensusFuture
-
- var ccBasic = bookkeeping.CompactCertState{
- CompactCertVoters: make([]byte, compactcert.HashSize),
- CompactCertVotersTotal: basics.MicroAlgos{},
- CompactCertNextRound: 0,
- }
- ccBasic.CompactCertVotersTotal.Raw = uint64(s.totalWeight)
-
- if hdr.Round > 0 {
- // Just so it's not zero, since the signer logic checks for all-zeroes
- ccBasic.CompactCertVoters[1] = 0x12
- }
-
- ccBasic.CompactCertNextRound = ccNextRound
- hdr.CompactCert = map[protocol.CompactCertType]bookkeeping.CompactCertState{
- protocol.CompactCertBasic: ccBasic,
- }
-
- s.blocks[s.latest] = hdr
- if s.waiters[s.latest] != nil {
- close(s.waiters[s.latest])
- }
-}
-
-func (s *testWorkerStubs) StateProofKeys(rnd basics.Round) (out []account.StateProofRecordForRound) {
- for _, part := range s.keys {
- if part.OverlapsInterval(rnd, rnd) {
- partRecord := account.ParticipationRecord{
- ParticipationID: part.ID(),
- Account: part.Parent,
- FirstValid: part.FirstValid,
- LastValid: part.LastValid,
- KeyDilution: part.KeyDilution,
- LastVote: 0,
- LastBlockProposal: 0,
- LastStateProof: 0,
- EffectiveFirst: 0,
- EffectiveLast: 0,
- VRF: part.VRF,
- Voting: part.Voting,
- }
- signerInRound := part.StateProofSecrets.GetSigner(uint64(rnd))
- partRecordForRound := account.StateProofRecordForRound{
- ParticipationRecord: partRecord,
- StateProofSecrets: signerInRound,
- }
- out = append(out, partRecordForRound)
- }
- }
- return
-}
-
-func (s *testWorkerStubs) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- hdr, ok := s.blocks[r]
- if !ok {
- return hdr, ledgercore.ErrNoEntry{
- Round: r,
- Latest: s.latest,
- Committed: s.latest,
- }
- }
-
- return hdr, nil
-}
-
-func (s *testWorkerStubs) CompactCertVoters(r basics.Round) (*ledgercore.VotersForRound, error) {
- voters := &ledgercore.VotersForRound{
- Proto: config.Consensus[protocol.ConsensusFuture],
- AddrToPos: make(map[basics.Address]uint64),
- TotalWeight: basics.MicroAlgos{Raw: uint64(s.totalWeight)},
- }
-
- for i, k := range s.keysForVoters {
- voters.AddrToPos[k.Parent] = uint64(i)
- voters.Participants = append(voters.Participants, basics.Participant{
- PK: *k.StateProofSecrets.GetVerifier(),
- Weight: 1,
- })
- }
-
- tree, err := merklearray.BuildVectorCommitmentTree(voters.Participants, crypto.HashFactory{HashType: compactcert.HashType})
- if err != nil {
- return nil, err
- }
-
- voters.Tree = tree
- return voters, nil
-}
-
-func (s *testWorkerStubs) GenesisHash() crypto.Digest {
- return crypto.Digest{0x01, 0x02, 0x03, 0x04}
-}
-
-func (s *testWorkerStubs) Latest() basics.Round {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.latest
-}
-
-func (s *testWorkerStubs) Wait(r basics.Round) chan struct{} {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.waiters[r] == nil {
- s.waiters[r] = make(chan struct{})
- if r <= s.latest {
- close(s.waiters[r])
- }
- }
- return s.waiters[r]
-}
-
-func (s *testWorkerStubs) Broadcast(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except network.Peer) error {
- require.Equal(s.t, tag, protocol.CompactCertSigTag)
- s.sigmsg <- data
- return nil
-}
-
-func (s *testWorkerStubs) BroadcastInternalSignedTxGroup(tx []transactions.SignedTxn) error {
- require.Equal(s.t, len(tx), 1)
- s.txmsg <- tx[0]
- return nil
-}
-
-func (s *testWorkerStubs) RegisterHandlers([]network.TaggedMessageHandler) {
-}
-
-func (s *testWorkerStubs) advanceLatest(delta uint64) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- for r := uint64(0); r < delta; r++ {
- s.addBlock(s.blocks[s.latest].CompactCert[protocol.CompactCertBasic].CompactCertNextRound)
- }
-}
-
-func newTestWorkerDB(t testing.TB, s *testWorkerStubs, dba db.Accessor) *Worker {
- return NewWorker(dba, logging.TestingLog(t), s, s, s, s)
-}
-
-func newTestWorker(t testing.TB, s *testWorkerStubs) *Worker {
- dbs, _ := dbOpenTest(t, true)
- return newTestWorkerDB(t, s, dbs.Wdb)
-}
-
-// You must call defer part.Close() after calling this function,
-// since it creates a DB accessor but the caller must close it (required for mss)
-func newPartKey(t testing.TB, parent basics.Address) account.PersistedParticipation {
- fn := fmt.Sprintf("%s.%d", strings.ReplaceAll(t.Name(), "/", "."), crypto.RandUint64())
- partDB, err := db.MakeAccessor(fn, false, true)
- require.NoError(t, err)
-
- part, err := account.FillDBWithParticipationKeys(partDB, parent, 0, basics.Round(10*config.Consensus[protocol.ConsensusFuture].CompactCertRounds), config.Consensus[protocol.ConsensusFuture].DefaultKeyDilution)
- require.NoError(t, err)
-
- return part
-}
-
-func TestWorkerAllSigs(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var keys []account.Participation
- for i := 0; i < 10; i++ {
- var parent basics.Address
- crypto.RandBytes(parent[:])
- p := newPartKey(t, parent)
- defer p.Close()
- keys = append(keys, p.Participation)
- }
-
- s := newWorkerStubs(t, keys, len(keys))
- w := newTestWorker(t, s)
- w.Start()
- defer w.Shutdown()
-
- proto := config.Consensus[protocol.ConsensusFuture]
- s.advanceLatest(proto.CompactCertRounds + proto.CompactCertRounds/2)
-
- // Go through several iterations, making sure that we get
- // the signatures and certs broadcast at each round.
- for iter := 0; iter < 5; iter++ {
- s.advanceLatest(proto.CompactCertRounds)
-
- for i := 0; i < len(keys); i++ {
- // Expect all signatures to be broadcast.
- _ = <-s.sigmsg
- }
-
- // Expect a compact cert to be formed.
- for {
- tx := <-s.txmsg
- require.Equal(t, tx.Txn.Type, protocol.CompactCertTx)
- if tx.Txn.CertRound < basics.Round(iter+2)*basics.Round(proto.CompactCertRounds) {
- continue
- }
-
- require.Equal(t, tx.Txn.CertRound, basics.Round(iter+2)*basics.Round(proto.CompactCertRounds))
-
- signedHdr, err := s.BlockHdr(tx.Txn.CertRound)
- require.NoError(t, err)
-
- provenWeight, overflowed := basics.Muldiv(uint64(s.totalWeight), uint64(proto.CompactCertWeightThreshold), 1<<32)
- require.False(t, overflowed)
-
- ccparams := compactcert.Params{
- Msg: signedHdr,
- ProvenWeight: provenWeight,
- SigRound: basics.Round(signedHdr.Round),
- SecKQ: proto.CompactCertSecKQ,
- }
-
- voters, err := s.CompactCertVoters(tx.Txn.CertRound - basics.Round(proto.CompactCertRounds) - basics.Round(proto.CompactCertVotersLookback))
- require.NoError(t, err)
-
- verif := compactcert.MkVerifier(ccparams, voters.Tree.Root())
- err = verif.Verify(&tx.Txn.Cert)
- require.NoError(t, err)
- break
- }
- }
-}
-
-func TestWorkerPartialSigs(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var keys []account.Participation
- for i := 0; i < 7; i++ {
- var parent basics.Address
- crypto.RandBytes(parent[:])
- p := newPartKey(t, parent)
- defer p.Close()
- keys = append(keys, p.Participation)
- }
-
- s := newWorkerStubs(t, keys, 10)
- w := newTestWorker(t, s)
- w.Start()
- defer w.Shutdown()
-
- proto := config.Consensus[protocol.ConsensusFuture]
- s.advanceLatest(proto.CompactCertRounds + proto.CompactCertRounds/2)
- s.advanceLatest(proto.CompactCertRounds)
-
- for i := 0; i < len(keys); i++ {
- // Expect all signatures to be broadcast.
- _ = <-s.sigmsg
- }
-
- // No compact cert should be formed yet: not enough sigs for a cert this early.
- select {
- case <-s.txmsg:
- t.Fatal("compact cert formed too early")
- case <-time.After(time.Second):
- }
-
- // Expect a compact cert to be formed in the next CompactCertRounds/2.
- s.advanceLatest(proto.CompactCertRounds / 2)
- tx := <-s.txmsg
- require.Equal(t, tx.Txn.Type, protocol.CompactCertTx)
- require.Equal(t, tx.Txn.CertRound, 2*basics.Round(proto.CompactCertRounds))
-
- signedHdr, err := s.BlockHdr(tx.Txn.CertRound)
- require.NoError(t, err)
-
- provenWeight, overflowed := basics.Muldiv(uint64(s.totalWeight), uint64(proto.CompactCertWeightThreshold), 1<<32)
- require.False(t, overflowed)
-
- ccparams := compactcert.Params{
- Msg: signedHdr,
- ProvenWeight: provenWeight,
- SigRound: basics.Round(signedHdr.Round),
- SecKQ: proto.CompactCertSecKQ,
- }
-
- voters, err := s.CompactCertVoters(tx.Txn.CertRound - basics.Round(proto.CompactCertRounds) - basics.Round(proto.CompactCertVotersLookback))
- require.NoError(t, err)
-
- verif := compactcert.MkVerifier(ccparams, voters.Tree.Root())
- err = verif.Verify(&tx.Txn.Cert)
- require.NoError(t, err)
-}
-
-func TestWorkerInsufficientSigs(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var keys []account.Participation
- for i := 0; i < 2; i++ {
- var parent basics.Address
- crypto.RandBytes(parent[:])
- p := newPartKey(t, parent)
- defer p.Close()
- keys = append(keys, p.Participation)
- }
-
- s := newWorkerStubs(t, keys, 10)
- w := newTestWorker(t, s)
- w.Start()
- defer w.Shutdown()
-
- proto := config.Consensus[protocol.ConsensusFuture]
- s.advanceLatest(3 * proto.CompactCertRounds)
-
- for i := 0; i < len(keys); i++ {
- // Expect all signatures to be broadcast.
- _ = <-s.sigmsg
- }
-
- // No compact cert should be formed: not enough sigs.
- select {
- case <-s.txmsg:
- t.Fatal("compact cert formed without enough sigs")
- case <-time.After(time.Second):
- }
-}
-
-func TestLatestSigsFromThisNode(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var keys []account.Participation
- for i := 0; i < 10; i++ {
- var parent basics.Address
- crypto.RandBytes(parent[:])
- p := newPartKey(t, parent)
- defer p.Close()
- keys = append(keys, p.Participation)
- }
-
- s := newWorkerStubs(t, keys, 10)
- w := newTestWorker(t, s)
- w.Start()
- defer w.Shutdown()
-
- proto := config.Consensus[protocol.ConsensusFuture]
- s.advanceLatest(3*proto.CompactCertRounds - 2)
-
- // Wait for a compact cert to be formed, so we know the signer thread is caught up.
- _ = <-s.txmsg
-
- var latestSigs map[basics.Address]basics.Round
- var err error
- for x := 0; x < 10; x++ {
- latestSigs, err = w.LatestSigsFromThisNode()
- require.NoError(t, err)
- if len(latestSigs) == len(keys) {
- break
- }
- time.Sleep(256 * time.Millisecond)
- }
- require.Equal(t, len(keys), len(latestSigs))
- for _, k := range keys {
- require.Equal(t, latestSigs[k.Parent], basics.Round(2*proto.CompactCertRounds))
- }
-
- // Add a block that claims the compact cert is formed.
- s.mu.Lock()
- s.addBlock(3 * basics.Round(proto.CompactCertRounds))
- s.mu.Unlock()
-
- // Wait for the builder to discard the signatures.
- for x := 0; x < 10; x++ {
- latestSigs, err = w.LatestSigsFromThisNode()
- require.NoError(t, err)
- if len(latestSigs) == 0 {
- break
- }
- time.Sleep(256 * time.Millisecond)
- }
- require.Equal(t, 0, len(latestSigs))
-}
-
-func TestWorkerRestart(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var keys []account.Participation
- for i := 0; i < 10; i++ {
- var parent basics.Address
- crypto.RandBytes(parent[:])
- p := newPartKey(t, parent)
- defer p.Close()
- keys = append(keys, p.Participation)
- }
-
- s := newWorkerStubs(t, keys, 10)
-
- proto := config.Consensus[protocol.ConsensusFuture]
- s.advanceLatest(3*proto.CompactCertRounds - 1)
-
- dbRand := crypto.RandUint64()
-
- formedAt := -1
- for i := 0; formedAt < 0 && i < len(keys); i++ {
- // Give one key at a time to the worker, and then shut it down,
- // to make sure that it will correctly save and restore these
- // signatures across restart.
- s.keys = keys[i : i+1]
- dbs, _ := dbOpenTestRand(t, true, dbRand)
- w := newTestWorkerDB(t, s, dbs.Wdb)
- w.Start()
-
- // Check if the cert formed
- select {
- case <-s.txmsg:
- formedAt = i
- case <-time.After(time.Second):
- }
-
- w.Shutdown()
- }
-
- require.True(t, formedAt > 1)
- require.True(t, formedAt < 5)
-}
-
-func TestWorkerHandleSig(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var keys []account.Participation
- for i := 0; i < 2; i++ {
- var parent basics.Address
- crypto.RandBytes(parent[:])
- p := newPartKey(t, parent)
- defer p.Close()
- keys = append(keys, p.Participation)
- }
-
- s := newWorkerStubs(t, keys, 10)
- w := newTestWorker(t, s)
- w.Start()
- defer w.Shutdown()
-
- proto := config.Consensus[protocol.ConsensusFuture]
- s.advanceLatest(3 * proto.CompactCertRounds)
-
- for i := 0; i < len(keys); i++ {
- // Expect all signatures to be broadcast.
- msg := <-s.sigmsg
- res := w.handleSigMessage(network.IncomingMessage{
- Data: msg,
- })
-
- // This should be a dup signature, so should not be broadcast
- // but also not disconnected.
- require.Equal(t, res.Action, network.Ignore)
- }
-}
diff --git a/components/mocks/mockParticipationRegistry.go b/components/mocks/mockParticipationRegistry.go
index d2aa53f26..d7a53c36f 100644
--- a/components/mocks/mockParticipationRegistry.go
+++ b/components/mocks/mockParticipationRegistry.go
@@ -44,6 +44,11 @@ func (m *MockParticipationRegistry) Delete(id account.ParticipationID) error {
return nil
}
+// DeleteStateProofKeys removes all stateproof keys preceding a given round (including)
+func (m *MockParticipationRegistry) DeleteStateProofKeys(id account.ParticipationID, round basics.Round) error {
+ return nil
+}
+
// DeleteExpired removes all records from storage which are expired on the given round.
func (m *MockParticipationRegistry) DeleteExpired(latestRound basics.Round, agreementProto config.ConsensusParams) error {
return nil
@@ -64,9 +69,9 @@ func (m *MockParticipationRegistry) GetForRound(id account.ParticipationID, roun
return account.ParticipationRecordForRound{}, nil
}
-// GetStateProofForRound fetches a record with stateproof secrets for a particular round.
-func (m *MockParticipationRegistry) GetStateProofForRound(id account.ParticipationID, round basics.Round) (account.StateProofRecordForRound, error) {
- return account.StateProofRecordForRound{}, nil
+// GetStateProofSecretsForRound fetches a record with stateproof secrets for a particular round.
+func (m *MockParticipationRegistry) GetStateProofSecretsForRound(id account.ParticipationID, round basics.Round) (account.StateProofSecretsForRound, error) {
+ return account.StateProofSecretsForRound{}, nil
}
// HasLiveKeys quickly tests to see if there is a valid participation key over some range of rounds
diff --git a/config/config.go b/config/config.go
index 4c1680130..023561f68 100644
--- a/config/config.go
+++ b/config/config.go
@@ -61,9 +61,9 @@ const LedgerFilenamePrefix = "ledger"
// It is used to recover from node crashes.
const CrashFilename = "crash.sqlite"
-// CompactCertFilename is the name of the compact certificate database file.
-// It is used to track in-progress compact certificates.
-const CompactCertFilename = "compactcert.sqlite"
+// StateProofFileName is the name of the state proof database file.
+// It is used to track in-progress state proofs.
+const StateProofFileName = "stateproof.sqlite"
// ParticipationRegistryFilename is the name of the participation registry database file.
// It is used for tracking participation key metadata.
diff --git a/config/consensus.go b/config/consensus.go
index 42e196b1b..af0b50e53 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -344,50 +344,53 @@ type ConsensusParams struct {
// to limit the maximum size of a single balance record
MaximumMinimumBalance uint64
- // CompactCertRounds defines the frequency with which compact
- // certificates are generated. Every round that is a multiple
- // of CompactCertRounds, the block header will include a Merkle
+ // StateProofInterval defines the frequency with which state
+ // proofs are generated. Every round that is a multiple
+ // of StateProofInterval, the block header will include a vector
// commitment to the set of online accounts (that can vote after
- // another CompactCertRounds rounds), and that block will be signed
- // (forming a compact certificate) by the voters from the previous
- // such Merkle tree commitment. A value of zero means no compact
- // certificates.
- CompactCertRounds uint64
-
- // CompactCertTopVoters is a bound on how many online accounts get to
- // participate in forming the compact certificate, by including the
- // top CompactCertTopVoters accounts (by normalized balance) into the
- // Merkle commitment.
- CompactCertTopVoters uint64
-
- // CompactCertVotersLookback is the number of blocks we skip before
- // publishing a Merkle commitment to the online accounts. Namely,
- // if block number N contains a Merkle commitment to the online
- // accounts (which, incidentally, means N%CompactCertRounds=0),
+ // another StateProofInterval rounds), and that block will be signed
+ // (forming a state proof) by the voters from the previous
+ // such vector commitment. A value of zero means no state proof.
+ StateProofInterval uint64
+
+ // StateProofTopVoters is a bound on how many online accounts get to
+ // participate in forming the state proof, by including the
+ // top StateProofTopVoters accounts (by normalized balance) into the
+ // vector commitment.
+ StateProofTopVoters uint64
+
+ // StateProofVotersLookback is the number of blocks we skip before
+ // publishing a vector commitment to the online accounts. Namely,
+ // if block number N contains a vector commitment to the online
+ // accounts (which, incidentally, means N%StateProofInterval=0),
// then the balances reflected in that commitment must come from
- // block N-CompactCertVotersLookback. This gives each node some
- // time (CompactCertVotersLookback blocks worth of time) to
- // construct this Merkle tree, so as to avoid placing the
- // construction of this Merkle tree (and obtaining the requisite
+ // block N-StateProofVotersLookback. This gives each node some
+ // time (StateProofVotersLookback blocks worth of time) to
+ // construct this vector commitment, so as to avoid placing the
+ // construction of this vector commitment (and obtaining the requisite
// accounts and balances) in the critical path.
- CompactCertVotersLookback uint64
+ StateProofVotersLookback uint64
- // CompactCertWeightThreshold specifies the fraction of top voters weight
- // that must sign the message (block header) for security. The compact
- // certificate ensures this threshold holds; however, forming a valid
- // compact certificate requires a somewhat higher number of signatures,
- // and the more signatures are collected, the smaller the compact cert
+ // StateProofWeightThreshold specifies the fraction of top voters weight
+ // that must sign the message (block header) for security. The state
+ // proof ensures this threshold holds; however, forming a valid
+ // state proof requires a somewhat higher number of signatures,
+ // and the more signatures are collected, the smaller the state proof
// can be.
//
// This threshold can be thought of as the maximum fraction of
- // malicious weight that compact certificates defend against.
+ // malicious weight that state proofs defend against.
//
- // The threshold is computed as CompactCertWeightThreshold/(1<<32).
- CompactCertWeightThreshold uint32
+ // The threshold is computed as StateProofWeightThreshold/(1<<32).
+ StateProofWeightThreshold uint32
- // CompactCertSecKQ is the security parameter (k+q) for the compact
- // certificate scheme.
- CompactCertSecKQ uint64
+ // StateProofStrengthTarget represents either k+q (for pre-quantum security) or k+2q (for post-quantum security)
+ StateProofStrengthTarget uint64
+
+ // StateProofMaxRecoveryIntervals represents the number of state proof intervals that the network will try to catch-up with.
+ // When the difference between the latest state proof and the current round will be greater than value, Nodes will
+ // release resources allocated for creating state proofs.
+ StateProofMaxRecoveryIntervals uint64
// EnableAssetCloseAmount adds an extra field to the ApplyData. The field contains the amount of the remaining
// asset that were sent to the close-to address.
@@ -418,9 +421,6 @@ type ConsensusParams struct {
// in a separate table.
EnableAccountDataResourceSeparation bool
- //EnableBatchVerification enable the use of the batch verification algorithm.
- EnableBatchVerification bool
-
// When rewards rate changes, use the new value immediately.
RewardsCalculationFix bool
@@ -440,6 +440,26 @@ type ConsensusParams struct {
// This new header is in addition to the existing SHA512_256 merkle root.
// It is useful for verifying transaction on different blockchains, as some may not support SHA512_256 OPCODE natively but SHA256 is common.
EnableSHA256TxnCommitmentHeader bool
+
+ // CatchpointLookback specifies a round lookback to take catchpoints at.
+ // Accounts snapshot for round X will be taken at X-CatchpointLookback
+ CatchpointLookback uint64
+
+ // DeeperBlockHeaderHistory defines number of rounds in addition to MaxTxnLife
+ // available for lookup for smart contracts and smart signatures.
+ // Setting it to 1 for example allows querying data up to MaxTxnLife + 1 rounds back from the Latest.
+ DeeperBlockHeaderHistory uint64
+
+ // EnableOnlineAccountCatchpoints specifies when to re-enable catchpoints after the online account table migration has occurred.
+ EnableOnlineAccountCatchpoints bool
+
+ // UnfundedSenders ensures that accounts with no balance (so they don't even
+ // "exist") can still be a transaction sender by avoiding updates to rewards
+ // state for accounts with no algos. The actual change implemented to allow
+ // this is to avoid updating an account if the only change would have been
+ // the rewardsLevel, but the rewardsLevel has no meaning because the account
+ // has fewer than RewardUnit algos.
+ UnfundedSenders bool
}
// PaysetCommitType enumerates possible ways for the block header to commit to
@@ -455,7 +475,7 @@ const (
// PaysetCommitFlat hashes the entire payset array.
PaysetCommitFlat
- // PaysetCommitMerkle uses merklearray to commit to the payset.
+ // PaysetCommitMerkle uses merkle array to commit to the payset.
PaysetCommitMerkle
)
@@ -596,7 +616,7 @@ func (cp ConsensusProtocols) DeepCopy() ConsensusProtocols {
return staticConsensus
}
-// Merge merges a configurable consensus ontop of the existing consensus protocol and return
+// Merge merges a configurable consensus on top of the existing consensus protocol and return
// a new consensus protocol without modify any of the incoming structures.
func (cp ConsensusProtocols) Merge(configurableConsensus ConsensusProtocols) ConsensusProtocols {
staticConsensus := cp.DeepCopy()
@@ -1085,7 +1105,6 @@ func initConsensusProtocols() {
v31 := v30
v31.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- v31.EnableBatchVerification = true
v31.RewardsCalculationFix = true
v31.MaxProposedExpiredOnlineAccounts = 32
@@ -1131,27 +1150,59 @@ func initConsensusProtocols() {
// v31 can be upgraded to v32, with an update delay of 7 days ( see calculation above )
v31.ApprovedUpgrades[protocol.ConsensusV32] = 140000
- // ConsensusFuture is used to test features that are implemented
- // but not yet released in a production protocol version.
- vFuture := v32
- vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ v33 := v32
+ v33.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
+ // Make the accounts snapshot for round X at X-CatchpointLookback
+ // order to guarantee all nodes produce catchpoint at the same round.
+ v33.CatchpointLookback = 320
+
+ // Require MaxTxnLife + X blocks and headers preserved by a node
+ v33.DeeperBlockHeaderHistory = 1
+
+ v33.MaxTxnBytesPerBlock = 5 * 1024 * 1024
+
+ Consensus[protocol.ConsensusV33] = v33
- // FilterTimeout for period 0 should take a new optimized, configured value, need to revisit this later
- vFuture.AgreementFilterTimeoutPeriod0 = 4 * time.Second
+ // v32 can be upgraded to v33, with an update delay of 7 days ( see calculation above )
+ v32.ApprovedUpgrades[protocol.ConsensusV33] = 140000
- // Enable compact certificates.
- vFuture.CompactCertRounds = 256
- vFuture.CompactCertTopVoters = 1024 * 1024
- vFuture.CompactCertVotersLookback = 16
- vFuture.CompactCertWeightThreshold = (1 << 32) * 30 / 100
- vFuture.CompactCertSecKQ = 128
+ v34 := v33
+ v34.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- vFuture.LogicSigVersion = 7 // When moving this to a release, put a new higher LogicSigVersion here
- vFuture.MinInnerApplVersion = 4
+ // Enable state proofs.
+ v34.StateProofInterval = 256
+ v34.StateProofTopVoters = 1024
+ v34.StateProofVotersLookback = 16
+ v34.StateProofWeightThreshold = (1 << 32) * 30 / 100
+ v34.StateProofStrengthTarget = 256
+ v34.StateProofMaxRecoveryIntervals = 10
- vFuture.UnifyInnerTxIDs = true
+ v34.LogicSigVersion = 7
+ v34.MinInnerApplVersion = 4
+
+ v34.UnifyInnerTxIDs = true
+
+ v34.EnableSHA256TxnCommitmentHeader = true
+ v34.EnableOnlineAccountCatchpoints = true
+
+ v34.UnfundedSenders = true
+
+ v34.AgreementFilterTimeoutPeriod0 = 3400 * time.Millisecond
+
+ Consensus[protocol.ConsensusV34] = v34
+
+ // v33 can be upgraded to v34, with an update delay of 12h:
+ // 10046 = (12 * 60 * 60 / 4.3)
+ // for the sake of future manual calculations, we'll round that down a bit :
+ v33.ApprovedUpgrades[protocol.ConsensusV34] = 10000
+
+ // ConsensusFuture is used to test features that are implemented
+ // but not yet released in a production protocol version.
+ vFuture := v34
+ vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- vFuture.EnableSHA256TxnCommitmentHeader = true
+ vFuture.LogicSigVersion = 8 // When moving this to a release, put a new higher LogicSigVersion here
Consensus[protocol.ConsensusFuture] = vFuture
}
diff --git a/config/consensus_test.go b/config/consensus_test.go
index 92edf1798..dd77ed499 100644
--- a/config/consensus_test.go
+++ b/config/consensus_test.go
@@ -59,13 +59,13 @@ func TestConsensusUpgradeWindow(t *testing.T) {
}
}
-func TestConsensusCompactCertParams(t *testing.T) {
+func TestConsensusStateProofParams(t *testing.T) {
partitiontest.PartitionTest(t)
for _, params := range Consensus {
- if params.CompactCertRounds != 0 {
- require.Equal(t, uint64(1<<16), (params.MaxKeyregValidPeriod+1)/params.CompactCertRounds,
- "Validity period divided by CompactCertRounds should allow for no more than %d generated keys", 1<<16)
+ if params.StateProofInterval != 0 {
+ require.Equal(t, uint64(1<<16), (params.MaxKeyregValidPeriod+1)/params.StateProofInterval,
+ "Validity period divided by StateProofInterval should allow for no more than %d generated keys", 1<<16)
}
}
}
diff --git a/config/localTemplate.go b/config/localTemplate.go
index 74c69c0cf..8a8120c5f 100644
--- a/config/localTemplate.go
+++ b/config/localTemplate.go
@@ -41,10 +41,10 @@ type Local struct {
// Version tracks the current version of the defaults so we can migrate old -> new
// This is specifically important whenever we decide to change the default value
// for an existing parameter. This field tag must be updated any time we add a new version.
- Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22"`
+ Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23"`
// environmental (may be overridden)
- // When enabled, stores blocks indefinitally, otherwise, only the most recents blocks
+ // When enabled, stores blocks indefinitely, otherwise, only the most recent blocks
// are being kept around. ( the precise number of recent blocks depends on the consensus parameters )
Archival bool `version[0]:"false"`
@@ -162,7 +162,7 @@ type Local struct {
SuggestedFeeBlockHistory int `version[0]:"3"`
// TxPoolSize is the number of transactions that fit in the transaction pool
- TxPoolSize int `version[0]:"50000" version[5]:"15000"`
+ TxPoolSize int `version[0]:"50000" version[5]:"15000" version[23]:"75000"`
// number of seconds allowed for syncing transactions
TxSyncTimeoutSeconds int64 `version[0]:"30"`
@@ -318,7 +318,8 @@ type Local struct {
// CatchpointTracking determines if catchpoints are going to be tracked. The value is interpreted as follows:
// A value of -1 means "don't track catchpoints".
- // A value of 1 means "track catchpoints as long as CatchpointInterval is also set to a positive non-zero value". If CatchpointInterval <= 0, no catchpoint tracking would be performed.
+ // A value of 1 means "track catchpoints as long as CatchpointInterval > 0".
+ // A value of 2 means "track catchpoints and always generate catchpoint files as long as CatchpointInterval > 0".
// A value of 0 means automatic, which is the default value. In this mode, a non archival node would not track the catchpoints, and an archival node would track the catchpoints as long as CatchpointInterval > 0.
// Other values of CatchpointTracking would give a warning in the log file, and would behave as if the default value was provided.
CatchpointTracking int64 `version[11]:"0"`
@@ -350,7 +351,7 @@ type Local struct {
NetworkMessageTraceServer string `version[13]:""`
// VerifiedTranscationsCacheSize defines the number of transactions that the verified transactions cache would hold before cycling the cache storage in a round-robin fashion.
- VerifiedTranscationsCacheSize int `version[14]:"30000"`
+ VerifiedTranscationsCacheSize int `version[14]:"30000" version[23]:"150000"`
// EnableCatchupFromArchiveServers controls which peers the catchup service would use in order to catchup.
// When enabled, the catchup service would use the archive servers before falling back to the relays.
@@ -425,7 +426,7 @@ type Local struct {
TransactionSyncSignificantMessageThreshold uint64 `version[17]:"0"`
// ProposalAssemblyTime is the max amount of time to spend on generating a proposal block.
- ProposalAssemblyTime time.Duration `version[19]:"250000000"`
+ ProposalAssemblyTime time.Duration `version[19]:"250000000" version[23]:"500000000"`
// When the number of http connections to the REST layer exceeds the soft limit,
// we start returning http code 429 Too Many Requests.
@@ -447,6 +448,10 @@ type Local struct {
// AgreementIncomingBundlesQueueLength sets the size of the buffer holding incoming bundles.
AgreementIncomingBundlesQueueLength uint64 `version[21]:"7"`
+
+ // MaxAcctLookback sets the maximum lookback range for account states,
+ // i.e. the ledger can answer account states questions for the range Latest-MaxAcctLookback...Latest
+ MaxAcctLookback uint64 `version[23]:"4"`
}
// DNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers
diff --git a/config/local_defaults.go b/config/local_defaults.go
index b8354926b..4b6017050 100644
--- a/config/local_defaults.go
+++ b/config/local_defaults.go
@@ -20,7 +20,7 @@
package config
var defaultLocal = Local{
- Version: 22,
+ Version: 23,
AccountUpdatesStatsInterval: 5000000000,
AccountsRebuildSynchronousMode: 1,
AgreementIncomingBundlesQueueLength: 7,
@@ -85,6 +85,7 @@ var defaultLocal = Local{
LogArchiveName: "node.archive.log",
LogSizeLimit: 1073741824,
MaxAPIResourcesPerAccount: 100000,
+ MaxAcctLookback: 4,
MaxCatchpointDownloadDuration: 7200000000000,
MaxConnectionsPerIP: 30,
MinCatchpointFileDownloadBytesPerSecond: 20480,
@@ -100,7 +101,7 @@ var defaultLocal = Local{
PeerConnectionsUpdateInterval: 3600,
PeerPingPeriodSeconds: 0,
PriorityPeers: map[string]bool{},
- ProposalAssemblyTime: 250000000,
+ ProposalAssemblyTime: 500000000,
PublicAddress: "",
ReconnectTime: 60000000000,
ReservedFDs: 256,
@@ -117,10 +118,10 @@ var defaultLocal = Local{
TransactionSyncDataExchangeRate: 0,
TransactionSyncSignificantMessageThreshold: 0,
TxPoolExponentialIncreaseFactor: 2,
- TxPoolSize: 15000,
+ TxPoolSize: 75000,
TxSyncIntervalSeconds: 60,
TxSyncServeResponseSize: 1000000,
TxSyncTimeoutSeconds: 30,
UseXForwardedForAddressField: "",
- VerifiedTranscationsCacheSize: 30000,
+ VerifiedTranscationsCacheSize: 150000,
}
diff --git a/config/version.go b/config/version.go
index c85775d55..37752c90e 100644
--- a/config/version.go
+++ b/config/version.go
@@ -33,7 +33,7 @@ const VersionMajor = 3
// VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced.
// Not enforced until after initial public release (x > 0).
-const VersionMinor = 8
+const VersionMinor = 9
// Version is the type holding our full version information.
type Version struct {
diff --git a/crypto/batchverifier.go b/crypto/batchverifier.go
index baedf4cd1..cce8e06d7 100644
--- a/crypto/batchverifier.go
+++ b/crypto/batchverifier.go
@@ -43,10 +43,9 @@ import (
// BatchVerifier enqueues signatures to be validated in batch.
type BatchVerifier struct {
- messages []Hashable // contains a slice of messages to be hashed. Each message is varible length
- publicKeys []SignatureVerifier // contains a slice of public keys. Each individual public key is 32 bytes.
- signatures []Signature // contains a slice of signatures keys. Each individual signature is 64 bytes.
- useBatchVerification bool
+ messages []Hashable // contains a slice of messages to be hashed. Each message is varible length
+ publicKeys []SignatureVerifier // contains a slice of public keys. Each individual public key is 32 bytes.
+ signatures []Signature // contains a slice of signatures keys. Each individual signature is 64 bytes.
}
const minBatchVerifierAlloc = 16
@@ -63,31 +62,22 @@ func ed25519_randombytes_unsafe(p unsafe.Pointer, len C.size_t) {
RandBytes(randBuf)
}
-// MakeBatchVerifierWithAlgorithmDefaultSize create a BatchVerifier instance. This function pre-allocates
-// amount of free space to enqueue signatures without expanding. this function always use the batch
-// verification algorithm
-func MakeBatchVerifierWithAlgorithmDefaultSize() *BatchVerifier {
- return MakeBatchVerifier(minBatchVerifierAlloc, true)
+// MakeBatchVerifier creates a BatchVerifier instance.
+func MakeBatchVerifier() *BatchVerifier {
+ return MakeBatchVerifierWithHint(minBatchVerifierAlloc)
}
-// MakeBatchVerifierDefaultSize create a BatchVerifier instance. This function pre-allocates
+// MakeBatchVerifierWithHint creates a BatchVerifier instance. This function pre-allocates
// amount of free space to enqueue signatures without expanding
-func MakeBatchVerifierDefaultSize(enableBatchVerification bool) *BatchVerifier {
- return MakeBatchVerifier(minBatchVerifierAlloc, enableBatchVerification)
-}
-
-// MakeBatchVerifier create a BatchVerifier instance. This function pre-allocates
-// a given space so it will not expaned the storage
-func MakeBatchVerifier(hint int, enableBatchVerification bool) *BatchVerifier {
+func MakeBatchVerifierWithHint(hint int) *BatchVerifier {
// preallocate enough storage for the expected usage. We will reallocate as needed.
if hint < minBatchVerifierAlloc {
hint = minBatchVerifierAlloc
}
return &BatchVerifier{
- messages: make([]Hashable, 0, hint),
- publicKeys: make([]SignatureVerifier, 0, hint),
- signatures: make([]Signature, 0, hint),
- useBatchVerification: enableBatchVerification,
+ messages: make([]Hashable, 0, hint),
+ publicKeys: make([]SignatureVerifier, 0, hint),
+ signatures: make([]Signature, 0, hint),
}
}
@@ -126,27 +116,15 @@ func (b *BatchVerifier) Verify() error {
return ErrZeroTransactionInBatch
}
- if b.useBatchVerification {
- var messages = make([][]byte, b.GetNumberOfEnqueuedSignatures())
- for i, m := range b.messages {
- messages[i] = HashRep(m)
- }
- if batchVerificationImpl(messages, b.publicKeys, b.signatures) {
- return nil
- }
- return ErrBatchVerificationFailed
+ var messages = make([][]byte, b.GetNumberOfEnqueuedSignatures())
+ for i, m := range b.messages {
+ messages[i] = HashRep(m)
}
- return b.verifyOneByOne()
-}
-
-func (b *BatchVerifier) verifyOneByOne() error {
- for i := range b.messages {
- verifier := b.publicKeys[i]
- if !verifier.Verify(b.messages[i], b.signatures[i], false) {
- return ErrBatchVerificationFailed
- }
+ if batchVerificationImpl(messages, b.publicKeys, b.signatures) {
+ return nil
}
- return nil
+ return ErrBatchVerificationFailed
+
}
// batchVerificationImpl invokes the ed25519 batch verification algorithm.
diff --git a/crypto/batchverifier_test.go b/crypto/batchverifier_test.go
index 9a9c49160..781a80e17 100644
--- a/crypto/batchverifier_test.go
+++ b/crypto/batchverifier_test.go
@@ -27,7 +27,7 @@ import (
func TestBatchVerifierSingle(t *testing.T) {
partitiontest.PartitionTest(t)
// test expected success
- bv := MakeBatchVerifierWithAlgorithmDefaultSize()
+ bv := MakeBatchVerifier()
msg := randString()
var s Seed
RandBytes(s[:])
@@ -37,7 +37,7 @@ func TestBatchVerifierSingle(t *testing.T) {
require.NoError(t, bv.Verify())
// test expected failure
- bv = MakeBatchVerifierWithAlgorithmDefaultSize()
+ bv = MakeBatchVerifier()
msg = randString()
RandBytes(s[:])
sigSecrets = GenerateSignatureSecrets(s)
@@ -52,7 +52,7 @@ func TestBatchVerifierBulk(t *testing.T) {
partitiontest.PartitionTest(t)
for i := 1; i < 64*2+3; i++ {
n := i
- bv := MakeBatchVerifier(n, true)
+ bv := MakeBatchVerifierWithHint(n)
var s Seed
for i := 0; i < n; i++ {
@@ -71,7 +71,7 @@ func TestBatchVerifierBulk(t *testing.T) {
func TestBatchVerifierBulkWithExpand(t *testing.T) {
partitiontest.PartitionTest(t)
n := 64
- bv := MakeBatchVerifierWithAlgorithmDefaultSize()
+ bv := MakeBatchVerifier()
var s Seed
RandBytes(s[:])
@@ -87,7 +87,7 @@ func TestBatchVerifierBulkWithExpand(t *testing.T) {
func TestBatchVerifierWithInvalidSiganture(t *testing.T) {
partitiontest.PartitionTest(t)
n := 64
- bv := MakeBatchVerifierWithAlgorithmDefaultSize()
+ bv := MakeBatchVerifier()
var s Seed
RandBytes(s[:])
@@ -109,7 +109,7 @@ func TestBatchVerifierWithInvalidSiganture(t *testing.T) {
func BenchmarkBatchVerifier(b *testing.B) {
c := makeCurve25519Secret()
- bv := MakeBatchVerifier(1, true)
+ bv := MakeBatchVerifierWithHint(1)
for i := 0; i < b.N; i++ {
str := randString()
bv.EnqueueSignature(c.SignatureVerifier, str, c.Sign(str))
@@ -121,6 +121,6 @@ func BenchmarkBatchVerifier(b *testing.B) {
func TestEmpty(t *testing.T) {
partitiontest.PartitionTest(t)
- bv := MakeBatchVerifierWithAlgorithmDefaultSize()
+ bv := MakeBatchVerifier()
require.Error(t, bv.Verify())
}
diff --git a/crypto/compactcert/bigfloat.go b/crypto/compactcert/bigfloat.go
deleted file mode 100644
index eb4004736..000000000
--- a/crypto/compactcert/bigfloat.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package compactcert
-
-import (
- "fmt"
- "math/bits"
-)
-
-// A bigFloat represents the number mantissa*2^exp, which must be non-zero.
-//
-// A canonical representation is one where the highest bit of mantissa is
-// set. Every operation enforces canonicality of results.
-//
-// We use 32-bit values here to avoid requiring a 64bit-by-64bit-to-128bit
-// multiply operation for anyone that needs to implement this (even though
-// Go has this operation, as bits.Mul64).
-type bigFloat struct {
- mantissa uint32
- exp int32
-}
-
-// Each bigFloat is associated with a rounding mode (up, away from zero, or
-// down, towards zero). This is reflected by these two types of bigFloat.
-type bigFloatUp struct {
- bigFloat
-}
-
-type bigFloatDn struct {
- bigFloat
-}
-
-// canonicalize() ensures that the bigFloat is canonical.
-func (a *bigFloat) canonicalize() {
- if a.mantissa == 0 {
- // Just to avoid infinite loops in some error case.
- return
- }
-
- for (a.mantissa & (1 << 31)) == 0 {
- a.mantissa = a.mantissa << 1
- a.exp = a.exp - 1
- }
-}
-
-// doRoundUp adds one to the mantissa of a canonical bigFloat
-// to implement the rounding-up when there are leftover low bits.
-func (a *bigFloatUp) doRoundUp() {
- if a.mantissa == (1<<32)-1 {
- a.mantissa = 1 << 31
- a.exp++
- } else {
- a.mantissa++
- }
-}
-
-// geRaw returns whether a>=b. The Raw suffix indicates that
-// this comparison does not take rounding into account, and might
-// not be true if done with arbitrary-precision numbers.
-func (a *bigFloat) geRaw(b *bigFloat) bool {
- if a.exp > b.exp {
- return true
- }
-
- if a.exp < b.exp {
- return false
- }
-
- return a.mantissa >= b.mantissa
-}
-
-// ge returns whether a>=b. It requires that a was computed with
-// rounding-down and b was computed with rounding-up, so that if
-// ge returns true, the arbitrary-precision computation would have
-// also been >=.
-func (a *bigFloatDn) ge(b *bigFloatUp) bool {
- return a.geRaw(&b.bigFloat)
-}
-
-// setu64Dn sets the value to the supplied uint64 (which might get
-// rounded down in the process). x must not be zero. truncated
-// returns whether any non-zero bits were truncated (rounded down).
-func (a *bigFloat) setu64Dn(x uint64) (truncated bool, err error) {
- if x == 0 {
- return false, fmt.Errorf("bigFloat cannot be zero")
- }
-
- e := int32(0)
-
- for x >= (1 << 32) {
- if (x & 1) != 0 {
- truncated = true
- }
-
- x = x >> 1
- e = e + 1
- }
-
- a.mantissa = uint32(x)
- a.exp = e
- a.canonicalize()
- return
-}
-
-// setu64 calls setu64Dn and implements rounding based on the type.
-func (a *bigFloatUp) setu64(x uint64) error {
- truncated, err := a.setu64Dn(x)
- if truncated {
- a.doRoundUp()
- }
- return err
-}
-
-func (a *bigFloatDn) setu64(x uint64) error {
- _, err := a.setu64Dn(x)
- return err
-}
-
-// setu32 sets the value to the supplied uint32.
-func (a *bigFloat) setu32(x uint32) error {
- if x == 0 {
- return fmt.Errorf("bigFloat cannot be zero")
- }
-
- a.mantissa = x
- a.exp = 0
- a.canonicalize()
- return nil
-}
-
-// setpow2 sets the value to 2^x.
-func (a *bigFloat) setpow2(x int32) {
- a.mantissa = 1
- a.exp = x
- a.canonicalize()
-}
-
-// mulDn sets a to the product a*b, keeping the most significant 32 bits
-// of the product's mantissa. The return value indicates if any non-zero
-// bits were discarded (rounded down).
-func (a *bigFloat) mulDn(b *bigFloat) bool {
- hi, lo := bits.Mul32(a.mantissa, b.mantissa)
-
- a.mantissa = hi
- a.exp = a.exp + b.exp + 32
-
- if (a.mantissa & (1 << 31)) == 0 {
- a.mantissa = (a.mantissa << 1) | (lo >> 31)
- a.exp = a.exp - 1
- lo = lo << 1
- }
-
- return lo != 0
-}
-
-// mul calls mulDn and implements appropriate rounding.
-// Types prevent multiplying two values with different rounding types.
-func (a *bigFloatUp) mul(b *bigFloatUp) {
- truncated := a.mulDn(&b.bigFloat)
- if truncated {
- a.doRoundUp()
- }
-}
-
-func (a *bigFloatDn) mul(b *bigFloatDn) {
- a.mulDn(&b.bigFloat)
-}
-
-// String returns a string representation of a.
-func (a *bigFloat) String() string {
- return fmt.Sprintf("%d*2^%d", a.mantissa, a.exp)
-}
diff --git a/crypto/compactcert/bigfloat_test.go b/crypto/compactcert/bigfloat_test.go
deleted file mode 100644
index 12efcf38b..000000000
--- a/crypto/compactcert/bigfloat_test.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package compactcert
-
-import (
- "math/big"
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-func rand32() uint32 {
- return uint32(crypto.RandUint64() & 0xffffffff)
-}
-
-func TestBigFloatRounding(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := &bigFloatDn{}
- b := &bigFloatUp{}
-
- a.setu64(1 << 63)
- b.setu64(1 << 63)
-
- require.True(t, a.geRaw(&b.bigFloat))
- require.True(t, b.geRaw(&a.bigFloat))
-
- a.mul(a)
- b.mul(b)
-
- require.True(t, a.geRaw(&b.bigFloat))
- require.True(t, b.geRaw(&a.bigFloat))
-
- a.setu64((1 << 64) - 1)
- b.setu64((1 << 64) - 1)
-
- require.False(t, a.geRaw(&b.bigFloat))
- require.True(t, b.geRaw(&a.bigFloat))
-
- a.setu32((1 << 32) - 1)
- b.setu32((1 << 32) - 1)
-
- a.mul(a)
- b.mul(b)
-
- require.False(t, a.geRaw(&b.bigFloat))
- require.True(t, b.geRaw(&a.bigFloat))
-}
-
-func TestBigFloat(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := &bigFloatDn{}
- b := &bigFloatDn{}
-
- a.setu64(1)
- require.Equal(t, a.mantissa, uint32(1<<31))
- require.Equal(t, a.exp, int32(-31))
-
- a.setu32(1)
- require.Equal(t, a.mantissa, uint32(1<<31))
- require.Equal(t, a.exp, int32(-31))
-
- for i := int32(-256); i < 256; i++ {
- a.setpow2(i)
- require.Equal(t, a.mantissa, uint32(1<<31))
- require.Equal(t, a.exp, i-31)
- }
-
- for i := 0; i < 8192; i++ {
- x := rand32()
- a.setu32(x)
- require.True(t, a.exp <= 0)
- require.Equal(t, x, a.mantissa>>(-a.exp))
- }
-
- for i := 0; i < 8192; i++ {
- x := uint64(rand32())
- a.setu64(x)
- if a.exp <= 0 {
- require.Equal(t, x, uint64(a.mantissa>>(-a.exp)))
- }
- if a.exp >= 0 {
- require.Equal(t, x>>a.exp, uint64(a.mantissa))
- }
- }
-
- for i := 0; i < 8192; i++ {
- x := crypto.RandUint64()
- a.setu64(x)
- if a.exp <= 0 {
- require.Equal(t, x, uint64(a.mantissa>>(-a.exp)))
- }
- if a.exp >= 0 {
- require.Equal(t, x>>a.exp, uint64(a.mantissa))
- }
- }
-
- for i := 0; i < 8192; i++ {
- x := rand32()
- y := rand32()
- a.setu64(uint64(x))
- b.setu64(uint64(y))
-
- require.Equal(t, x >= y, a.geRaw(&b.bigFloat))
- require.Equal(t, x < y, b.geRaw(&a.bigFloat))
- require.True(t, a.geRaw(&a.bigFloat))
- require.True(t, b.geRaw(&b.bigFloat))
- }
-
- xx := &big.Int{}
- yy := &big.Int{}
-
- for i := 0; i < 8192; i++ {
- x := rand32()
- y := rand32()
- a.setu64(uint64(x))
- b.setu64(uint64(y))
- a.mul(b)
-
- xx.SetUint64(uint64(x))
- yy.SetUint64(uint64(y))
- xx.Mul(xx, yy)
- if a.exp > 0 {
- xx.Rsh(xx, uint(a.exp))
- }
- if a.exp < 0 {
- xx.Lsh(xx, uint(-a.exp))
- }
- require.Equal(t, a.mantissa, uint32(xx.Uint64()))
- }
-}
-
-func BenchmarkBigFloatMulUp(b *testing.B) {
- a := &bigFloatUp{}
- a.setu32((1 << 32) - 1)
-
- for i := 0; i < b.N; i++ {
- a.mul(a)
- }
-}
-
-func BenchmarkBigFloatMulDn(b *testing.B) {
- a := &bigFloatDn{}
- a.setu32((1 << 32) - 1)
-
- for i := 0; i < b.N; i++ {
- a.mul(a)
- }
-}
diff --git a/crypto/compactcert/builder.go b/crypto/compactcert/builder.go
deleted file mode 100644
index 5ebacba03..000000000
--- a/crypto/compactcert/builder.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package compactcert
-
-import (
- "fmt"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/merklearray"
- "github.com/algorand/go-algorand/crypto/merklesignature"
- "github.com/algorand/go-algorand/data/basics"
-)
-
-//msgp:ignore sigslot
-type sigslot struct {
- // Weight is the weight of the participant signing this message.
- // This information is tracked here for convenience, but it does
- // not appear in the commitment to the sigs array; it comes from
- // the Weight field of the corresponding participant.
- Weight uint64
-
- // Include the parts of the sigslot that form the commitment to
- // the sigs array.
- sigslotCommit
-}
-
-// Builder keeps track of signatures on a message and eventually produces
-// a compact certificate for that message.
-type Builder struct {
- Params
-
- sigs []sigslot // Indexed by pos in participants
- sigsHasValidL bool // The L values in sigs are consistent with weights
- signedWeight uint64 // Total weight of signatures so far
- participants []basics.Participant
- parttree *merklearray.Tree
-
- // Cached cert, if Build() was called and no subsequent
- // Add() calls were made.
- cert *Cert
-}
-
-// MkBuilder constructs an empty builder (with no signatures). The message
-// to be signed, as well as other security parameters, are specified in
-// param. The participants that will sign the message are in part and
-// parttree.
-func MkBuilder(param Params, part []basics.Participant, parttree *merklearray.Tree) (*Builder, error) {
- npart := len(part)
-
- b := &Builder{
- Params: param,
- sigs: make([]sigslot, npart),
- sigsHasValidL: false,
- signedWeight: 0,
- participants: part,
- parttree: parttree,
- }
-
- return b, nil
-}
-
-// Present checks if the builder already contains a signature at a particular
-// offset.
-func (b *Builder) Present(pos uint64) bool {
- return b.sigs[pos].Weight != 0
-}
-
-// Add a signature to the set of signatures available for building a certificate.
-// verifySig should be set to true in production; setting it to false is useful
-// for benchmarking to avoid the cost of signature checks.
-func (b *Builder) Add(pos uint64, sig merklesignature.Signature, verifySig bool) error {
- if b.Present(pos) {
- return fmt.Errorf("position %d already added", pos)
- }
-
- // Check participants array
- if pos >= uint64(len(b.participants)) {
- return fmt.Errorf("pos %d >= len(participants) %d", pos, len(b.participants))
- }
-
- p := b.participants[pos]
-
- if p.Weight == 0 {
- return fmt.Errorf("position %d has zero weight", pos)
- }
-
- // Check signature
- if verifySig {
- if err := p.PK.Verify(uint64(b.SigRound), b.Msg, sig); err != nil {
- return err
- }
- }
-
- // Remember the signature
- b.sigs[pos].Weight = p.Weight
- b.sigs[pos].Sig.Signature = sig
- b.signedWeight += p.Weight
- b.cert = nil
- b.sigsHasValidL = false
- return nil
-}
-
-// Ready returns whether the certificate is ready to be built.
-func (b *Builder) Ready() bool {
- return b.signedWeight > b.Params.ProvenWeight
-}
-
-// SignedWeight returns the total weight of signatures added so far.
-func (b *Builder) SignedWeight() uint64 {
- return b.signedWeight
-}
-
-// coinIndex returns the position pos in the sigs array such that the sum
-// of all signature weights before pos is less than or equal to coinWeight,
-// but the sum of all signature weights up to and including pos exceeds
-// coinWeight.
-//
-// coinIndex works by doing a binary search on the sigs array.
-func (b *Builder) coinIndex(coinWeight uint64) (uint64, error) {
- if !b.sigsHasValidL {
- return 0, fmt.Errorf("coinIndex: need valid L values")
- }
-
- lo := uint64(0)
- hi := uint64(len(b.sigs))
-
-again:
- if lo >= hi {
- return 0, fmt.Errorf("coinIndex: lo %d >= hi %d", lo, hi)
- }
-
- mid := (lo + hi) / 2
- if coinWeight < b.sigs[mid].L {
- hi = mid
- goto again
- }
-
- if coinWeight < b.sigs[mid].L+b.sigs[mid].Weight {
- return mid, nil
- }
-
- lo = mid + 1
- goto again
-}
-
-// Build returns a compact certificate, if the builder has accumulated
-// enough signatures to construct it.
-func (b *Builder) Build() (*Cert, error) {
- if b.cert != nil {
- return b.cert, nil
- }
-
- if b.signedWeight <= b.Params.ProvenWeight {
- return nil, fmt.Errorf("not enough signed weight: %d <= %d", b.signedWeight, b.Params.ProvenWeight)
- }
-
- // Commit to the sigs array
- for i := 1; i < len(b.sigs); i++ {
- b.sigs[i].L = b.sigs[i-1].L + b.sigs[i-1].Weight
- }
- b.sigsHasValidL = true
-
- hfactory := crypto.HashFactory{HashType: HashType}
- sigtree, err := merklearray.BuildVectorCommitmentTree(committableSignatureSlotArray(b.sigs), hfactory)
- if err != nil {
- return nil, err
- }
-
- // Reveal sufficient number of signatures
- c := &Cert{
- SigCommit: sigtree.Root(),
- SignedWeight: b.signedWeight,
- Reveals: make(map[uint64]Reveal),
- }
-
- nr, err := b.numReveals(b.signedWeight)
- if err != nil {
- return nil, err
- }
-
- var proofPositions []uint64
- msgHash := crypto.GenericHashObj(hfactory.NewHash(), b.Msg)
-
- for j := uint64(0); j < nr; j++ {
- choice := coinChoice{
- J: j,
- SignedWeight: c.SignedWeight,
- ProvenWeight: b.ProvenWeight,
- Sigcom: c.SigCommit,
- Partcom: b.parttree.Root(),
- MsgHash: msgHash,
- }
-
- coin := hashCoin(choice)
- pos, err := b.coinIndex(coin)
- if err != nil {
- return nil, err
- }
-
- if pos >= uint64(len(b.participants)) {
- return nil, fmt.Errorf("pos %d >= len(participants) %d", pos, len(b.participants))
- }
-
- // If we already revealed pos, no need to do it again
- _, alreadyRevealed := c.Reveals[pos]
- if alreadyRevealed {
- continue
- }
-
- // Generate the reveal for pos
- c.Reveals[pos] = Reveal{
- SigSlot: b.sigs[pos].sigslotCommit,
- Part: b.participants[pos],
- }
-
- proofPositions = append(proofPositions, pos)
- }
-
- sigProofs, err := sigtree.Prove(proofPositions)
- if err != nil {
- return nil, err
- }
-
- partProofs, err := b.parttree.Prove(proofPositions)
- if err != nil {
- return nil, err
- }
-
- c.SigProofs = *sigProofs
- c.PartProofs = *partProofs
-
- return c, nil
-}
diff --git a/crypto/compactcert/common.go b/crypto/compactcert/common.go
deleted file mode 100644
index f1f359851..000000000
--- a/crypto/compactcert/common.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package compactcert
-
-import (
- "fmt"
- "math/big"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/protocol"
-)
-
-// The coinChoice type defines the fields that go into the hash for choosing
-// the index of the coin to reveal as part of the compact certificate.
-type coinChoice struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
-
- J uint64 `codec:"j"`
- SignedWeight uint64 `codec:"sigweight"`
- ProvenWeight uint64 `codec:"provenweight"`
- Sigcom crypto.GenericDigest `codec:"sigcom"`
- Partcom crypto.GenericDigest `codec:"partcom"`
- MsgHash crypto.GenericDigest `codec:"msghash"`
-}
-
-// ToBeHashed implements the crypto.Hashable interface.
-func (cc coinChoice) ToBeHashed() (protocol.HashID, []byte) {
- return protocol.CompactCertCoin, protocol.Encode(&cc)
-}
-
-// hashCoin returns a number in [0, choice.SignedWeight) with a nearly uniform
-// distribution, "randomized" by all of the fields in choice.
-func hashCoin(choice coinChoice) uint64 {
- h := crypto.HashObj(choice)
-
- i := &big.Int{}
- i.SetBytes(h[:])
-
- w := &big.Int{}
- w.SetUint64(choice.SignedWeight)
-
- res := &big.Int{}
- res.Mod(i, w)
- return res.Uint64()
-}
-
-// numReveals computes the number of reveals necessary to achieve the desired
-// security parameters. See section 8 of the ``Compact Certificates''
-// document for the analysis.
-//
-// numReveals is the smallest number that satisfies
-//
-// 2^-k >= 2^q * (provenWeight / signedWeight) ^ numReveals
-//
-// which is equivalent to the following:
-//
-// signedWeight ^ numReveals >= 2^(k+q) * provenWeight ^ numReveals
-//
-// To ensure that rounding errors do not reduce the security parameter,
-// we compute the left-hand side with rounding-down, and compute the
-// right-hand side with rounding-up.
-func numReveals(signedWeight uint64, provenWeight uint64, secKQ uint64, bound uint64) (uint64, error) {
- n := uint64(0)
-
- sw := &bigFloatDn{}
- err := sw.setu64(signedWeight)
- if err != nil {
- return 0, err
- }
-
- pw := &bigFloatUp{}
- err = pw.setu64(provenWeight)
- if err != nil {
- return 0, err
- }
-
- lhs := &bigFloatDn{}
- err = lhs.setu64(1)
- if err != nil {
- return 0, err
- }
-
- rhs := &bigFloatUp{}
- rhs.setpow2(int32(secKQ))
-
- for {
- if lhs.ge(rhs) {
- return n, nil
- }
-
- if n >= bound {
- return 0, fmt.Errorf("numReveals(%d, %d, %d) > %d", signedWeight, provenWeight, secKQ, bound)
- }
-
- lhs.mul(sw)
- rhs.mul(pw)
- n++
- }
-}
-
-func (p Params) numReveals(signedWeight uint64) (uint64, error) {
- return numReveals(signedWeight, p.ProvenWeight, p.SecKQ, MaxReveals)
-}
diff --git a/crypto/compactcert/common_test.go b/crypto/compactcert/common_test.go
deleted file mode 100644
index 73e71b893..000000000
--- a/crypto/compactcert/common_test.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package compactcert
-
-import (
- "testing"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-func TestHashCoin(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var slots [32]uint64
- var sigcom = make(crypto.GenericDigest, HashSize)
- var partcom = make(crypto.GenericDigest, HashSize)
- var msgHash = make(crypto.GenericDigest, HashSize)
-
- crypto.RandBytes(sigcom[:])
- crypto.RandBytes(partcom[:])
- crypto.RandBytes(msgHash[:])
-
- for j := uint64(0); j < 1000; j++ {
- choice := coinChoice{
- J: j,
- SignedWeight: uint64(len(slots)),
- ProvenWeight: uint64(len(slots)),
- Sigcom: sigcom,
- Partcom: partcom,
- MsgHash: msgHash,
- }
-
- coin := hashCoin(choice)
- if coin >= uint64(len(slots)) {
- t.Errorf("hashCoin out of bounds")
- }
-
- slots[coin]++
- }
-
- for i, count := range slots {
- if count < 3 {
- t.Errorf("slot %d too low: %d", i, count)
- }
- if count > 100 {
- t.Errorf("slot %d too high: %d", i, count)
- }
- }
-}
-
-func BenchmarkHashCoin(b *testing.B) {
- var sigcom = make(crypto.GenericDigest, HashSize)
- var partcom = make(crypto.GenericDigest, HashSize)
- var msgHash = make(crypto.GenericDigest, HashSize)
-
- crypto.RandBytes(sigcom[:])
- crypto.RandBytes(partcom[:])
- crypto.RandBytes(msgHash[:])
-
- for i := 0; i < b.N; i++ {
- choice := coinChoice{
- J: uint64(i),
- SignedWeight: 1024,
- ProvenWeight: 1024,
- Sigcom: sigcom,
- Partcom: partcom,
- MsgHash: msgHash,
- }
-
- hashCoin(choice)
- }
-}
-
-func TestNumReveals(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- billion := uint64(1000 * 1000 * 1000)
- microalgo := uint64(1000 * 1000)
- provenWeight := 2 * billion * microalgo
- secKQ := uint64(compactCertSecKQForTests)
- bound := uint64(1000)
-
- for i := uint64(3); i < 10; i++ {
- signedWeight := i * billion * microalgo
- n, err := numReveals(signedWeight, provenWeight, secKQ, bound)
- if err != nil {
- t.Error(err)
- }
-
- if n < 50 || n > 300 {
- t.Errorf("numReveals(%d, %d, %d) = %d looks suspect",
- signedWeight, provenWeight, secKQ, n)
- }
- }
-}
-
-func BenchmarkNumReveals(b *testing.B) {
- billion := uint64(1000 * 1000 * 1000)
- microalgo := uint64(1000 * 1000)
- provenWeight := 100 * billion * microalgo
- signedWeight := 110 * billion * microalgo
- secKQ := uint64(compactCertSecKQForTests)
- bound := uint64(1000)
-
- nr, err := numReveals(signedWeight, provenWeight, secKQ, bound)
- if nr < 900 {
- b.Errorf("numReveals(%d, %d, %d) = %d < 900", signedWeight, provenWeight, secKQ, nr)
- }
-
- for i := 0; i < b.N; i++ {
- _, err = numReveals(signedWeight, provenWeight, secKQ, bound)
- if err != nil {
- b.Error(err)
- }
- }
-}
diff --git a/crypto/compactcert/const.go b/crypto/compactcert/const.go
deleted file mode 100644
index 53133205b..000000000
--- a/crypto/compactcert/const.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package compactcert
-
-import (
- "github.com/algorand/go-algorand/crypto"
-)
-
-// HashType/ hashSize relate to the type of hash this package uses.
-const (
- HashType = crypto.Sumhash
- HashSize = crypto.SumhashDigestSize
-)
-
-const (
- // MaxReveals is a bound on allocation and on numReveals to limit log computation
- MaxReveals = 1024
-)
diff --git a/crypto/compactcert/verifier.go b/crypto/compactcert/verifier.go
deleted file mode 100644
index 5d9819a37..000000000
--- a/crypto/compactcert/verifier.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package compactcert
-
-import (
- "fmt"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/merklearray"
-)
-
-// Verifier is used to verify a compact certificate.
-type Verifier struct {
- Params
-
- partcom crypto.GenericDigest
-}
-
-// MkVerifier constructs a verifier to check the compact certificate
-// on the message specified in p, with partcom specifying the Merkle
-// root of the participants that must sign the message.
-func MkVerifier(p Params, partcom crypto.GenericDigest) *Verifier {
- return &Verifier{
- Params: p,
- partcom: partcom,
- }
-}
-
-// Verify checks if c is a valid compact certificate for the message
-// and participants that were used to construct the Verifier.
-func (v *Verifier) Verify(c *Cert) error {
- if c.SignedWeight <= v.ProvenWeight {
- return fmt.Errorf("cert signed weight %d <= proven weight %d", c.SignedWeight, v.ProvenWeight)
- }
-
- sigs := make(map[uint64]crypto.Hashable)
- parts := make(map[uint64]crypto.Hashable)
- for pos, r := range c.Reveals {
- sig, err := buildCommittableSignature(r.SigSlot)
- if err != nil {
- return err
- }
-
- sigs[pos] = sig
- parts[pos] = r.Part
-
- // verify that the msg and the signature is valid under the given participant's Pk
- err = r.Part.PK.Verify(
- uint64(v.SigRound),
- v.Msg,
- r.SigSlot.Sig.Signature)
-
- if err != nil {
- return fmt.Errorf("signature in reveal pos %d does not verify. error is %s", pos, err)
- }
- }
-
- // verify all the reveals proofs on the signature tree.
- if err := merklearray.VerifyVectorCommitment(c.SigCommit[:], sigs, &c.SigProofs); err != nil {
- return err
- }
-
- // verify all the reveals proofs on the participant tree.
- if err := merklearray.VerifyVectorCommitment(v.partcom[:], parts, &c.PartProofs); err != nil {
- return err
- }
-
- // Verify that the reveals contain the right coins
- nr, err := v.numReveals(c.SignedWeight)
- if err != nil {
- return err
- }
-
- msgHash := crypto.GenericHashObj(c.PartProofs.HashFactory.NewHash(), v.Msg)
-
- for j := uint64(0); j < nr; j++ {
- choice := coinChoice{
- J: j,
- SignedWeight: c.SignedWeight,
- ProvenWeight: v.ProvenWeight,
- Sigcom: c.SigCommit,
- Partcom: v.partcom,
- MsgHash: msgHash,
- }
-
- coin := hashCoin(choice)
- matchingReveal := false
- for _, r := range c.Reveals {
- if r.SigSlot.L <= coin && coin < r.SigSlot.L+r.Part.Weight {
- matchingReveal = true
- break
- }
- }
-
- if !matchingReveal {
- return fmt.Errorf("no reveal for coin %d at %d", j, coin)
- }
- }
-
- return nil
-}
diff --git a/crypto/crypto_test.go b/crypto/crypto_test.go
index fffa501f6..86c65f909 100644
--- a/crypto/crypto_test.go
+++ b/crypto/crypto_test.go
@@ -43,22 +43,22 @@ func randString() (b TestingHashable) {
func signVerify(t *testing.T, c *SignatureSecrets, c2 *SignatureSecrets) {
s := randString()
sig := c.Sign(s)
- if !c.Verify(s, sig, true) {
+ if !c.Verify(s, sig) {
t.Errorf("correct signature failed to verify (plain)")
}
s2 := randString()
sig2 := c.Sign(s2)
- if c.Verify(s, sig2, true) {
+ if c.Verify(s, sig2) {
t.Errorf("wrong message incorrectly verified (plain)")
}
sig3 := c2.Sign(s)
- if c.Verify(s, sig3, true) {
+ if c.Verify(s, sig3) {
t.Errorf("wrong key incorrectly verified (plain)")
}
- if c.Verify(s2, sig3, true) {
+ if c.Verify(s2, sig3) {
t.Errorf("wrong message+key incorrectly verified (plain)")
}
}
diff --git a/crypto/curve25519.go b/crypto/curve25519.go
index 87f3aa1e0..91dc5d63e 100644
--- a/crypto/curve25519.go
+++ b/crypto/curve25519.go
@@ -112,19 +112,14 @@ func ed25519Sign(secret ed25519PrivateKey, data []byte) (sig ed25519Signature) {
return
}
-func ed25519Verify(public ed25519PublicKey, data []byte, sig ed25519Signature, useBatchVerificationCompatibleVersion bool) bool {
+func ed25519Verify(public ed25519PublicKey, data []byte, sig ed25519Signature) bool {
// &data[0] will make Go panic if msg is zero length
d := (*C.uchar)(C.NULL)
if len(data) != 0 {
d = (*C.uchar)(&data[0])
}
// https://download.libsodium.org/doc/public-key_cryptography/public-key_signatures#detached-mode
- var result C.int
- if useBatchVerificationCompatibleVersion {
- result = C.crypto_sign_ed25519_bv_compatible_verify_detached((*C.uchar)(&sig[0]), d, C.ulonglong(len(data)), (*C.uchar)(&public[0]))
- } else {
- result = C.crypto_sign_ed25519_verify_detached((*C.uchar)(&sig[0]), d, C.ulonglong(len(data)), (*C.uchar)(&public[0]))
- }
+ result := C.crypto_sign_ed25519_bv_compatible_verify_detached((*C.uchar)(&sig[0]), d, C.ulonglong(len(data)), (*C.uchar)(&public[0]))
return result == 0
}
@@ -193,21 +188,21 @@ func SecretKeyToSeed(secret PrivateKey) (Seed, error) {
func GenerateSignatureSecrets(seed Seed) *SignatureSecrets {
pk0, sk := ed25519GenerateKeySeed(ed25519Seed(seed))
pk := SignatureVerifier(pk0)
- cryptoGenSigSecretsTotal.Inc(map[string]string{})
+ cryptoGenSigSecretsTotal.Inc(nil)
return &SignatureSecrets{SignatureVerifier: pk, SK: sk}
}
// Sign produces a cryptographic Signature of a Hashable message, given
// cryptographic secrets.
func (s *SignatureSecrets) Sign(message Hashable) Signature {
- cryptoSigSecretsSignTotal.Inc(map[string]string{})
+ cryptoSigSecretsSignTotal.Inc(nil)
return s.SignBytes(HashRep(message))
}
// SignBytes signs a message directly, without first hashing.
// Caller is responsible for domain separation.
func (s *SignatureSecrets) SignBytes(message []byte) Signature {
- cryptoSigSecretsSignBytesTotal.Inc(map[string]string{})
+ cryptoSigSecretsSignBytesTotal.Inc(nil)
return Signature(ed25519Sign(ed25519PrivateKey(s.SK), message))
}
@@ -216,15 +211,15 @@ func (s *SignatureSecrets) SignBytes(message []byte) Signature {
//
// It returns true if this is the case; otherwise, it returns false.
//
-func (v SignatureVerifier) Verify(message Hashable, sig Signature, useBatchVerificationCompatibleVersion bool) bool {
- cryptoSigSecretsVerifyTotal.Inc(map[string]string{})
- return ed25519Verify(ed25519PublicKey(v), HashRep(message), ed25519Signature(sig), useBatchVerificationCompatibleVersion)
+func (v SignatureVerifier) Verify(message Hashable, sig Signature) bool {
+ cryptoSigSecretsVerifyTotal.Inc(nil)
+ return ed25519Verify(ed25519PublicKey(v), HashRep(message), ed25519Signature(sig))
}
// VerifyBytes verifies a signature, where the message is not hashed first.
// Caller is responsible for domain separation.
// If the message is a Hashable, Verify() can be used instead.
-func (v SignatureVerifier) VerifyBytes(message []byte, sig Signature, useBatchVerificationCompatibleVersion bool) bool {
- cryptoSigSecretsVerifyBytesTotal.Inc(map[string]string{})
- return ed25519Verify(ed25519PublicKey(v), message, ed25519Signature(sig), useBatchVerificationCompatibleVersion)
+func (v SignatureVerifier) VerifyBytes(message []byte, sig Signature) bool {
+ cryptoSigSecretsVerifyBytesTotal.Inc(nil)
+ return ed25519Verify(ed25519PublicKey(v), message, ed25519Signature(sig))
}
diff --git a/crypto/curve25519_test.go b/crypto/curve25519_test.go
index e6f939fe2..27d153b9d 100644
--- a/crypto/curve25519_test.go
+++ b/crypto/curve25519_test.go
@@ -33,7 +33,7 @@ func TestSignVerifyEmptyMessage(t *testing.T) {
partitiontest.PartitionTest(t)
pk, sk := ed25519GenerateKey()
sig := ed25519Sign(sk, []byte{})
- if !ed25519Verify(pk, []byte{}, sig, true) {
+ if !ed25519Verify(pk, []byte{}, sig) {
t.Errorf("sig of an empty message failed to verify")
}
}
@@ -43,7 +43,7 @@ func TestVerifyZeros(t *testing.T) {
var pk SignatureVerifier
var sig Signature
for x := byte(0); x < 255; x++ {
- if pk.VerifyBytes([]byte{x}, sig, true) {
+ if pk.VerifyBytes([]byte{x}, sig) {
t.Errorf("Zero sig with zero pk successfully verified message %x", x)
}
}
@@ -84,7 +84,7 @@ func BenchmarkSignVerify(b *testing.B) {
for i := 0; i < b.N; i++ {
sig := c.Sign(s)
- _ = c.Verify(s, sig, true)
+ _ = c.Verify(s, sig)
}
}
@@ -97,7 +97,7 @@ func BenchmarkSign(b *testing.B) {
_ = c.Sign(s)
}
}
-func BenchmarkVerify(b *testing.B) {
+func BenchmarkVerify25519(b *testing.B) {
c := makeCurve25519Secret()
strs := make([]TestingHashable, b.N)
sigs := make([]Signature, b.N)
@@ -108,6 +108,6 @@ func BenchmarkVerify(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
- _ = c.Verify(strs[i], sigs[i], true)
+ _ = c.Verify(strs[i], sigs[i])
}
}
diff --git a/crypto/falconWrapper.go b/crypto/falconWrapper.go
index 8dea1990e..f24bc9433 100644
--- a/crypto/falconWrapper.go
+++ b/crypto/falconWrapper.go
@@ -102,13 +102,6 @@ func (d *FalconVerifier) GetFixedLengthHashableRepresentation() []byte {
return d.PublicKey[:]
}
-// GetSignatureFixedLengthHashableRepresentation returns a serialized version of the signature
-func (d *FalconVerifier) GetSignatureFixedLengthHashableRepresentation(signature FalconSignature) ([]byte, error) {
- compressedSignature := cfalcon.CompressedSignature(signature)
- ctSignature, err := compressedSignature.ConvertToCT()
- return ctSignature[:], err
-}
-
// NewFalconSigner creates a falconSigner that is used to sign and verify falcon signatures
func NewFalconSigner() (*FalconSigner, error) {
var seed FalconSeed
@@ -119,3 +112,15 @@ func NewFalconSigner() (*FalconSigner, error) {
}
return &signer, nil
}
+
+// GetFixedLengthHashableRepresentation returns a serialized version of the signature
+func (s FalconSignature) GetFixedLengthHashableRepresentation() ([]byte, error) {
+ compressedSignature := cfalcon.CompressedSignature(s)
+ ctSignature, err := compressedSignature.ConvertToCT()
+ return ctSignature[:], err
+}
+
+// IsSaltVersionEqual of the signature matches the given version
+func (s FalconSignature) IsSaltVersionEqual(version byte) bool {
+ return (*cfalcon.CompressedSignature)(&s).SaltVersion() == version
+}
diff --git a/crypto/falconWrapper_test.go b/crypto/falconWrapper_test.go
index b659ce56e..20b5441ee 100644
--- a/crypto/falconWrapper_test.go
+++ b/crypto/falconWrapper_test.go
@@ -102,9 +102,28 @@ func TestFalconsFormatConversion(t *testing.T) {
falconSig := falcon.CompressedSignature(sig)
ctFormat, err := falconSig.ConvertToCT()
- rawFormat, err := key.GetVerifyingKey().GetSignatureFixedLengthHashableRepresentation(sig)
+ rawFormat, err := sig.GetFixedLengthHashableRepresentation()
a.NoError(err)
a.NotEqual([]byte(sig), rawFormat)
a.Equal(ctFormat[:], rawFormat)
}
+
+func TestFalconSignature_ValidateVersion(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ msg := TestingHashable{data: []byte("Neque porro quisquam est qui dolorem ipsum quia dolor sit amet")}
+ var seed FalconSeed
+ SystemRNG.RandBytes(seed[:])
+ key, err := GenerateFalconSigner(seed)
+ a.NoError(err)
+
+ byteSig, err := key.Sign(msg)
+ a.NoError(err)
+
+ a.True(byteSig.IsSaltVersionEqual(falcon.CurrentSaltVersion))
+
+ byteSig[1]++
+ a.False(byteSig.IsSaltVersionEqual(falcon.CurrentSaltVersion))
+}
diff --git a/crypto/merklearray/merkle.go b/crypto/merklearray/merkle.go
index 4855a4c82..803eb5a25 100644
--- a/crypto/merklearray/merkle.go
+++ b/crypto/merklearray/merkle.go
@@ -44,6 +44,7 @@ var (
ErrNonEmptyProofForEmptyElements = errors.New("non-empty proof for empty set of elements")
ErrUnexpectedTreeDepth = errors.New("unexpected tree depth")
ErrPosOutOfBound = errors.New("pos out of bound")
+ ErrProofLengthDigestSizeMismatch = errors.New("proof length and digest size mismatched")
)
// Tree is a Merkle tree, represented by layers of nodes (hashes) in the tree
diff --git a/crypto/merklearray/proof.go b/crypto/merklearray/proof.go
index ed05b76e6..2670f69f1 100644
--- a/crypto/merklearray/proof.go
+++ b/crypto/merklearray/proof.go
@@ -17,6 +17,8 @@
package merklearray
import (
+ "fmt"
+
"github.com/algorand/go-algorand/crypto"
)
@@ -83,7 +85,7 @@ func (p *SingleLeafProof) ToProof() *Proof {
return &p.Proof
}
-// GetConcatenatedProof concats the verification path to a single slice
+// GetConcatenatedProof concatenates the verification path to a single slice
// This function converts an empty element in the path (i.e occurs when the tree is not a full tree)
// into a sequence of digest result of zero.
func (p *SingleLeafProof) GetConcatenatedProof() []byte {
@@ -96,3 +98,33 @@ func (p *SingleLeafProof) GetConcatenatedProof() []byte {
}
return proofconcat
}
+
+// ProofDataToSingleLeafProof receives serialized proof data and uses it to construct a proof object.
+func ProofDataToSingleLeafProof(hashTypeData string, treeDepth uint64, proofBytes []byte) (SingleLeafProof, error) {
+ hashType, err := crypto.UnmarshalHashType(hashTypeData)
+ if err != nil {
+ return SingleLeafProof{}, err
+ }
+
+ var proof SingleLeafProof
+
+ proof.HashFactory = crypto.HashFactory{HashType: hashType}
+ proof.TreeDepth = uint8(treeDepth)
+
+ digestSize := proof.HashFactory.NewHash().Size()
+ if len(proofBytes)%digestSize != 0 {
+ return SingleLeafProof{}, fmt.Errorf("proof bytes length is %d, which is not a multiple of "+
+ "digest size %d: %w", len(proofBytes), digestSize, ErrProofLengthDigestSizeMismatch)
+ }
+
+ var proofPath []crypto.GenericDigest
+ for len(proofBytes) > 0 {
+ d := make([]byte, digestSize)
+ copy(d[:], proofBytes)
+ proofPath = append(proofPath, d[:])
+ proofBytes = proofBytes[len(d):]
+ }
+
+ proof.Path = proofPath
+ return proof, nil
+}
diff --git a/crypto/merklearray/proof_test.go b/crypto/merklearray/proof_test.go
index 547268515..4645dccb2 100644
--- a/crypto/merklearray/proof_test.go
+++ b/crypto/merklearray/proof_test.go
@@ -148,13 +148,22 @@ func TestConcatenatedProofsMissingChild(t *testing.T) {
p, err := tree.ProveSingleLeaf(6)
a.NoError(err)
- newP := SingleLeafProof{Proof: Proof{TreeDepth: p.TreeDepth, Path: []crypto.GenericDigest{}, HashFactory: p.HashFactory}}
-
- computedPath := recomputePath(p)
+ concatenatedProof := p.GetConcatenatedProof()
+ computedPath := recomputePath(concatenatedProof)
+ // verify that the concatenated proof can be verified correctly
+ newP := SingleLeafProof{Proof: Proof{TreeDepth: p.TreeDepth, Path: []crypto.GenericDigest{}, HashFactory: p.HashFactory}}
newP.Path = computedPath
err = Verify(tree.Root(), map[uint64]crypto.Hashable{6: array[6]}, newP.ToProof())
a.NoError(err)
+
+ recomputedProof, err := ProofDataToSingleLeafProof(p.HashFactory.HashType.String(), uint64(p.TreeDepth), concatenatedProof)
+ a.NoError(err)
+
+ // verify that we can reconstruct the original singleLeafProof from the concatenated proof
+ err = Verify(tree.Root(), map[uint64]crypto.Hashable{6: array[6]}, recomputedProof.ToProof())
+ a.NoError(err)
+
}
func TestConcatenatedProofsFullTree(t *testing.T) {
@@ -172,13 +181,20 @@ func TestConcatenatedProofsFullTree(t *testing.T) {
p, err := tree.ProveSingleLeaf(6)
a.NoError(err)
- newP := SingleLeafProof{Proof: Proof{TreeDepth: p.TreeDepth, Path: []crypto.GenericDigest{}, HashFactory: p.HashFactory}}
-
- computedPath := recomputePath(p)
+ concatenatedProof := p.GetConcatenatedProof()
+ computedPath := recomputePath(concatenatedProof)
- newP.Path = computedPath
+ // verify that the concatenated proof can be verified correctly
+ newP := SingleLeafProof{Proof: Proof{TreeDepth: p.TreeDepth, Path: computedPath, HashFactory: p.HashFactory}}
err = Verify(tree.Root(), map[uint64]crypto.Hashable{6: array[6]}, newP.ToProof())
a.NoError(err)
+
+ recomputedProof, err := ProofDataToSingleLeafProof(p.HashFactory.HashType.String(), uint64(p.TreeDepth), concatenatedProof)
+ a.NoError(err)
+
+ // verify that we can reconstruct the original singleLeafProof from the concatenated proof
+ err = Verify(tree.Root(), map[uint64]crypto.Hashable{6: array[6]}, recomputedProof.ToProof())
+ a.NoError(err)
}
func TestConcatenatedProofsOneLeaf(t *testing.T) {
@@ -194,23 +210,37 @@ func TestConcatenatedProofsOneLeaf(t *testing.T) {
p, err := tree.ProveSingleLeaf(0)
a.NoError(err)
- newP := SingleLeafProof{Proof: Proof{TreeDepth: p.TreeDepth, Path: []crypto.GenericDigest{}, HashFactory: p.HashFactory}}
-
- computedPath := recomputePath(p)
+ concatenatedProof := p.GetConcatenatedProof()
+ computedPath := recomputePath(concatenatedProof)
- newP.Path = computedPath
+ // verify that the concatenated proof can be verified correctly
+ newP := SingleLeafProof{Proof: Proof{TreeDepth: p.TreeDepth, Path: computedPath, HashFactory: p.HashFactory}}
err = Verify(tree.Root(), map[uint64]crypto.Hashable{0: array[0]}, newP.ToProof())
a.NoError(err)
+
+ recomputedProof, err := ProofDataToSingleLeafProof(p.HashFactory.HashType.String(), uint64(p.TreeDepth), concatenatedProof)
+ a.NoError(err)
+
+ // verify that we can reconstruct the original singleLeafProof from the concatenated proof
+ err = Verify(tree.Root(), map[uint64]crypto.Hashable{0: array[0]}, recomputedProof.ToProof())
+ a.NoError(err)
+}
+
+func TestProofDeserializationError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ _, err := ProofDataToSingleLeafProof(crypto.Sha256.String(), 1, []byte{1})
+ a.ErrorIs(err, ErrProofLengthDigestSizeMismatch)
}
-func recomputePath(p *SingleLeafProof) []crypto.GenericDigest {
+func recomputePath(p []byte) []crypto.GenericDigest {
var computedPath []crypto.GenericDigest
- proofconcat := p.GetConcatenatedProof()
- for len(proofconcat) > 0 {
+ for len(p) > 0 {
var d crypto.Digest
- copy(d[:], proofconcat)
+ copy(d[:], p)
computedPath = append(computedPath, d[:])
- proofconcat = proofconcat[len(d):]
+ p = p[len(d):]
}
return computedPath
}
diff --git a/crypto/merklesignature/committablePublicKeys.go b/crypto/merklesignature/committablePublicKeys.go
index 1c2c44736..7401c67ef 100644
--- a/crypto/merklesignature/committablePublicKeys.go
+++ b/crypto/merklesignature/committablePublicKeys.go
@@ -29,9 +29,9 @@ type (
// committablePublicKeyArray used to arrange the keys so a merkle tree could be build on them.
//msgp:ignore committablePublicKeyArray
committablePublicKeyArray struct {
- keys []crypto.FalconSigner
- firstValid uint64
- interval uint64
+ keys []crypto.FalconSigner
+ firstValid uint64
+ keyLifetime uint64
}
// CommittablePublicKey is used to create a binary representation of public keys in the merkle
@@ -59,7 +59,7 @@ func (k *committablePublicKeyArray) Marshal(pos uint64) (crypto.Hashable, error)
ephPK := CommittablePublicKey{
VerifyingKey: *k.keys[pos].GetVerifyingKey(),
- Round: indexToRound(k.firstValid, k.interval, pos),
+ Round: indexToRound(k.firstValid, k.keyLifetime, pos),
}
return &ephPK, nil
@@ -72,15 +72,15 @@ func (k *committablePublicKeyArray) Marshal(pos uint64) (crypto.Hashable, error)
func (e *CommittablePublicKey) ToBeHashed() (protocol.HashID, []byte) {
verifyingRawKey := e.VerifyingKey.GetFixedLengthHashableRepresentation()
- roundAsBytes := make([]byte, 8)
- binary.LittleEndian.PutUint64(roundAsBytes, e.Round)
+ var roundAsBytes [8]byte
+ binary.LittleEndian.PutUint64(roundAsBytes[:], e.Round)
- schemeAsBytes := make([]byte, 2)
- binary.LittleEndian.PutUint16(schemeAsBytes, CryptoPrimitivesID)
+ var schemeAsBytes [2]byte
+ binary.LittleEndian.PutUint16(schemeAsBytes[:], CryptoPrimitivesID)
keyCommitment := make([]byte, 0, len(schemeAsBytes)+len(verifyingRawKey)+len(roundAsBytes))
- keyCommitment = append(keyCommitment, schemeAsBytes...)
- keyCommitment = append(keyCommitment, roundAsBytes...)
+ keyCommitment = append(keyCommitment, schemeAsBytes[:]...)
+ keyCommitment = append(keyCommitment, roundAsBytes[:]...)
keyCommitment = append(keyCommitment, verifyingRawKey...)
return protocol.KeysInMSS, keyCommitment
diff --git a/crypto/merklesignature/committablePublicKeys_test.go b/crypto/merklesignature/committablePublicKeys_test.go
index b2718f3e1..a6884cc59 100644
--- a/crypto/merklesignature/committablePublicKeys_test.go
+++ b/crypto/merklesignature/committablePublicKeys_test.go
@@ -92,5 +92,5 @@ func TestEphemeralPublicKeysCommitmentBinaryFormat(t *testing.T) {
internal2 := calculateHashOnInternalNode(k1hash, k3hash)
root := calculateHashOnInternalNode(internal1, internal2)
- a.Equal(root, signer.GetVerifier()[:])
+ a.Equal(root, signer.GetVerifier().Commitment[:])
}
diff --git a/crypto/merklesignature/const.go b/crypto/merklesignature/const.go
index d8457ad7b..c98321b51 100644
--- a/crypto/merklesignature/const.go
+++ b/crypto/merklesignature/const.go
@@ -16,10 +16,37 @@
package merklesignature
-import "github.com/algorand/go-algorand/crypto"
+import (
+ "fmt"
+ "github.com/algorand/go-algorand/crypto"
+)
// HashType/ hashSize relate to the type of hash this package uses.
const (
MerkleSignatureSchemeHashFunction = crypto.Sumhash
MerkleSignatureSchemeRootSize = crypto.SumhashDigestSize
+ // KeyLifetimeDefault defines the default lifetime of a key in the merkle signature scheme (in rounds).
+ KeyLifetimeDefault = 256
+
+ // SchemeSaltVersion is the current salt version of merkleSignature
+ SchemeSaltVersion = byte(0)
+
+ // CryptoPrimitivesID is an identification that the Merkle Signature Scheme uses a subset sum hash function
+ // and a falcon signature scheme.
+ CryptoPrimitivesID = uint16(0)
)
+
+// NoKeysCommitment is a const hash value of the empty MerkleSignature Commitment.
+var NoKeysCommitment = Commitment{}
+
+func init() {
+ // no keys generated, inner tree of merkle siganture scheme is empty.
+ o, err := New(KeyLifetimeDefault+1, KeyLifetimeDefault+2, KeyLifetimeDefault)
+ if err != nil {
+ panic(fmt.Errorf("initializing empty merkle signature scheme failed, err: %w", err))
+ }
+ if len(o.GetAllKeys()) > 0 {
+ panic("mss tree has more than just root.")
+ }
+ copy(NoKeysCommitment[:], o.GetVerifier().Commitment[:])
+}
diff --git a/crypto/merklesignature/kats_test.go b/crypto/merklesignature/kats_test.go
new file mode 100644
index 000000000..bc61ec47b
--- /dev/null
+++ b/crypto/merklesignature/kats_test.go
@@ -0,0 +1,115 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package merklesignature
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+type mssKat struct {
+ PublicKey []byte
+ KeyLifetime uint64
+ CtSignature []byte
+ EphemeralKey []byte
+ VcIndex uint64
+ CorrespondingRound uint64
+ ProofDepth uint8
+ ProofBytes []byte
+ Message []byte
+}
+
+func extractMssSignatureParts(signature Signature) ([]byte, []byte, []byte, uint8, error) {
+ ctSignature, err := signature.Signature.GetFixedLengthHashableRepresentation()
+ if err != nil {
+ return nil, nil, nil, 0, err
+ }
+
+ pk := signature.VerifyingKey.GetFixedLengthHashableRepresentation()
+ proof := signature.Proof.GetFixedLengthHashableRepresentation()
+ proofDepth := proof[0]
+ proof = proof[1:]
+
+ return ctSignature, pk, proof, proofDepth, nil
+}
+
+func generateMssKat(startRound, atRound, numOfKeys uint64, messageToSign []byte) (mssKat, error) {
+ if startRound > atRound {
+ return mssKat{}, fmt.Errorf("error: Signature round cann't be smaller then start round")
+ }
+
+ interval := config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval
+ stateProofSecrets, err := New(startRound, startRound+(interval*numOfKeys)-1, interval)
+ if err != nil {
+ return mssKat{}, fmt.Errorf("error: %w", err)
+ }
+
+ keyForRound := stateProofSecrets.GetSigner(atRound)
+ if keyForRound == nil {
+ return mssKat{}, fmt.Errorf("error: There is no key for round %d", atRound)
+ }
+
+ signature, err := keyForRound.SignBytes(messageToSign)
+ if err != nil {
+ return mssKat{}, fmt.Errorf("error while formating mss signature %w", err)
+ }
+ verifier := stateProofSecrets.GetVerifier()
+ ctSignature, pk, proof, proofDepth, err := extractMssSignatureParts(signature)
+ if err != nil {
+ return mssKat{}, fmt.Errorf("error while formating mss signature %w", err)
+ }
+
+ return mssKat{
+ PublicKey: verifier.Commitment[:],
+ KeyLifetime: KeyLifetimeDefault,
+ CtSignature: ctSignature,
+ EphemeralKey: pk,
+ VcIndex: signature.VectorCommitmentIndex,
+ CorrespondingRound: atRound,
+ ProofDepth: proofDepth,
+ ProofBytes: proof,
+ Message: messageToSign}, nil
+}
+
+func TestGenerateKat(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ // This test produces MSS samples for the SNARK verifier.
+ // it will only run explicitly by:
+ //
+ // GEN_KATS=x go test -v . -run=GenerateKat -count=1
+ if os.Getenv("GEN_KATS") == "" {
+ t.Skip("Skipping; GEN_KATS not set")
+ }
+
+ kat, err := generateMssKat(256, 512, 9, []byte("test"))
+ a.NoError(err)
+
+ katAsJSON, err := json.MarshalIndent(kat, "", "\t")
+ a.NoError(err)
+
+ fmt.Println(string(katAsJSON))
+}
diff --git a/crypto/merklesignature/keysBuilder_test.go b/crypto/merklesignature/keysBuilder_test.go
index 29b96b598..ec9e487fe 100644
--- a/crypto/merklesignature/keysBuilder_test.go
+++ b/crypto/merklesignature/keysBuilder_test.go
@@ -35,6 +35,8 @@ func TestBuilderSanity(t *testing.T) {
a.Equal(uint64(len(keys)), numOfKeys)
s, err := keys[0].SignBytes([]byte{0})
+ a.NoError(err)
+
v := keys[0].GetVerifyingKey()
err = v.VerifyBytes([]byte{0}, s)
a.NoError(err)
diff --git a/crypto/merklesignature/merkleSignatureScheme.go b/crypto/merklesignature/merkleSignatureScheme.go
index 983cd859d..b2763d496 100644
--- a/crypto/merklesignature/merkleSignatureScheme.go
+++ b/crypto/merklesignature/merkleSignatureScheme.go
@@ -35,10 +35,10 @@ type (
Signature struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
- Signature crypto.FalconSignature `codec:"sig"`
- MerkleArrayIndex uint64 `codec:"idx"`
- Proof merklearray.SingleLeafProof `codec:"prf"`
- VerifyingKey crypto.FalconVerifier `codec:"vkey"`
+ Signature crypto.FalconSignature `codec:"sig"`
+ VectorCommitmentIndex uint64 `codec:"idx"`
+ Proof merklearray.SingleLeafProof `codec:"prf"`
+ VerifyingKey crypto.FalconVerifier `codec:"vkey"`
}
// Secrets contains the private data needed by the merkle signature scheme.
@@ -58,7 +58,7 @@ type (
Signer struct {
SigningKey *crypto.FalconSigner
- // The round for which this SigningKey is related to
+ // The round for which the signature would be valid
Round uint64
SignerContext
@@ -68,13 +68,21 @@ type (
SignerContext struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
- FirstValid uint64 `codec:"fv"`
- Interval uint64 `codec:"iv"`
- Tree merklearray.Tree `codec:"tree"`
+ FirstValid uint64 `codec:"fv"`
+ KeyLifetime uint64 `codec:"iv"`
+ Tree merklearray.Tree `codec:"tree"`
}
+ // Commitment represents the root of the vector commitment tree built upon the MSS keys.
+ Commitment [MerkleSignatureSchemeRootSize]byte
+
// Verifier is used to verify a merklesignature.Signature produced by merklesignature.Secrets.
- Verifier [MerkleSignatureSchemeRootSize]byte
+ Verifier struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Commitment Commitment `codec:"cmt"`
+ KeyLifetime uint64 `codec:"lf"`
+ }
//KeyRoundPair represents an ephemeral signing key with it's corresponding round
KeyRoundPair struct {
@@ -85,43 +93,38 @@ type (
}
)
-// CryptoPrimitivesID is an identification that the Merkle Signature Scheme uses a subset sum hash function
-// and a falcon signature scheme.
-var CryptoPrimitivesID = uint16(0)
-
// Errors for the merkle signature scheme
var (
ErrStartBiggerThanEndRound = errors.New("cannot create Merkle Signature Scheme because end round is smaller then start round")
- ErrDivisorIsZero = errors.New("received zero Interval")
+ ErrKeyLifetimeIsZero = errors.New("received zero KeyLifetime")
ErrNoStateProofKeyForRound = errors.New("no stateproof key exists for this round")
ErrSignatureSchemeVerificationFailed = errors.New("merkle signature verification failed")
+ ErrSignatureSaltVersionMismatch = errors.New("the signature's salt version does not match")
)
// New creates secrets needed for the merkle signature scheme.
// This function generates one key for each round within the participation period [firstValid, lastValid] (inclusive bounds)
// which holds round % interval == 0.
-// In case firstValid equals zero then signer will generate all keys from (0,Z], i.e will not generate key for round zero.
-func New(firstValid, lastValid, interval uint64) (*Secrets, error) {
+func New(firstValid, lastValid, keyLifetime uint64) (*Secrets, error) {
if firstValid > lastValid {
return nil, ErrStartBiggerThanEndRound
}
- if interval == 0 {
- return nil, ErrDivisorIsZero
- }
-
- if firstValid == 0 {
- firstValid = 1
+ if keyLifetime == 0 {
+ return nil, ErrKeyLifetimeIsZero
}
// calculates the number of indices from first valid round and up to lastValid.
// writing this explicit calculation to avoid overflow.
- numberOfKeys := lastValid/interval - ((firstValid - 1) / interval)
+ numberOfKeys := lastValid/keyLifetime - ((firstValid - 1) / keyLifetime)
+ if firstValid == 0 {
+ numberOfKeys = lastValid/keyLifetime + 1 // add 1 for round zero
+ }
keys, err := KeysBuilder(numberOfKeys)
if err != nil {
return nil, err
}
- tree, err := merklearray.BuildVectorCommitmentTree(&committablePublicKeyArray{keys, firstValid, interval}, crypto.HashFactory{HashType: MerkleSignatureSchemeHashFunction})
+ tree, err := merklearray.BuildVectorCommitmentTree(&committablePublicKeyArray{keys, firstValid, keyLifetime}, crypto.HashFactory{HashType: MerkleSignatureSchemeHashFunction})
if err != nil {
return nil, err
}
@@ -129,9 +132,9 @@ func New(firstValid, lastValid, interval uint64) (*Secrets, error) {
return &Secrets{
ephemeralKeys: keys,
SignerContext: SignerContext{
- FirstValid: firstValid,
- Interval: interval,
- Tree: *tree,
+ FirstValid: firstValid,
+ KeyLifetime: keyLifetime,
+ Tree: *tree,
},
}, nil
}
@@ -144,46 +147,63 @@ func (s *Secrets) GetVerifier() *Verifier {
// GetVerifier can be used to store the commitment and verifier for this signer.
func (s *SignerContext) GetVerifier() *Verifier {
var ver Verifier
- copy(ver[:], s.Tree.Root())
+ copy(ver.Commitment[:], s.Tree.Root())
+ ver.KeyLifetime = s.KeyLifetime
return &ver
}
-// Sign signs a hash of a given message. The signature is valid on a specific round
-func (s *Signer) Sign(hashable crypto.Hashable) (Signature, error) {
+// FirstRoundInKeyLifetime calculates the round of the valid key for a given round by lowering to the closest KeyLiftime divisor.
+func (s *Signer) FirstRoundInKeyLifetime() (uint64, error) {
+ if s.KeyLifetime == 0 {
+ return 0, ErrKeyLifetimeIsZero
+ }
+
+ return firstRoundInKeyLifetime(s.Round, s.KeyLifetime), nil
+}
+
+func (s *Signer) vectorCommitmentTreeIndex() (uint64, error) {
+ validKeyRound, err := s.FirstRoundInKeyLifetime()
+ if err != nil {
+ return 0, err
+ }
+ return roundToIndex(s.FirstValid, validKeyRound, s.KeyLifetime), nil
+}
+
+// SignBytes signs a given message. The signature is valid on a specific round
+func (s *Signer) SignBytes(msg []byte) (Signature, error) {
key := s.SigningKey
// Possible since there may not be a StateProof key for this specific round
if key == nil {
return Signature{}, ErrNoStateProofKeyForRound
}
- if err := checkMerkleSignatureSchemeParams(s.FirstValid, s.Round, s.Interval); err != nil {
+ if err := checkMerkleSignatureSchemeParams(s.FirstValid, s.Round, s.KeyLifetime); err != nil {
+ return Signature{}, err
+ }
+
+ vcIdx, err := s.vectorCommitmentTreeIndex()
+ if err != nil {
return Signature{}, err
}
- index := s.getMerkleTreeIndex(s.Round)
- proof, err := s.Tree.ProveSingleLeaf(index)
+ proof, err := s.Tree.ProveSingleLeaf(vcIdx)
if err != nil {
return Signature{}, err
}
- sig, err := s.SigningKey.Sign(hashable)
+ sig, err := key.SignBytes(msg)
if err != nil {
return Signature{}, err
}
return Signature{
- Signature: sig,
- Proof: *proof,
- VerifyingKey: *s.SigningKey.GetVerifyingKey(),
- MerkleArrayIndex: index,
+ Signature: sig,
+ Proof: *proof,
+ VerifyingKey: *s.SigningKey.GetVerifyingKey(),
+ VectorCommitmentIndex: vcIdx,
}, nil
}
-// expects valid rounds, i.e round that are bigger than FirstValid.
-func (s *Signer) getMerkleTreeIndex(round uint64) uint64 {
- return roundToIndex(s.FirstValid, round, s.Interval)
-}
-
// GetAllKeys returns all stateproof secrets.
// An empty array will be return if no stateproof secrets are found
func (s *Secrets) GetAllKeys() []KeyRoundPair {
@@ -191,7 +211,7 @@ func (s *Secrets) GetAllKeys() []KeyRoundPair {
keys := make([]KeyRoundPair, NumOfKeys)
for i := uint64(0); i < NumOfKeys; i++ {
keyRound := KeyRoundPair{
- Round: indexToRound(s.SignerContext.FirstValid, s.SignerContext.Interval, i),
+ Round: indexToRound(s.FirstValid, s.KeyLifetime, i),
Key: &s.ephemeralKeys[i],
}
keys[i] = keyRound
@@ -202,8 +222,9 @@ func (s *Secrets) GetAllKeys() []KeyRoundPair {
// GetKey retrieves key from memory
// the function return nil if the key does not exists
func (s *Secrets) GetKey(round uint64) *crypto.FalconSigner {
- idx := roundToIndex(s.FirstValid, round, s.Interval)
- if idx >= uint64(len(s.ephemeralKeys)) || (round%s.Interval) != 0 {
+ keyRound := firstRoundInKeyLifetime(round, s.KeyLifetime)
+ idx := roundToIndex(s.FirstValid, keyRound, s.KeyLifetime)
+ if idx >= uint64(len(s.ephemeralKeys)) || (keyRound%s.KeyLifetime) != 0 || keyRound < s.FirstValid {
return nil
}
@@ -220,33 +241,54 @@ func (s *Secrets) GetSigner(round uint64) *Signer {
}
// IsEmpty returns true if the verifier contains an empty key
-func (v *Verifier) IsEmpty() bool {
+func (v *Commitment) IsEmpty() bool {
return *v == [MerkleSignatureSchemeRootSize]byte{}
}
-// Verify verifies that a merklesignature sig is valid, on a specific round, under a given public key
-func (v *Verifier) Verify(round uint64, msg crypto.Hashable, sig Signature) error {
+// ValidateSaltVersion validates that the version of the signature is matching the expected version
+func (s *Signature) ValidateSaltVersion(version byte) error {
+ if !s.Signature.IsSaltVersionEqual(version) {
+ return ErrSignatureSaltVersionMismatch
+ }
+ return nil
+}
+
+// FirstRoundInKeyLifetime calculates the round of the valid key for a given round by lowering to the closest KeyLiftime divisor.
+func (v *Verifier) FirstRoundInKeyLifetime(round uint64) (uint64, error) {
+ if v.KeyLifetime == 0 {
+ return 0, ErrKeyLifetimeIsZero
+ }
+
+ return firstRoundInKeyLifetime(round, v.KeyLifetime), nil
+}
+
+// VerifyBytes verifies that a merklesignature sig is valid, on a specific round, under a given public key
+func (v *Verifier) VerifyBytes(round uint64, msg []byte, sig *Signature) error {
+ validKeyRound, err := v.FirstRoundInKeyLifetime(round)
+ if err != nil {
+ return err
+ }
ephkey := CommittablePublicKey{
VerifyingKey: sig.VerifyingKey,
- Round: round,
+ Round: validKeyRound,
}
// verify the merkle tree verification path using the ephemeral public key, the
// verification path and the index.
- err := merklearray.VerifyVectorCommitment(
- v[:],
- map[uint64]crypto.Hashable{sig.MerkleArrayIndex: &ephkey},
+ err = merklearray.VerifyVectorCommitment(
+ v.Commitment[:],
+ map[uint64]crypto.Hashable{sig.VectorCommitmentIndex: &ephkey},
sig.Proof.ToProof(),
)
if err != nil {
- return fmt.Errorf("%w - %v", ErrSignatureSchemeVerificationFailed, err)
+ return fmt.Errorf("%w: %v", ErrSignatureSchemeVerificationFailed, err)
}
// verify that the signature is valid under the ephemeral public key
- err = sig.VerifyingKey.Verify(msg, sig.Signature)
+ err = sig.VerifyingKey.VerifyBytes(msg, sig.Signature)
if err != nil {
- return fmt.Errorf("%w - %v", ErrSignatureSchemeVerificationFailed, err)
+ return fmt.Errorf("%w: %v", ErrSignatureSchemeVerificationFailed, err)
}
return nil
}
@@ -254,25 +296,25 @@ func (v *Verifier) Verify(round uint64, msg crypto.Hashable, sig Signature) erro
// GetFixedLengthHashableRepresentation returns the signature as a hashable byte sequence.
// the format details can be found in the Algorand's spec.
func (s *Signature) GetFixedLengthHashableRepresentation() ([]byte, error) {
- schemeType := make([]byte, 2)
- binary.LittleEndian.PutUint16(schemeType, CryptoPrimitivesID)
- sigBytes, err := s.VerifyingKey.GetSignatureFixedLengthHashableRepresentation(s.Signature)
+ var schemeType [2]byte
+ binary.LittleEndian.PutUint16(schemeType[:], CryptoPrimitivesID)
+ sigBytes, err := s.Signature.GetFixedLengthHashableRepresentation()
if err != nil {
return nil, err
}
verifierBytes := s.VerifyingKey.GetFixedLengthHashableRepresentation()
- binaryMerkleIndex := make([]byte, 8)
- binary.LittleEndian.PutUint64(binaryMerkleIndex, s.MerkleArrayIndex)
+ var binaryVectorCommitmentIndex [8]byte
+ binary.LittleEndian.PutUint64(binaryVectorCommitmentIndex[:], s.VectorCommitmentIndex)
proofBytes := s.Proof.GetFixedLengthHashableRepresentation()
- merkleSignatureBytes := make([]byte, 0, len(schemeType)+len(sigBytes)+len(verifierBytes)+len(binaryMerkleIndex)+len(proofBytes))
- merkleSignatureBytes = append(merkleSignatureBytes, schemeType...)
+ merkleSignatureBytes := make([]byte, 0, len(schemeType)+len(sigBytes)+len(verifierBytes)+len(binaryVectorCommitmentIndex)+len(proofBytes))
+ merkleSignatureBytes = append(merkleSignatureBytes, schemeType[:]...)
merkleSignatureBytes = append(merkleSignatureBytes, sigBytes...)
merkleSignatureBytes = append(merkleSignatureBytes, verifierBytes...)
- merkleSignatureBytes = append(merkleSignatureBytes, binaryMerkleIndex...)
+ merkleSignatureBytes = append(merkleSignatureBytes, binaryVectorCommitmentIndex[:]...)
merkleSignatureBytes = append(merkleSignatureBytes, proofBytes...)
return merkleSignatureBytes, nil
}
diff --git a/crypto/merklesignature/merkleSignatureScheme_test.go b/crypto/merklesignature/merkleSignatureScheme_test.go
index ff51639bd..db4f4e6e4 100644
--- a/crypto/merklesignature/merkleSignatureScheme_test.go
+++ b/crypto/merklesignature/merkleSignatureScheme_test.go
@@ -30,33 +30,26 @@ import (
"github.com/algorand/go-algorand/test/partitiontest"
)
-type TestingHashable struct {
- data []byte
-}
-
-func (s TestingHashable) ToBeHashed() (protocol.HashID, []byte) {
- return protocol.TestHashable, s.data
-}
-
func TestSignerCreation(t *testing.T) {
+ t.Parallel()
partitiontest.PartitionTest(t)
a := require.New(t)
var err error
- h := genHashableForTest()
+ h := genMsgForTest()
for i := uint64(1); i < 20; i++ {
signer := generateTestSigner(i, i+1, 1, a)
- _, err = signer.GetSigner(i).Sign(h)
+ _, err = signer.GetSigner(i).SignBytes(h)
a.NoError(err)
}
- testSignerNumKeysLimits := func(firstValid uint64, lastValid uint64, interval uint64, expectedLen int) {
- signer := generateTestSigner(firstValid, lastValid, interval, a)
+ testSignerNumKeysLimits := func(firstValid uint64, lastValid uint64, keyLifetime uint64, expectedLen int) {
+ signer := generateTestSigner(firstValid, lastValid, keyLifetime, a)
a.Equal(expectedLen, length(signer, a))
}
- testSignerNumKeysLimits(0, 0, 1, 0)
- testSignerNumKeysLimits(0, 1, 1, 1)
+ testSignerNumKeysLimits(0, 0, 1, 1)
+ testSignerNumKeysLimits(0, 1, 1, 2)
testSignerNumKeysLimits(2, 2, 2, 1)
testSignerNumKeysLimits(8, 21, 10, 2)
testSignerNumKeysLimits(8, 20, 10, 2)
@@ -67,24 +60,44 @@ func TestSignerCreation(t *testing.T) {
signer := generateTestSigner(2, 2, 2, a)
a.Equal(1, length(signer, a))
- sig, err := signer.GetSigner(2).Sign(genHashableForTest())
+ sig, err := signer.GetSigner(2).SignBytes(genMsgForTest())
a.NoError(err)
- a.NoError(signer.GetVerifier().Verify(2, genHashableForTest(), sig))
+ a.NoError(signer.GetVerifier().VerifyBytes(2, genMsgForTest(), &sig))
signer = generateTestSigner(2, 2, 3, a)
a.Equal(0, length(signer, a))
- _, err = signer.GetSigner(2).Sign(genHashableForTest())
+ _, err = signer.GetSigner(2).SignBytes(genMsgForTest())
a.Error(err)
a.ErrorIs(err, ErrNoStateProofKeyForRound)
signer = generateTestSigner(11, 19, 10, a)
a.Equal(0, length(signer, a))
- _, err = signer.GetSigner(2).Sign(genHashableForTest())
+ _, err = signer.GetSigner(2).SignBytes(genMsgForTest())
a.Error(err)
a.ErrorIs(err, ErrNoStateProofKeyForRound)
+
+ // Make sure both rounds 10 and 11 can be signed (as key for round 10 is valid for both)
+ signer = generateTestSigner(0, 19, 10, a)
+
+ sig, err = signer.GetSigner(10).SignBytes(genMsgForTest())
+ a.NoError(err)
+ a.NoError(signer.GetVerifier().VerifyBytes(10, genMsgForTest(), &sig))
+
+ sig, err = signer.GetSigner(11).SignBytes(genMsgForTest())
+ a.NoError(err)
+ a.NoError(signer.GetVerifier().VerifyBytes(11, genMsgForTest(), &sig))
+
+ sig, err = signer.GetSigner(0).SignBytes(genMsgForTest())
+ a.NoError(err)
+ a.NoError(signer.GetVerifier().VerifyBytes(0, genMsgForTest(), &sig))
+
+ sig, err = signer.GetSigner(1).SignBytes(genMsgForTest())
+ a.NoError(err)
+ a.NoError(signer.GetVerifier().VerifyBytes(1, genMsgForTest(), &sig))
}
func TestSignerCreationOutOfBounds(t *testing.T) {
+ t.Parallel()
partitiontest.PartitionTest(t)
a := require.New(t)
_, err := New(8, 4, 1)
@@ -93,43 +106,64 @@ func TestSignerCreationOutOfBounds(t *testing.T) {
_, err = New(1, 8, 0)
a.Error(err)
- a.ErrorIs(err, ErrDivisorIsZero)
+ a.ErrorIs(err, ErrKeyLifetimeIsZero)
}
func TestEmptyVerifier(t *testing.T) {
+ t.Parallel()
partitiontest.PartitionTest(t)
a := require.New(t)
signer := generateTestSigner(8, 9, 5, a)
// even if there are no keys for that period, the root is not empty
// (part of the vector commitment property).
- a.Equal(false, signer.GetVerifier().IsEmpty())
+ a.False(signer.GetVerifier().MsgIsZero())
+}
+
+func TestVerifierKeyLifetimeError(t *testing.T) {
+ t.Parallel()
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ signer := generateTestSigner(8, 12, 1, a)
+ verifier := signer.GetVerifier()
+
+ verifier.KeyLifetime = 0
+ a.ErrorIs(verifier.VerifyBytes(0, []byte(""), &Signature{}), ErrKeyLifetimeIsZero)
+
+ verifier.KeyLifetime = 1
+ sig, err := signer.GetSigner(10).SignBytes([]byte("hello"))
+ a.NoError(err)
+
+ a.NoError(verifier.VerifyBytes(10, []byte("hello"), &sig))
}
func TestEmptySigner(t *testing.T) {
+ t.Parallel()
partitiontest.PartitionTest(t)
a := require.New(t)
var err error
- h := genHashableForTest()
+ h := genMsgForTest()
signer := generateTestSigner(8, 9, 5, a)
a.Equal(0, length(signer, a))
- _, err = signer.GetSigner(8).Sign(h)
+ _, err = signer.GetSigner(8).SignBytes(h)
a.Error(err)
a.ErrorIs(err, ErrNoStateProofKeyForRound)
- _, err = signer.GetSigner(9).Sign(h)
+ _, err = signer.GetSigner(9).SignBytes(h)
a.Error(err)
a.ErrorIs(err, ErrNoStateProofKeyForRound)
}
func TestDisposableKeysGeneration(t *testing.T) {
+ t.Parallel()
partitiontest.PartitionTest(t)
a := require.New(t)
signer := generateTestSigner(0, 100, 1, a)
- for i := uint64(1); i < 100; i++ {
+ for i := uint64(0); i < 100; i++ {
k := signer.GetKey(i)
a.NotNil(k)
}
@@ -146,19 +180,26 @@ func TestDisposableKeysGeneration(t *testing.T) {
k = signer.GetKey(999)
a.Nil(k)
- signer = generateTestSigner(1000, 1100, 101, a)
- intervalRounds := make([]uint64, 0)
- for i := uint64(1000); i <= 1100; i++ {
- if i%101 == 0 {
- intervalRounds = append(intervalRounds, i)
- continue
- }
- k := signer.GetKey(i)
- a.Nil(k)
+ signer = generateTestSigner(1000, 1100, 105, a)
+ i := uint64(1000)
+ for ; i < 1050; i++ {
+ a.Nil(signer.GetKey(i))
+ }
+
+ k = signer.GetKey(i) // 1050
+ a.NotNil(k)
+ pk := k.PublicKey
+ i++
+
+ for ; i <= 1100; i++ { // same key since it's under the same lifetime period
+ k = signer.GetKey(i)
+ a.NotNil(k)
+ a.Equal(pk, k.PublicKey)
}
}
func TestNonEmptyDisposableKeys(t *testing.T) {
+ t.Parallel()
partitiontest.PartitionTest(t)
a := require.New(t)
@@ -182,13 +223,14 @@ func TestNonEmptyDisposableKeys(t *testing.T) {
}
func TestSignatureStructure(t *testing.T) {
+ t.Parallel()
partitiontest.PartitionTest(t)
a := require.New(t)
signer := generateTestSigner(50, 100, 1, a)
- hashable := genHashableForTest()
- sig, err := signer.GetSigner(51).Sign(hashable)
+ msg := genMsgForTest()
+ sig, err := signer.GetSigner(51).SignBytes(msg)
a.NoError(err)
key := signer.GetKey(51)
@@ -203,58 +245,59 @@ func TestSignatureStructure(t *testing.T) {
a.NotEqual(nil, sig.Signature)
}
-func genHashableForTest() crypto.Hashable {
- hashable := TestingHashable{[]byte("test msg")}
-
- return hashable
+func genMsgForTest() []byte {
+ return []byte("test msg")
}
func TestSigning(t *testing.T) {
+ t.Parallel()
partitiontest.PartitionTest(t)
a := require.New(t)
start, end := uint64(50), uint64(100)
signer := generateTestSigner(start, end, 1, a)
- hashable := genHashableForTest()
+ msg := genMsgForTest()
- sig, err := signer.GetSigner(start).Sign(hashable)
+ sig, err := signer.GetSigner(start).SignBytes(msg)
a.NoError(err)
- a.NoError(signer.GetVerifier().Verify(start, hashable, sig))
+ a.NoError(signer.GetVerifier().VerifyBytes(start, msg, &sig))
- _, err = signer.GetSigner(start - 1).Sign(hashable)
+ _, err = signer.GetSigner(start - 1).SignBytes(msg)
a.Error(err)
a.ErrorIs(err, ErrNoStateProofKeyForRound)
- _, err = signer.GetSigner(end + 1).Sign(hashable)
+ _, err = signer.GetSigner(end + 1).SignBytes(msg)
a.Error(err)
a.ErrorIs(err, ErrNoStateProofKeyForRound)
signer = generateTestSigner(start, end, 10, a)
- sig, err = signer.GetSigner(start).Sign(hashable)
+ sig, err = signer.GetSigner(start).SignBytes(msg)
a.NoError(err)
- a.NoError(signer.GetVerifier().Verify(start, hashable, sig))
+ a.NoError(signer.GetVerifier().VerifyBytes(start, msg, &sig))
- sig, err = signer.GetSigner(start + 5).Sign(hashable)
- a.Error(err)
+ sig, err = signer.GetSigner(start + 5).SignBytes(msg)
+ a.NoError(err)
- err = signer.GetVerifier().Verify(start+5, hashable, sig)
- a.Error(err)
- a.ErrorIs(err, ErrSignatureSchemeVerificationFailed)
+ err = signer.GetVerifier().VerifyBytes(start+5, msg, &sig)
+ a.NoError(err)
+ err = signer.GetVerifier().VerifyBytes(start+7, msg, &sig) // same key used since both rounds under same lifetime of key
+ a.NoError(err)
signer = generateTestSigner(50, 100, 12, a)
a.Equal(4, length(signer, a))
- for i := uint64(50); i < 100; i++ {
- if i%12 != 0 {
- _, err = signer.GetSigner(i).Sign(hashable)
- a.Error(err)
- } else {
- sig, err = signer.GetSigner(i).Sign(hashable)
- a.NoError(err)
- a.NoError(signer.GetVerifier().Verify(i, hashable, sig))
- }
+ i := uint64(50)
+ for ; i < 60; i++ { // no key for these rounds (key for round 48 was not generated)
+ _, err = signer.GetSigner(i).SignBytes(msg)
+ a.Error(err)
+ a.ErrorIs(err, ErrNoStateProofKeyForRound)
+ }
+ for ; i < 100; i++ {
+ sig, err = signer.GetSigner(i).SignBytes(msg)
+ a.NoError(err)
+ a.NoError(signer.GetVerifier().VerifyBytes(i, msg, &sig))
}
signer = generateTestSigner(234, 4634, 256, a)
@@ -262,42 +305,44 @@ func TestSigning(t *testing.T) {
a.NotNil(key)
key = signer.GetKey(4096)
a.NotNil(key)
- key = signer.GetKey(234 + 256)
+ key = signer.GetKey(234) // keys valid only for round > 256
a.Nil(key)
}
func TestBadRound(t *testing.T) {
+ t.Parallel()
partitiontest.PartitionTest(t)
a := require.New(t)
start, _, signer := generateTestSignerAux(a)
- hashable, sig := makeSig(signer, start, a)
+ msg, sig := makeSig(signer, start, a)
- err := signer.GetVerifier().Verify(start+1, hashable, sig)
+ err := signer.GetVerifier().VerifyBytes(start+1, msg, &sig)
a.Error(err)
a.ErrorIs(err, ErrSignatureSchemeVerificationFailed)
- hashable, sig = makeSig(signer, start+1, a)
- err = signer.GetVerifier().Verify(start, hashable, sig)
+ msg, sig = makeSig(signer, start+1, a)
+ err = signer.GetVerifier().VerifyBytes(start, msg, &sig)
a.Error(err)
a.ErrorIs(err, ErrSignatureSchemeVerificationFailed)
- err = signer.GetVerifier().Verify(start+2, hashable, sig)
+ err = signer.GetVerifier().VerifyBytes(start+2, msg, &sig)
a.Error(err)
a.ErrorIs(err, ErrSignatureSchemeVerificationFailed)
a.True(errors.Is(err, ErrSignatureSchemeVerificationFailed))
}
func TestBadMerkleProofInSignature(t *testing.T) {
+ t.Parallel()
partitiontest.PartitionTest(t)
a := require.New(t)
start, _, signer := generateTestSignerAux(a)
- hashable, sig := makeSig(signer, start, a)
+ msg, sig := makeSig(signer, start, a)
sig2 := copySig(sig)
sig2.Proof.Path = sig2.Proof.Path[:len(sig2.Proof.Path)-1]
- err := signer.GetVerifier().Verify(start, hashable, sig2)
+ err := signer.GetVerifier().VerifyBytes(start, msg, &sig2)
a.Error(err)
a.ErrorIs(err, ErrSignatureSchemeVerificationFailed)
@@ -305,7 +350,7 @@ func TestBadMerkleProofInSignature(t *testing.T) {
someDigest := crypto.Digest{}
rand.Read(someDigest[:])
sig3.Proof.Path[0] = someDigest[:]
- err = signer.GetVerifier().Verify(start, hashable, sig3)
+ err = signer.GetVerifier().VerifyBytes(start, msg, &sig3)
a.Error(err)
a.ErrorIs(err, ErrSignatureSchemeVerificationFailed)
}
@@ -322,11 +367,12 @@ func copySig(sig Signature) Signature {
}
func TestIncorrectByteSignature(t *testing.T) {
+ t.Parallel()
partitiontest.PartitionTest(t)
a := require.New(t)
start, _, signer := generateTestSignerAux(a)
- hashable, sig := makeSig(signer, start, a)
+ msg, sig := makeSig(signer, start, a)
sig2 := sig
bs := make([]byte, len(sig.Signature))
@@ -334,44 +380,46 @@ func TestIncorrectByteSignature(t *testing.T) {
bs[0]++
sig2.Signature = bs
- err := signer.GetVerifier().Verify(start, hashable, sig2)
+ err := signer.GetVerifier().VerifyBytes(start, msg, &sig2)
a.Error(err)
a.ErrorIs(err, ErrSignatureSchemeVerificationFailed)
}
func TestIncorrectMerkleIndex(t *testing.T) {
+ t.Parallel()
partitiontest.PartitionTest(t)
a := require.New(t)
var err error
- h := genHashableForTest()
+ h := genMsgForTest()
signer := generateTestSigner(8, 100, 5, a)
a.NoError(err)
- sig, err := signer.GetSigner(20).Sign(h)
+ sig, err := signer.GetSigner(20).SignBytes(h)
a.NoError(err)
- sig.MerkleArrayIndex = 0
- err = signer.GetVerifier().Verify(20, h, sig)
+ sig.VectorCommitmentIndex = 0
+ err = signer.GetVerifier().VerifyBytes(20, h, &sig)
a.Error(err)
a.ErrorIs(err, ErrSignatureSchemeVerificationFailed)
- sig.MerkleArrayIndex = math.MaxUint64
- err = signer.GetVerifier().Verify(20, h, sig)
+ sig.VectorCommitmentIndex = math.MaxUint64
+ err = signer.GetVerifier().VerifyBytes(20, h, &sig)
a.Error(err)
a.ErrorIs(err, ErrSignatureSchemeVerificationFailed)
- err = signer.GetVerifier().Verify(20, h, sig)
+ err = signer.GetVerifier().VerifyBytes(20, h, &sig)
a.Error(err)
a.ErrorIs(err, ErrSignatureSchemeVerificationFailed)
}
func TestAttemptToUseDifferentKey(t *testing.T) {
+ t.Parallel()
partitiontest.PartitionTest(t)
a := require.New(t)
start, _, signer := generateTestSignerAux(a)
- hashable, sig := makeSig(signer, start+1, a)
+ msg, sig := makeSig(signer, start+1, a)
// taking signature for specific round and changing the round
// taking signature and changing the key to match different round
@@ -381,12 +429,13 @@ func TestAttemptToUseDifferentKey(t *testing.T) {
sig2.VerifyingKey = *(key.GetVerifyingKey())
- err := signer.GetVerifier().Verify(start+1, hashable, sig2)
+ err := signer.GetVerifier().VerifyBytes(start+1, msg, &sig2)
a.Error(err)
a.ErrorIs(err, ErrSignatureSchemeVerificationFailed)
}
func TestMarshal(t *testing.T) {
+ t.Parallel()
partitiontest.PartitionTest(t)
a := require.New(t)
@@ -405,49 +454,51 @@ func TestMarshal(t *testing.T) {
}
func TestNumberOfGeneratedKeys(t *testing.T) {
+ t.Parallel()
partitiontest.PartitionTest(t)
a := require.New(t)
- interval := uint64(256)
+ keyLifetime := uint64(256)
numberOfKeys := uint64(1 << 6)
- validPeriod := numberOfKeys*interval - 1
+ validPeriod := numberOfKeys*keyLifetime - 1
firstValid := uint64(1000)
lastValid := validPeriod + 1000
- s, err := New(firstValid, lastValid, interval)
+ s, err := New(firstValid, lastValid, keyLifetime)
a.NoError(err)
a.Equal(numberOfKeys, uint64(length(s, a)))
firstValid = uint64(0)
lastValid = validPeriod
- s, err = New(firstValid, lastValid, interval)
+ s, err = New(firstValid, lastValid, keyLifetime)
a.NoError(err)
- a.Equal(numberOfKeys-1, uint64(length(s, a)))
+ a.Equal(numberOfKeys, uint64(length(s, a)))
firstValid = uint64(1000)
- lastValid = validPeriod + 1000 - (interval * 50)
- s, err = New(firstValid, lastValid, interval)
+ lastValid = validPeriod + 1000 - (keyLifetime * 50)
+ s, err = New(firstValid, lastValid, keyLifetime)
a.NoError(err)
a.Equal(numberOfKeys-50, uint64(length(s, a)))
}
func TestGetAllKeys(t *testing.T) {
+ t.Parallel()
partitiontest.PartitionTest(t)
a := require.New(t)
- interval := uint64(256)
+ keyLifetime := uint64(256)
numOfKeys := uint64(1 << 8)
- validPeriod := numOfKeys*interval - 1
+ validPeriod := numOfKeys*keyLifetime - 1
firstValid := uint64(1000)
lastValid := validPeriod + 1000
- s, err := New(firstValid, lastValid, interval)
+ s, err := New(firstValid, lastValid, keyLifetime)
a.NoError(err)
a.Equal(numOfKeys, uint64(len(s.ephemeralKeys)))
keys := s.GetAllKeys()
for i := uint64(0); i < uint64(len(s.ephemeralKeys)); i++ {
a.Equal(s.ephemeralKeys[i], *keys[i].Key)
- a.Equal(indexToRound(firstValid, interval, i), keys[i].Round)
+ a.Equal(indexToRound(firstValid, keyLifetime, i), keys[i].Round)
}
s, err = New(1, 2, 100)
@@ -459,13 +510,13 @@ func TestGetAllKeys(t *testing.T) {
}
//#region Helper Functions
-func makeSig(signer *Secrets, sigRound uint64, a *require.Assertions) (crypto.Hashable, Signature) {
- hashable := genHashableForTest()
+func makeSig(signer *Secrets, sigRound uint64, a *require.Assertions) ([]byte, Signature) {
+ msg := genMsgForTest()
- sig, err := signer.GetSigner(sigRound).Sign(hashable)
+ sig, err := signer.GetSigner(sigRound).SignBytes(msg)
a.NoError(err)
- a.NoError(signer.GetVerifier().Verify(sigRound, hashable, sig))
- return hashable, sig
+ a.NoError(signer.GetVerifier().VerifyBytes(sigRound, msg, &sig))
+ return msg, sig
}
func generateTestSignerAux(a *require.Assertions) (uint64, uint64, *Secrets) {
@@ -474,8 +525,8 @@ func generateTestSignerAux(a *require.Assertions) (uint64, uint64, *Secrets) {
return start, end, signer
}
-func generateTestSigner(firstValid, lastValid, interval uint64, a *require.Assertions) *Secrets {
- signer, err := New(firstValid, lastValid, interval)
+func generateTestSigner(firstValid, lastValid, keyLifetime uint64, a *require.Assertions) *Secrets {
+ signer, err := New(firstValid, lastValid, keyLifetime)
a.NoError(err)
return signer
@@ -502,16 +553,16 @@ func copyProof(proof merklearray.SingleLeafProof) merklearray.SingleLeafProof {
func TestTreeRootHashLength(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
- interval := uint64(256)
+ keyLifetime := uint64(256)
numOfKeys := uint64(1 << 8)
- validPeriod := numOfKeys*interval - 1
+ validPeriod := numOfKeys*keyLifetime - 1
firstValid := uint64(1000)
lastValid := validPeriod + 1000
- s, err := New(firstValid, lastValid, interval)
+ s, err := New(firstValid, lastValid, keyLifetime)
a.NoError(err)
a.Equal(numOfKeys, uint64(len(s.ephemeralKeys)))
a.Equal(MerkleSignatureSchemeRootSize, len(s.Tree.Root()))
- a.Equal(MerkleSignatureSchemeRootSize, len(Verifier{}))
+ a.Equal(MerkleSignatureSchemeRootSize, len(Verifier{}.Commitment))
}
diff --git a/crypto/merklesignature/msgp_gen.go b/crypto/merklesignature/msgp_gen.go
index a40196d4a..ae33fa175 100644
--- a/crypto/merklesignature/msgp_gen.go
+++ b/crypto/merklesignature/msgp_gen.go
@@ -9,6 +9,14 @@ import (
)
// The following msgp objects are implemented in this file:
+// Commitment
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// KeyRoundPair
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -51,6 +59,45 @@ import (
//
// MarshalMsg implements msgp.Marshaler
+func (z *Commitment) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ o = msgp.AppendBytes(o, (*z)[:])
+ return
+}
+
+func (_ *Commitment) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*Commitment)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *Commitment) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ bts, err = msgp.ReadExactBytes(bts, (*z)[:])
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ o = bts
+ return
+}
+
+func (_ *Commitment) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*Commitment)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *Commitment) Msgsize() (s int) {
+ s = msgp.ArrayHeaderSize + (MerkleSignatureSchemeRootSize * (msgp.ByteSize))
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *Commitment) MsgIsZero() bool {
+ return (*z) == (Commitment{})
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *KeyRoundPair) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -220,7 +267,7 @@ func (z *Secrets) MarshalMsg(b []byte) (o []byte) {
zb0002Len--
zb0002Mask |= 0x8
}
- if (*z).SignerContext.Interval == 0 {
+ if (*z).SignerContext.KeyLifetime == 0 {
zb0002Len--
zb0002Mask |= 0x10
}
@@ -239,7 +286,7 @@ func (z *Secrets) MarshalMsg(b []byte) (o []byte) {
if (zb0002Mask & 0x10) == 0 { // if not empty
// string "iv"
o = append(o, 0xa2, 0x69, 0x76)
- o = msgp.AppendUint64(o, (*z).SignerContext.Interval)
+ o = msgp.AppendUint64(o, (*z).SignerContext.KeyLifetime)
}
if (zb0002Mask & 0x20) == 0 { // if not empty
// string "tree"
@@ -278,9 +325,9 @@ func (z *Secrets) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0002 > 0 {
zb0002--
- (*z).SignerContext.Interval, bts, err = msgp.ReadUint64Bytes(bts)
+ (*z).SignerContext.KeyLifetime, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Interval")
+ err = msgp.WrapError(err, "struct-from-array", "KeyLifetime")
return
}
}
@@ -322,9 +369,9 @@ func (z *Secrets) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "iv":
- (*z).SignerContext.Interval, bts, err = msgp.ReadUint64Bytes(bts)
+ (*z).SignerContext.KeyLifetime, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
- err = msgp.WrapError(err, "Interval")
+ err = msgp.WrapError(err, "KeyLifetime")
return
}
case "tree":
@@ -359,7 +406,7 @@ func (z *Secrets) Msgsize() (s int) {
// MsgIsZero returns whether this is a zero value
func (z *Secrets) MsgIsZero() bool {
- return ((*z).SignerContext.FirstValid == 0) && ((*z).SignerContext.Interval == 0) && ((*z).SignerContext.Tree.MsgIsZero())
+ return ((*z).SignerContext.FirstValid == 0) && ((*z).SignerContext.KeyLifetime == 0) && ((*z).SignerContext.Tree.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
@@ -368,7 +415,7 @@ func (z *Signature) MarshalMsg(b []byte) (o []byte) {
// omitempty: check for empty values
zb0001Len := uint32(4)
var zb0001Mask uint8 /* 5 bits */
- if (*z).MerkleArrayIndex == 0 {
+ if (*z).VectorCommitmentIndex == 0 {
zb0001Len--
zb0001Mask |= 0x2
}
@@ -390,7 +437,7 @@ func (z *Signature) MarshalMsg(b []byte) (o []byte) {
if (zb0001Mask & 0x2) == 0 { // if not empty
// string "idx"
o = append(o, 0xa3, 0x69, 0x64, 0x78)
- o = msgp.AppendUint64(o, (*z).MerkleArrayIndex)
+ o = msgp.AppendUint64(o, (*z).VectorCommitmentIndex)
}
if (zb0001Mask & 0x4) == 0 { // if not empty
// string "prf"
@@ -439,9 +486,9 @@ func (z *Signature) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0001 > 0 {
zb0001--
- (*z).MerkleArrayIndex, bts, err = msgp.ReadUint64Bytes(bts)
+ (*z).VectorCommitmentIndex, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "MerkleArrayIndex")
+ err = msgp.WrapError(err, "struct-from-array", "VectorCommitmentIndex")
return
}
}
@@ -491,9 +538,9 @@ func (z *Signature) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "idx":
- (*z).MerkleArrayIndex, bts, err = msgp.ReadUint64Bytes(bts)
+ (*z).VectorCommitmentIndex, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
- err = msgp.WrapError(err, "MerkleArrayIndex")
+ err = msgp.WrapError(err, "VectorCommitmentIndex")
return
}
case "prf":
@@ -534,7 +581,7 @@ func (z *Signature) Msgsize() (s int) {
// MsgIsZero returns whether this is a zero value
func (z *Signature) MsgIsZero() bool {
- return ((*z).Signature.MsgIsZero()) && ((*z).MerkleArrayIndex == 0) && ((*z).Proof.MsgIsZero()) && ((*z).VerifyingKey.MsgIsZero())
+ return ((*z).Signature.MsgIsZero()) && ((*z).VectorCommitmentIndex == 0) && ((*z).Proof.MsgIsZero()) && ((*z).VerifyingKey.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
@@ -547,7 +594,7 @@ func (z *SignerContext) MarshalMsg(b []byte) (o []byte) {
zb0001Len--
zb0001Mask |= 0x2
}
- if (*z).Interval == 0 {
+ if (*z).KeyLifetime == 0 {
zb0001Len--
zb0001Mask |= 0x4
}
@@ -566,7 +613,7 @@ func (z *SignerContext) MarshalMsg(b []byte) (o []byte) {
if (zb0001Mask & 0x4) == 0 { // if not empty
// string "iv"
o = append(o, 0xa2, 0x69, 0x76)
- o = msgp.AppendUint64(o, (*z).Interval)
+ o = msgp.AppendUint64(o, (*z).KeyLifetime)
}
if (zb0001Mask & 0x8) == 0 { // if not empty
// string "tree"
@@ -605,9 +652,9 @@ func (z *SignerContext) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0001 > 0 {
zb0001--
- (*z).Interval, bts, err = msgp.ReadUint64Bytes(bts)
+ (*z).KeyLifetime, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Interval")
+ err = msgp.WrapError(err, "struct-from-array", "KeyLifetime")
return
}
}
@@ -649,9 +696,9 @@ func (z *SignerContext) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "iv":
- (*z).Interval, bts, err = msgp.ReadUint64Bytes(bts)
+ (*z).KeyLifetime, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
- err = msgp.WrapError(err, "Interval")
+ err = msgp.WrapError(err, "KeyLifetime")
return
}
case "tree":
@@ -686,13 +733,37 @@ func (z *SignerContext) Msgsize() (s int) {
// MsgIsZero returns whether this is a zero value
func (z *SignerContext) MsgIsZero() bool {
- return ((*z).FirstValid == 0) && ((*z).Interval == 0) && ((*z).Tree.MsgIsZero())
+ return ((*z).FirstValid == 0) && ((*z).KeyLifetime == 0) && ((*z).Tree.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
func (z *Verifier) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
- o = msgp.AppendBytes(o, (*z)[:])
+ // omitempty: check for empty values
+ zb0002Len := uint32(2)
+ var zb0002Mask uint8 /* 3 bits */
+ if (*z).Commitment == (Commitment{}) {
+ zb0002Len--
+ zb0002Mask |= 0x2
+ }
+ if (*z).KeyLifetime == 0 {
+ zb0002Len--
+ zb0002Mask |= 0x4
+ }
+ // variable map header, size zb0002Len
+ o = append(o, 0x80|uint8(zb0002Len))
+ if zb0002Len != 0 {
+ if (zb0002Mask & 0x2) == 0 { // if not empty
+ // string "cmt"
+ o = append(o, 0xa3, 0x63, 0x6d, 0x74)
+ o = msgp.AppendBytes(o, ((*z).Commitment)[:])
+ }
+ if (zb0002Mask & 0x4) == 0 { // if not empty
+ // string "lf"
+ o = append(o, 0xa2, 0x6c, 0x66)
+ o = msgp.AppendUint64(o, (*z).KeyLifetime)
+ }
+ }
return
}
@@ -703,10 +774,76 @@ func (_ *Verifier) CanMarshalMsg(z interface{}) bool {
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Verifier) UnmarshalMsg(bts []byte) (o []byte, err error) {
- bts, err = msgp.ReadExactBytes(bts, (*z)[:])
- if err != nil {
- err = msgp.WrapError(err)
- return
+ var field []byte
+ _ = field
+ var zb0002 int
+ var zb0003 bool
+ zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 > 0 {
+ zb0002--
+ bts, err = msgp.ReadExactBytes(bts, ((*z).Commitment)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Commitment")
+ return
+ }
+ }
+ if zb0002 > 0 {
+ zb0002--
+ (*z).KeyLifetime, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KeyLifetime")
+ return
+ }
+ }
+ if zb0002 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0002)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 {
+ (*z) = Verifier{}
+ }
+ for zb0002 > 0 {
+ zb0002--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "cmt":
+ bts, err = msgp.ReadExactBytes(bts, ((*z).Commitment)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "Commitment")
+ return
+ }
+ case "lf":
+ (*z).KeyLifetime, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KeyLifetime")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
}
o = bts
return
@@ -719,11 +856,11 @@ func (_ *Verifier) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Verifier) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize + (MerkleSignatureSchemeRootSize * (msgp.ByteSize))
+ s = 1 + 4 + msgp.ArrayHeaderSize + (MerkleSignatureSchemeRootSize * (msgp.ByteSize)) + 3 + msgp.Uint64Size
return
}
// MsgIsZero returns whether this is a zero value
func (z *Verifier) MsgIsZero() bool {
- return (*z) == (Verifier{})
+ return ((*z).Commitment == (Commitment{})) && ((*z).KeyLifetime == 0)
}
diff --git a/crypto/merklesignature/msgp_gen_test.go b/crypto/merklesignature/msgp_gen_test.go
index 56c8e2bdf..7a53df31a 100644
--- a/crypto/merklesignature/msgp_gen_test.go
+++ b/crypto/merklesignature/msgp_gen_test.go
@@ -14,6 +14,66 @@ import (
"github.com/algorand/go-algorand/test/partitiontest"
)
+func TestMarshalUnmarshalCommitment(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := Commitment{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingCommitment(t *testing.T) {
+ protocol.RunEncodingTest(t, &Commitment{})
+}
+
+func BenchmarkMarshalMsgCommitment(b *testing.B) {
+ v := Commitment{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgCommitment(b *testing.B) {
+ v := Commitment{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalCommitment(b *testing.B) {
+ v := Commitment{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalKeyRoundPair(t *testing.T) {
partitiontest.PartitionTest(t)
v := KeyRoundPair{}
diff --git a/crypto/merklesignature/persistentMerkleSignatureScheme.go b/crypto/merklesignature/persistentMerkleSignatureScheme.go
index 1ce67c8d5..2ece3380d 100644
--- a/crypto/merklesignature/persistentMerkleSignatureScheme.go
+++ b/crypto/merklesignature/persistentMerkleSignatureScheme.go
@@ -94,10 +94,10 @@ func (s *Secrets) Persist(store db.Accessor) error {
if s.ephemeralKeys == nil {
return fmt.Errorf("no keys provided (nil)")
}
- if s.Interval == 0 {
- return fmt.Errorf("Secrets.Persist: %w", errIntervalZero)
+ if s.KeyLifetime == 0 {
+ return fmt.Errorf("Secrets.Persist: %w", ErrKeyLifetimeIsZero)
}
- round := indexToRound(s.FirstValid, s.Interval, 0)
+ round := indexToRound(s.FirstValid, s.KeyLifetime, 0)
encodedKey := protocol.GetEncodingBuf()
err := store.Atomic(func(ctx context.Context, tx *sql.Tx) error {
err := InstallStateProofTable(tx) // assumes schema table already exists (created by partInstallDatabase)
@@ -121,7 +121,7 @@ func (s *Secrets) Persist(store db.Accessor) error {
if err != nil {
return fmt.Errorf("failed to insert StateProof key number %v round %d. SQL Error: %w", i, round, err)
}
- round += s.Interval
+ round += s.KeyLifetime
}
return nil
diff --git a/crypto/merklesignature/persistentMerkleSignatureScheme_test.go b/crypto/merklesignature/persistentMerkleSignatureScheme_test.go
index 50ddb404d..970daf903 100644
--- a/crypto/merklesignature/persistentMerkleSignatureScheme_test.go
+++ b/crypto/merklesignature/persistentMerkleSignatureScheme_test.go
@@ -68,7 +68,7 @@ func TestFetchRestoreAllSecrets(t *testing.T) {
store := createTestDB(a)
defer store.Close()
- firstValid := uint64(1)
+ firstValid := uint64(0)
LastValid := uint64(5000)
interval := uint64(256)
@@ -81,17 +81,12 @@ func TestFetchRestoreAllSecrets(t *testing.T) {
err = newMss.RestoreAllSecrets(*store)
a.NoError(err)
- for i := uint64(1); i < LastValid; i++ {
+ for i := uint64(0); i < LastValid; i++ {
key1 := mss.GetKey(i)
key2 := newMss.GetKey(i)
- if i%interval == 0 {
- a.NotNil(key1)
- a.NotNil(key2)
- a.Equal(*key1, *key2)
- continue
- }
- a.Nil(key1)
- a.Nil(key2)
+ a.NotNil(key1)
+ a.NotNil(key2)
+ a.Equal(*key1, *key2)
}
// make sure we exercise the path of the database being upgraded, but then
diff --git a/crypto/merklesignature/posdivs.go b/crypto/merklesignature/posdivs.go
index 6ac430ace..9ce88d53e 100644
--- a/crypto/merklesignature/posdivs.go
+++ b/crypto/merklesignature/posdivs.go
@@ -20,20 +20,11 @@ import (
"errors"
)
-var errRoundMultipleOfInterval = errors.New("the round should be a multiple of the interval")
var errRoundFirstValid = errors.New("the round cannot be less than firstValid")
-var errIntervalZero = errors.New("the interval should not be zero")
-var errRoundNotZero = errors.New("the round should not be zero")
-func checkMerkleSignatureSchemeParams(firstValid, round, interval uint64) error {
- if interval == 0 {
- return errIntervalZero
- }
- if round == 0 {
- return errRoundNotZero
- }
- if round%interval != 0 {
- return errRoundMultipleOfInterval
+func checkMerkleSignatureSchemeParams(firstValid, round, keyLifetime uint64) error {
+ if keyLifetime == 0 {
+ return ErrKeyLifetimeIsZero
}
if round < firstValid {
return errRoundFirstValid
@@ -53,3 +44,10 @@ func indexToRound(firstValid, interval, pos uint64) uint64 {
func roundOfFirstIndex(firstValid, interval uint64) uint64 {
return ((firstValid + interval - 1) / interval) * interval
}
+
+// firstRoundInKeyLifetime calculates the round of the valid key for a given round by lowering to the closest KeyLiftime divisor.
+// It is implicitly assumed that round is larger than keyLifetime, as an MSS key for round 0 is not valid.
+// A key lifetime of 0 is invalid.
+func firstRoundInKeyLifetime(round, keyLifetime uint64) uint64 {
+ return round - (round % keyLifetime)
+}
diff --git a/crypto/merklesignature/posdivs_test.go b/crypto/merklesignature/posdivs_test.go
index 8a48d1c32..579a1c3d5 100644
--- a/crypto/merklesignature/posdivs_test.go
+++ b/crypto/merklesignature/posdivs_test.go
@@ -29,23 +29,23 @@ func TestRoundToIndex(t *testing.T) {
count := uint64(200)
- // firstValid <= interval
+ // firstValid <= keyLifetime
firstValid := uint64(100)
- interval := uint64(101)
+ keyLifetime := uint64(101)
ic := uint64(1)
- checkRoundToIndex(count, ic, firstValid, interval, t)
+ checkRoundToIndex(count, ic, firstValid, keyLifetime, t)
- // firstValid > interval
+ // firstValid > keyLifetime
firstValid = uint64(100)
- interval = uint64(99)
+ keyLifetime = uint64(99)
ic = uint64(2)
- checkRoundToIndex(count, ic, firstValid, interval, t)
+ checkRoundToIndex(count, ic, firstValid, keyLifetime, t)
- // firstValid >> interval
+ // firstValid >> keyLifetime
firstValid = uint64(100)
- interval = uint64(20)
+ keyLifetime = uint64(20)
ic = uint64(5)
- checkRoundToIndex(count, ic, firstValid, interval, t)
+ checkRoundToIndex(count, ic, firstValid, keyLifetime, t)
}
func TestIndexToRoundToIndex(t *testing.T) {
@@ -53,63 +53,52 @@ func TestIndexToRoundToIndex(t *testing.T) {
count := uint64(200)
firstValid := uint64(100)
- interval := uint64(101)
- checkIndexToRoundToIndex(count, firstValid, interval, t)
+ keyLifetime := uint64(101)
+ checkIndexToRoundToIndex(count, firstValid, keyLifetime, t)
firstValid = uint64(100)
- interval = uint64(99)
- checkIndexToRoundToIndex(count, firstValid, interval, t)
+ keyLifetime = uint64(99)
+ checkIndexToRoundToIndex(count, firstValid, keyLifetime, t)
firstValid = uint64(100)
- interval = uint64(20)
- checkIndexToRoundToIndex(count, firstValid, interval, t)
+ keyLifetime = uint64(20)
+ checkIndexToRoundToIndex(count, firstValid, keyLifetime, t)
}
func TestErrors(t *testing.T) {
partitiontest.PartitionTest(t)
- firstValid := uint64(100)
- interval := uint64(101)
- round := uint64(0)
- require.Equal(t, errRoundNotZero, checkMerkleSignatureSchemeParams(firstValid, round, interval))
-
- round = interval - 1
- require.Equal(t, errRoundMultipleOfInterval, checkMerkleSignatureSchemeParams(firstValid, round, interval))
-
- round = interval + 1
- require.Equal(t, errRoundMultipleOfInterval, checkMerkleSignatureSchemeParams(firstValid, round, interval))
-
- firstValid = uint64(101)
- round = firstValid - 1
- interval = round / 2
- require.Equal(t, errRoundFirstValid, checkMerkleSignatureSchemeParams(firstValid, round, interval))
+ firstValid := uint64(101)
+ round := firstValid - 1
+ keyLifetime := round / 2
+ require.Equal(t, errRoundFirstValid, checkMerkleSignatureSchemeParams(firstValid, round, keyLifetime))
- interval = 0
- require.Equal(t, errIntervalZero, checkMerkleSignatureSchemeParams(firstValid, round, interval))
+ keyLifetime = 0
+ require.Equal(t, ErrKeyLifetimeIsZero, checkMerkleSignatureSchemeParams(firstValid, round, keyLifetime))
- interval = 107
+ keyLifetime = 107
round = 107
firstValid = 107
- require.NoError(t, checkMerkleSignatureSchemeParams(firstValid, round, interval))
+ require.NoError(t, checkMerkleSignatureSchemeParams(firstValid, round, keyLifetime))
}
-func checkIndexToRoundToIndex(count, firstValid, interval uint64, t *testing.T) {
+func checkIndexToRoundToIndex(count, firstValid, keyLifetime uint64, t *testing.T) {
for pos := uint64(0); pos < count; pos++ {
- round := indexToRound(firstValid, interval, uint64(pos))
- index := roundToIndex(firstValid, round, interval)
+ round := indexToRound(firstValid, keyLifetime, uint64(pos))
+ index := roundToIndex(firstValid, round, keyLifetime)
require.Equal(t, uint64(pos), index)
}
}
-func checkRoundToIndex(count, initC, firstValid, interval uint64, t *testing.T) {
+func checkRoundToIndex(count, initC, firstValid, keyLifetime uint64, t *testing.T) {
expIndex := uint64(0)
for c := initC; c < count; c++ {
- round := interval * c
- index := roundToIndex(firstValid, round, interval)
+ round := keyLifetime * c
+ index := roundToIndex(firstValid, round, keyLifetime)
require.Equal(t, expIndex, index)
expIndex++
- round2 := indexToRound(firstValid, interval, index)
+ round2 := indexToRound(firstValid, keyLifetime, index)
require.Equal(t, round, round2)
}
diff --git a/crypto/multisig.go b/crypto/multisig.go
index c97e3ce35..53386ebc9 100644
--- a/crypto/multisig.go
+++ b/crypto/multisig.go
@@ -217,7 +217,7 @@ func MultisigAssemble(unisig []MultisigSig) (msig MultisigSig, err error) {
// MultisigVerify verifies an assembled MultisigSig
func MultisigVerify(msg Hashable, addr Digest, sig MultisigSig) (verified bool, err error) {
- batchVerifier := MakeBatchVerifierWithAlgorithmDefaultSize()
+ batchVerifier := MakeBatchVerifier()
if verified, err = MultisigBatchVerify(msg, addr, sig, batchVerifier); err != nil {
return
diff --git a/crypto/multisig_test.go b/crypto/multisig_test.go
index b4439f8c5..28eec2459 100644
--- a/crypto/multisig_test.go
+++ b/crypto/multisig_test.go
@@ -141,7 +141,7 @@ func TestMultisig(t *testing.T) {
require.True(t, verify, "Multisig: verification failed, verify flag was false")
//test3: use the batch verification
- br := MakeBatchVerifierWithAlgorithmDefaultSize()
+ br := MakeBatchVerifier()
verify, err = MultisigBatchVerify(txid, addr, msig, br)
require.NoError(t, err, "Multisig: unexpected verification failure with err")
require.True(t, verify, "Multisig: verification failed, verify flag was false")
@@ -257,7 +257,7 @@ func TestEmptyMultisig(t *testing.T) {
verify, err := MultisigVerify(txid, addr, emptyMutliSig)
require.False(t, verify, "Multisig: verification succeeded, it should failed")
require.Error(t, err, "Multisig: did not return error as expected")
- br := MakeBatchVerifierWithAlgorithmDefaultSize()
+ br := MakeBatchVerifier()
verify, err = MultisigBatchVerify(txid, addr, emptyMutliSig, br)
require.False(t, verify, "Multisig: verification succeeded, it should failed")
require.Error(t, err, "Multisig: did not return error as expected")
@@ -285,7 +285,7 @@ func TestIncorrectAddrresInMultisig(t *testing.T) {
verify, err := MultisigVerify(txid, addr, MutliSig)
require.False(t, verify, "Multisig: verification succeeded, it should failed")
require.Error(t, err, "Multisig: did not return error as expected")
- br := MakeBatchVerifierWithAlgorithmDefaultSize()
+ br := MakeBatchVerifier()
verify, err = MultisigBatchVerify(txid, addr, MutliSig, br)
require.False(t, verify, "Multisig: verification succeeded, it should failed")
require.Error(t, err, "Multisig: did not return error as expected")
@@ -324,7 +324,7 @@ func TestMoreThanMaxSigsInMultisig(t *testing.T) {
verify, err := MultisigVerify(txid, addr, msig)
require.False(t, verify, "Multisig: verification succeeded, it should failed")
require.Error(t, err, "Multisig: did not return error as expected")
- br := MakeBatchVerifierWithAlgorithmDefaultSize()
+ br := MakeBatchVerifier()
verify, err = MultisigBatchVerify(txid, addr, msig, br)
require.False(t, verify, "Multisig: verification succeeded, it should failed")
require.Error(t, err, "Multisig: did not return error as expected")
@@ -363,7 +363,7 @@ func TestOneSignatureIsEmpty(t *testing.T) {
verify, err := MultisigVerify(txid, addr, msig)
require.False(t, verify, "Multisig: verification succeeded, it should failed")
require.Error(t, err, "Multisig: did not return error as expected")
- br := MakeBatchVerifierWithAlgorithmDefaultSize()
+ br := MakeBatchVerifier()
verify, err = MultisigBatchVerify(txid, addr, msig, br)
require.False(t, verify, "Multisig: verification succeeded, it should failed")
require.Error(t, err, "Multisig: did not return error as expected")
@@ -404,7 +404,7 @@ func TestOneSignatureIsInvalid(t *testing.T) {
verify, err := MultisigVerify(txid, addr, msig)
require.False(t, verify, "Multisig: verification succeeded, it should failed")
require.Error(t, err, "Multisig: did not return error as expected")
- br := MakeBatchVerifierWithAlgorithmDefaultSize()
+ br := MakeBatchVerifier()
verify, err = MultisigBatchVerify(txid, addr, msig, br)
require.NoError(t, err, "Multisig: did not return error as expected")
require.True(t, verify, "Multisig: verification succeeded, it should failed")
diff --git a/crypto/onetimesig.go b/crypto/onetimesig.go
index 2f391d8c3..cf1c25642 100644
--- a/crypto/onetimesig.go
+++ b/crypto/onetimesig.go
@@ -308,7 +308,7 @@ func (s *OneTimeSignatureSecrets) Sign(id OneTimeSignatureIdentifier, message Ha
// OneTimeSignatureVerifier and some OneTimeSignatureIdentifier.
//
// It returns true if this is the case; otherwise, it returns false.
-func (v OneTimeSignatureVerifier) Verify(id OneTimeSignatureIdentifier, message Hashable, sig OneTimeSignature, batchVersionCompatible bool) bool {
+func (v OneTimeSignatureVerifier) Verify(id OneTimeSignatureIdentifier, message Hashable, sig OneTimeSignature) bool {
offsetID := OneTimeSignatureSubkeyOffsetID{
SubKeyPK: sig.PK,
Batch: id.Batch,
@@ -319,24 +319,12 @@ func (v OneTimeSignatureVerifier) Verify(id OneTimeSignatureIdentifier, message
Batch: id.Batch,
}
- if batchVersionCompatible {
- return batchVerificationImpl(
- [][]byte{HashRep(batchID), HashRep(offsetID), HashRep(message)},
- []PublicKey{PublicKey(v), PublicKey(batchID.SubKeyPK), PublicKey(offsetID.SubKeyPK)},
- []Signature{Signature(sig.PK2Sig), Signature(sig.PK1Sig), Signature(sig.Sig)},
- )
- }
+ return batchVerificationImpl(
+ [][]byte{HashRep(batchID), HashRep(offsetID), HashRep(message)},
+ []PublicKey{PublicKey(v), PublicKey(batchID.SubKeyPK), PublicKey(offsetID.SubKeyPK)},
+ []Signature{Signature(sig.PK2Sig), Signature(sig.PK1Sig), Signature(sig.Sig)},
+ )
- if !ed25519Verify(ed25519PublicKey(v), HashRep(batchID), sig.PK2Sig, batchVersionCompatible) {
- return false
- }
- if !ed25519Verify(batchID.SubKeyPK, HashRep(offsetID), sig.PK1Sig, batchVersionCompatible) {
- return false
- }
- if !ed25519Verify(offsetID.SubKeyPK, HashRep(message), sig.Sig, batchVersionCompatible) {
- return false
- }
- return true
}
// DeleteBeforeFineGrained deletes ephemeral keys before (but not including) the given id.
diff --git a/crypto/onetimesig_test.go b/crypto/onetimesig_test.go
index 0dffc1907..af60a3c73 100644
--- a/crypto/onetimesig_test.go
+++ b/crypto/onetimesig_test.go
@@ -45,44 +45,44 @@ func testOneTimeSignVerifyNewStyle(t *testing.T, c *OneTimeSignatureSecrets, c2
s2 := randString()
sig := c.Sign(id, s)
- if !c.Verify(id, s, sig, true) {
+ if !c.Verify(id, s, sig) {
t.Errorf("correct signature failed to verify (ephemeral)")
}
- if c.Verify(id, s2, sig, true) {
+ if c.Verify(id, s2, sig) {
t.Errorf("signature verifies on wrong message")
}
sig2 := c2.Sign(id, s)
- if c.Verify(id, s, sig2, true) {
+ if c.Verify(id, s, sig2) {
t.Errorf("wrong master key incorrectly verified (ephemeral)")
}
otherID := randID()
- if c.Verify(otherID, s, sig, true) {
+ if c.Verify(otherID, s, sig) {
t.Errorf("signature verifies for wrong ID")
}
nextOffsetID := id
nextOffsetID.Offset++
- if c.Verify(nextOffsetID, s, sig, true) {
+ if c.Verify(nextOffsetID, s, sig) {
t.Errorf("signature verifies after changing offset")
}
c.DeleteBeforeFineGrained(nextOffsetID, 256)
sigAfterDelete := c.Sign(id, s)
- if c.Verify(id, s, sigAfterDelete, true) { // TODO(adam): Previously, this call to Verify was verifying old-style coarse-grained one-time signatures. Now it's verifying new-style fine-grained one-time signatures. Is this correct?
+ if c.Verify(id, s, sigAfterDelete) { // TODO(adam): Previously, this call to Verify was verifying old-style coarse-grained one-time signatures. Now it's verifying new-style fine-grained one-time signatures. Is this correct?
t.Errorf("signature verifies after delete offset")
}
sigNextAfterDelete := c.Sign(nextOffsetID, s)
- if !c.Verify(nextOffsetID, s, sigNextAfterDelete, true) {
+ if !c.Verify(nextOffsetID, s, sigNextAfterDelete) {
t.Errorf("signature fails to verify after deleting up to this offset")
}
nextOffsetID.Offset++
sigNext2AfterDelete := c.Sign(nextOffsetID, s)
- if !c.Verify(nextOffsetID, s, sigNext2AfterDelete, true) {
+ if !c.Verify(nextOffsetID, s, sigNext2AfterDelete) {
t.Errorf("signature fails to verify after deleting up to previous offset")
}
@@ -93,18 +93,18 @@ func testOneTimeSignVerifyNewStyle(t *testing.T, c *OneTimeSignatureSecrets, c2
nextBatchOffsetID.Offset++
c.DeleteBeforeFineGrained(nextBatchOffsetID, 256)
sigAfterDelete = c.Sign(nextBatchID, s)
- if c.Verify(nextBatchID, s, sigAfterDelete, true) {
+ if c.Verify(nextBatchID, s, sigAfterDelete) {
t.Errorf("signature verifies after delete")
}
sigNextAfterDelete = c.Sign(nextBatchOffsetID, s)
- if !c.Verify(nextBatchOffsetID, s, sigNextAfterDelete, true) {
+ if !c.Verify(nextBatchOffsetID, s, sigNextAfterDelete) {
t.Errorf("signature fails to verify after delete up to this offset")
}
nextBatchOffsetID.Offset++
sigNext2AfterDelete = c.Sign(nextBatchOffsetID, s)
- if !c.Verify(nextBatchOffsetID, s, sigNext2AfterDelete, true) {
+ if !c.Verify(nextBatchOffsetID, s, sigNext2AfterDelete) {
t.Errorf("signature fails to verify after delete up to previous offset")
}
@@ -115,27 +115,27 @@ func testOneTimeSignVerifyNewStyle(t *testing.T, c *OneTimeSignatureSecrets, c2
preBigJumpID := bigJumpID
preBigJumpID.Batch--
- if c.Verify(preBigJumpID, s, c.Sign(preBigJumpID, s), true) {
+ if c.Verify(preBigJumpID, s, c.Sign(preBigJumpID, s)) {
t.Errorf("preBigJumpID verifies")
}
preBigJumpID.Batch++
preBigJumpID.Offset--
- if c.Verify(preBigJumpID, s, c.Sign(preBigJumpID, s), true) {
+ if c.Verify(preBigJumpID, s, c.Sign(preBigJumpID, s)) {
t.Errorf("preBigJumpID verifies")
}
- if !c.Verify(bigJumpID, s, c.Sign(bigJumpID, s), true) {
+ if !c.Verify(bigJumpID, s, c.Sign(bigJumpID, s)) {
t.Errorf("bigJumpID does not verify")
}
bigJumpID.Offset++
- if !c.Verify(bigJumpID, s, c.Sign(bigJumpID, s), true) {
+ if !c.Verify(bigJumpID, s, c.Sign(bigJumpID, s)) {
t.Errorf("bigJumpID.Offset++ does not verify")
}
bigJumpID.Batch++
- if !c.Verify(bigJumpID, s, c.Sign(bigJumpID, s), true) {
+ if !c.Verify(bigJumpID, s, c.Sign(bigJumpID, s)) {
t.Errorf("bigJumpID.Batch++ does not verify")
}
}
@@ -158,7 +158,7 @@ func BenchmarkOneTimeSigBatchVerification(b *testing.B) {
// verify them
b.ResetTimer()
for i := 0; i < b.N; i++ {
- v.Verify(ids[i], msg, sigs[i], enabled)
+ v.Verify(ids[i], msg, sigs[i])
}
})
}
diff --git a/crypto/stateproof/builder.go b/crypto/stateproof/builder.go
new file mode 100644
index 000000000..3f85656ab
--- /dev/null
+++ b/crypto/stateproof/builder.go
@@ -0,0 +1,260 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package stateproof
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklearray"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/algorand/go-algorand/data/basics"
+)
+
+// Errors for the StateProof builder
+var (
+ ErrPositionOutOfBound = errors.New("requested position is out of bounds")
+ ErrPositionAlreadyPresent = errors.New("requested position is already present")
+ ErrPositionWithZeroWeight = errors.New("position has zero weight")
+ ErrCoinIndexError = errors.New("could not find corresponding index for a given coin")
+)
+
+// Builder keeps track of signatures on a message and eventually produces
+// a state proof for that message.
+type Builder struct {
+ data MessageHash
+ round uint64
+ sigs []sigslot // Indexed by pos in participants
+ signedWeight uint64 // Total weight of signatures so far
+ participants []basics.Participant
+ parttree *merklearray.Tree
+ lnProvenWeight uint64
+ provenWeight uint64
+ strengthTarget uint64
+ cachedProof *StateProof
+}
+
+// MakeBuilder constructs an empty builder. After adding enough signatures and signed weight, this builder is used to create a stateproof.
+func MakeBuilder(data MessageHash, round uint64, provenWeight uint64, part []basics.Participant, parttree *merklearray.Tree, strengthTarget uint64) (*Builder, error) {
+ npart := len(part)
+ lnProvenWt, err := LnIntApproximation(provenWeight)
+ if err != nil {
+ return nil, err
+ }
+
+ b := &Builder{
+ data: data,
+ round: round,
+ sigs: make([]sigslot, npart),
+ signedWeight: 0,
+ participants: part,
+ parttree: parttree,
+ lnProvenWeight: lnProvenWt,
+ provenWeight: provenWeight,
+ strengthTarget: strengthTarget,
+ cachedProof: nil,
+ }
+
+ return b, nil
+}
+
+// Present checks if the builder already contains a signature at a particular
+// offset.
+func (b *Builder) Present(pos uint64) (bool, error) {
+ if pos >= uint64(len(b.sigs)) {
+ return false, fmt.Errorf("%w pos %d >= len(b.sigs) %d", ErrPositionOutOfBound, pos, len(b.sigs))
+ }
+
+ return b.sigs[pos].Weight != 0, nil
+}
+
+// IsValid verifies that the participant along with the signature can be inserted to the builder.
+// verifySig can be set to false when the signature is already verified (e.g. loaded from the DB)
+func (b *Builder) IsValid(pos uint64, sig *merklesignature.Signature, verifySig bool) error {
+ if pos >= uint64(len(b.participants)) {
+ return fmt.Errorf("%w pos %d >= len(participants) %d", ErrPositionOutOfBound, pos, len(b.participants))
+ }
+
+ p := b.participants[pos]
+
+ if p.Weight == 0 {
+ return fmt.Errorf("builder.IsValid: %w: position = %d", ErrPositionWithZeroWeight, pos)
+ }
+
+ // Check signature
+ if verifySig {
+ if err := sig.ValidateSaltVersion(merklesignature.SchemeSaltVersion); err != nil {
+ return err
+ }
+ if err := p.PK.VerifyBytes(b.round, b.data[:], sig); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Add a signature to the set of signatures available for building a proof.
+func (b *Builder) Add(pos uint64, sig merklesignature.Signature) error {
+ isPresent, err := b.Present(pos)
+ if err != nil {
+ return err
+ }
+ if isPresent {
+ return ErrPositionAlreadyPresent
+ }
+
+ p := b.participants[pos]
+
+ // Remember the signature
+ b.sigs[pos].Weight = p.Weight
+ b.sigs[pos].Sig = sig
+ b.signedWeight += p.Weight
+ b.cachedProof = nil // can rebuild a more optimized state proof
+ return nil
+}
+
+// Ready returns whether the state proof is ready to be built.
+func (b *Builder) Ready() bool {
+ return b.cachedProof != nil || b.signedWeight > b.provenWeight
+}
+
+// SignedWeight returns the total weight of signatures added so far.
+func (b *Builder) SignedWeight() uint64 {
+ return b.signedWeight
+}
+
+// coinIndex returns the position pos in the sigs array such that the sum
+// of all signature weights before pos is less than or equal to coinWeight,
+// but the sum of all signature weights up to and including pos exceeds
+// coinWeight.
+//
+// coinIndex works by doing a binary search on the sigs array.
+func (b *Builder) coinIndex(coinWeight uint64) (uint64, error) {
+ lo := uint64(0)
+ hi := uint64(len(b.sigs))
+
+again:
+ if lo >= hi {
+ return 0, fmt.Errorf("%w: lo %d >= hi %d and coin %d", ErrCoinIndexError, lo, hi, coinWeight)
+ }
+
+ mid := (lo + hi) / 2
+ if coinWeight < b.sigs[mid].L {
+ hi = mid
+ goto again
+ }
+
+ if coinWeight < b.sigs[mid].L+b.sigs[mid].Weight {
+ return mid, nil
+ }
+
+ lo = mid + 1
+ goto again
+}
+
+// Build returns a state proof, if the builder has accumulated
+// enough signatures to construct it.
+func (b *Builder) Build() (*StateProof, error) {
+ if b.cachedProof != nil {
+ return b.cachedProof, nil
+ }
+
+ if !b.Ready() {
+ return nil, fmt.Errorf("%w: %d <= %d", ErrSignedWeightLessThanProvenWeight, b.signedWeight, b.provenWeight)
+ }
+
+ // Commit to the sigs array
+ for i := 1; i < len(b.sigs); i++ {
+ b.sigs[i].L = b.sigs[i-1].L + b.sigs[i-1].Weight
+ }
+
+ hfactory := crypto.HashFactory{HashType: HashType}
+ sigtree, err := merklearray.BuildVectorCommitmentTree(committableSignatureSlotArray(b.sigs), hfactory)
+ if err != nil {
+ return nil, err
+ }
+
+ // Reveal sufficient number of signatures
+ s := &StateProof{
+ SigCommit: sigtree.Root(),
+ SignedWeight: b.signedWeight,
+ Reveals: make(map[uint64]Reveal),
+ MerkleSignatureSaltVersion: merklesignature.SchemeSaltVersion,
+ }
+
+ nr, err := numReveals(b.signedWeight, b.lnProvenWeight, b.strengthTarget)
+ if err != nil {
+ return nil, err
+ }
+
+ choice := coinChoiceSeed{
+ partCommitment: b.parttree.Root(),
+ lnProvenWeight: b.lnProvenWeight,
+ sigCommitment: s.SigCommit,
+ signedWeight: s.SignedWeight,
+ data: b.data,
+ }
+
+ coinHash := makeCoinGenerator(&choice)
+
+ var proofPositions []uint64
+ revealsSequence := make([]uint64, nr)
+ for j := uint64(0); j < nr; j++ {
+ coin := coinHash.getNextCoin()
+ pos, err := b.coinIndex(coin)
+ if err != nil {
+ return nil, err
+ }
+
+ if pos >= uint64(len(b.participants)) {
+ return nil, fmt.Errorf("%w pos %d >= len(participants) %d", ErrPositionOutOfBound, pos, len(b.participants))
+ }
+
+ revealsSequence[j] = pos
+
+ // If we already revealed pos, no need to do it again
+ _, alreadyRevealed := s.Reveals[pos]
+ if alreadyRevealed {
+ continue
+ }
+
+ // Generate the reveal for pos
+ s.Reveals[pos] = Reveal{
+ SigSlot: b.sigs[pos].sigslotCommit,
+ Part: b.participants[pos],
+ }
+
+ proofPositions = append(proofPositions, pos)
+ }
+
+ sigProofs, err := sigtree.Prove(proofPositions)
+ if err != nil {
+ return nil, err
+ }
+
+ partProofs, err := b.parttree.Prove(proofPositions)
+ if err != nil {
+ return nil, err
+ }
+
+ s.SigProofs = *sigProofs
+ s.PartProofs = *partProofs
+ s.PositionsToReveal = revealsSequence
+ b.cachedProof = s
+ return s, nil
+}
diff --git a/crypto/compactcert/builder_test.go b/crypto/stateproof/builder_test.go
index 8d29e7127..780262e85 100644
--- a/crypto/compactcert/builder_test.go
+++ b/crypto/stateproof/builder_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package compactcert
+package stateproof
import (
"bytes"
@@ -36,10 +36,26 @@ import (
"github.com/algorand/go-algorand/test/partitiontest"
)
-type testMessage string
+type testMessage []byte
-const compactCertRoundsForTests = 256
-const compactCertSecKQForTests = 128
+func (m testMessage) IntoStateProofMessageHash() MessageHash {
+ hsh := MessageHash{}
+ copy(hsh[:], m)
+ return hsh
+}
+
+type paramsForTest struct {
+ sp StateProof
+ provenWeight uint64
+ partCommitment crypto.GenericDigest
+ numberOfParticipnets uint64
+ data MessageHash
+ builder *Builder
+ sig merklesignature.Signature
+}
+
+const stateProofIntervalForTests = 256
+const stateProofStrengthTargetForTests = 256
func hashBytes(hash hash.Hash, m []byte) []byte {
hash.Reset()
@@ -48,10 +64,6 @@ func hashBytes(hash hash.Hash, m []byte) []byte {
return outhash
}
-func (m testMessage) ToBeHashed() (protocol.HashID, []byte) {
- return protocol.Message, []byte(m)
-}
-
func createParticipantSliceWithWeight(totalWeight, numberOfParticipant int, key *merklesignature.Verifier) []basics.Participant {
parts := make([]basics.Participant, 0, numberOfParticipant)
@@ -73,93 +85,105 @@ func generateTestSigner(firstValid uint64, lastValid uint64, interval uint64, a
return signer
}
-func TestBuildVerify(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
- currentRound := basics.Round(compactCertRoundsForTests)
- // Doing a full test of 1M accounts takes too much CPU time in CI.
- doLargeTest := false
+func generateProofForTesting(a *require.Assertions, doLargeTest bool) paramsForTest {
totalWeight := 10000000
- npartHi := 10
- npartLo := 9990
+ npartHi := 2
+ npartLo := 100
+ stateproofIntervals := uint64(4) // affects the number of keys that will be generated
if doLargeTest {
npartHi *= 100
npartLo *= 100
+ stateproofIntervals = 20
}
npart := npartHi + npartLo
- param := Params{
- Msg: testMessage("hello world"),
- ProvenWeight: uint64(totalWeight / 2),
- SigRound: currentRound,
- SecKQ: compactCertSecKQForTests,
- }
+ data := testMessage("hello world").IntoStateProofMessageHash()
+ provenWt := uint64(totalWeight / 2)
// Share the key; we allow the same vote key to appear in multiple accounts..
- key := generateTestSigner(0, uint64(compactCertRoundsForTests)*20+1, compactCertRoundsForTests, a)
+ key := generateTestSigner(0, uint64(stateProofIntervalForTests)*stateproofIntervals+1, stateProofIntervalForTests, a)
var parts []basics.Participant
var sigs []merklesignature.Signature
parts = append(parts, createParticipantSliceWithWeight(totalWeight, npartHi, key.GetVerifier())...)
parts = append(parts, createParticipantSliceWithWeight(totalWeight, npartLo, key.GetVerifier())...)
- signerInRound := key.GetSigner(uint64(currentRound))
- sig, err := signerInRound.Sign(param.Msg)
- require.NoError(t, err, "failed to create keys")
+ signerInRound := key.GetSigner(stateProofIntervalForTests)
+ sig, err := signerInRound.SignBytes(data[:])
+ a.NoError(err, "failed to create keys")
for i := 0; i < npart; i++ {
sigs = append(sigs, sig)
}
partcom, err := merklearray.BuildVectorCommitmentTree(basics.ParticipantsArray(parts), crypto.HashFactory{HashType: HashType})
- if err != nil {
- t.Error(err)
- }
+ a.NoError(err)
- b, err := MkBuilder(param, parts, partcom)
- if err != nil {
- t.Error(err)
- }
+ b, err := MakeBuilder(data, stateProofIntervalForTests, uint64(totalWeight/2), parts, partcom, stateProofStrengthTargetForTests)
+ a.NoError(err)
- for i := 0; i < npart; i++ {
- err = b.Add(uint64(i), sigs[i], !doLargeTest)
- if err != nil {
- t.Error(err)
- }
+ for i := uint64(0); i < uint64(npart)/2+10; i++ { // leave some signature to be added later in the test (if needed)
+ a.False(b.Present(i))
+ a.NoError(b.IsValid(i, &sigs[i], !doLargeTest))
+ b.Add(i, sigs[i])
+
+ // sanity check that the builder add the signature
+ isPresent, err := b.Present(i)
+ a.NoError(err)
+ a.True(isPresent)
}
- cert, err := b.Build()
- if err != nil {
- t.Error(err)
+ proof, err := b.Build()
+ a.NoError(err)
+
+ p := paramsForTest{
+ sp: *proof,
+ provenWeight: provenWt,
+ partCommitment: partcom.Root(),
+ numberOfParticipnets: uint64(npart),
+ data: data,
+ builder: b,
+ sig: sig,
}
+ return p
+}
+
+func TestBuildVerify(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ p := generateProofForTesting(a, true)
+ sProof := p.sp
var someReveal Reveal
- for _, rev := range cert.Reveals {
+ for _, rev := range sProof.Reveals {
someReveal = rev
break
}
- certenc := protocol.Encode(cert)
- fmt.Printf("Cert size:\n")
- fmt.Printf(" %6d elems sigproofs\n", len(cert.SigProofs.Path))
- fmt.Printf(" %6d bytes sigproofs\n", len(protocol.EncodeReflect(cert.SigProofs)))
- fmt.Printf(" %6d bytes partproofs\n", len(protocol.EncodeReflect(cert.PartProofs)))
- fmt.Printf(" %6d bytes sigproof per reveal\n", len(protocol.EncodeReflect(cert.SigProofs))/len(cert.Reveals))
- fmt.Printf(" %6d reveals:\n", len(cert.Reveals))
+ proofEnc := protocol.Encode(&sProof)
+ fmt.Printf("StateProof size:\n")
+ fmt.Printf(" %6d elems sigproofs\n", len(sProof.SigProofs.Path))
+ fmt.Printf(" %6d bytes sigproofs\n", len(protocol.EncodeReflect(sProof.SigProofs)))
+ fmt.Printf(" %6d bytes partproofs\n", len(protocol.EncodeReflect(sProof.PartProofs)))
+ fmt.Printf(" %6d bytes sigproof per reveal\n", len(protocol.EncodeReflect(sProof.SigProofs))/len(sProof.Reveals))
+ fmt.Printf(" %6d reveals:\n", len(sProof.Reveals))
fmt.Printf(" %6d bytes reveals[*] participant\n", len(protocol.Encode(&someReveal.Part)))
fmt.Printf(" %6d bytes reveals[*] sigslot\n", len(protocol.Encode(&someReveal.SigSlot)))
fmt.Printf(" %6d bytes reveals[*] total\n", len(protocol.Encode(&someReveal)))
- fmt.Printf(" %6d bytes total\n", len(certenc))
+ fmt.Printf(" %6d bytes total\n", len(proofEnc))
+
+ verif, err := MkVerifier(p.partCommitment, p.provenWeight, stateProofStrengthTargetForTests)
+ a.NoError(err)
- verif := MkVerifier(param, partcom.Root())
- err = verif.Verify(cert)
- require.NoError(t, err, "failed to verify the compact cert")
+ err = verif.Verify(stateProofIntervalForTests, p.data, &sProof)
+ a.NoError(err, "failed to verify the state proof")
}
-func generateRandomParticipant(a *require.Assertions, testname string) basics.Participant {
+func generateRandomParticipant(a *require.Assertions) basics.Participant {
key := generateTestSigner(0, 8, 1, a)
p := basics.Participant{
@@ -173,11 +197,15 @@ func calculateHashOnPartLeaf(part basics.Participant) []byte {
binaryWeight := make([]byte, 8)
binary.LittleEndian.PutUint64(binaryWeight, part.Weight)
+ keyLifetimeBytes := make([]byte, 8)
+ binary.LittleEndian.PutUint64(keyLifetimeBytes, part.PK.KeyLifetime)
+
publicKeyBytes := part.PK
- partCommitment := make([]byte, 0, len(protocol.CompactCertPart)+len(binaryWeight)+len(publicKeyBytes))
- partCommitment = append(partCommitment, protocol.CompactCertPart...)
+ partCommitment := make([]byte, 0, len(protocol.StateProofPart)+len(binaryWeight)+len(publicKeyBytes.Commitment)+len(keyLifetimeBytes))
+ partCommitment = append(partCommitment, protocol.StateProofPart...)
partCommitment = append(partCommitment, binaryWeight...)
- partCommitment = append(partCommitment, publicKeyBytes[:]...)
+ partCommitment = append(partCommitment, keyLifetimeBytes...)
+ partCommitment = append(partCommitment, publicKeyBytes.Commitment[:]...)
factory := crypto.HashFactory{HashType: HashType}
hashValue := hashBytes(factory.NewHash(), partCommitment)
@@ -201,10 +229,10 @@ func TestParticipationCommitmentBinaryFormat(t *testing.T) {
a := require.New(t)
var parts []basics.Participant
- parts = append(parts, generateRandomParticipant(a, t.Name()))
- parts = append(parts, generateRandomParticipant(a, t.Name()))
- parts = append(parts, generateRandomParticipant(a, t.Name()))
- parts = append(parts, generateRandomParticipant(a, t.Name()))
+ parts = append(parts, generateRandomParticipant(a))
+ parts = append(parts, generateRandomParticipant(a))
+ parts = append(parts, generateRandomParticipant(a))
+ parts = append(parts, generateRandomParticipant(a))
partcom, err := merklearray.BuildVectorCommitmentTree(basics.ParticipantsArray(parts), crypto.HashFactory{HashType: HashType})
a.NoError(err)
@@ -230,22 +258,16 @@ func TestSignatureCommitmentBinaryFormat(t *testing.T) {
a := require.New(t)
- currentRound := basics.Round(compactCertRoundsForTests)
totalWeight := 10000000
numPart := 4
- param := Params{
- Msg: testMessage("test!"),
- ProvenWeight: uint64(totalWeight / (2 * numPart)),
- SigRound: currentRound,
- SecKQ: compactCertSecKQForTests,
- }
+ data := testMessage("test!").IntoStateProofMessageHash()
var parts []basics.Participant
var sigs []merklesignature.Signature
for i := 0; i < numPart; i++ {
- key := generateTestSigner(0, uint64(compactCertRoundsForTests)*8, compactCertRoundsForTests, a)
+ key := generateTestSigner(0, uint64(stateProofIntervalForTests)*8, stateProofIntervalForTests, a)
part := basics.Participant{
PK: *key.GetVerifier(),
@@ -253,7 +275,7 @@ func TestSignatureCommitmentBinaryFormat(t *testing.T) {
}
parts = append(parts, part)
- sig, err := key.GetSigner(uint64(currentRound)).Sign(param.Msg)
+ sig, err := key.GetSigner(stateProofIntervalForTests).SignBytes(data[:])
require.NoError(t, err, "failed to create keys")
sigs = append(sigs, sig)
@@ -262,21 +284,22 @@ func TestSignatureCommitmentBinaryFormat(t *testing.T) {
partcom, err := merklearray.BuildVectorCommitmentTree(basics.ParticipantsArray(parts), crypto.HashFactory{HashType: HashType})
a.NoError(err)
- b, err := MkBuilder(param, parts, partcom)
+ b, err := MakeBuilder(data, stateProofIntervalForTests, uint64(totalWeight/(2*numPart)), parts, partcom, stateProofStrengthTargetForTests)
a.NoError(err)
for i := 0; i < numPart; i++ {
- err = b.Add(uint64(i), sigs[i], false)
- a.NoError(err)
+ a.False(b.Present(uint64(i)))
+ a.NoError(b.IsValid(uint64(i), &sigs[i], false))
+ b.Add(uint64(i), sigs[i])
}
- cert, err := b.Build()
+ sProof, err := b.Build()
a.NoError(err)
- leaf0 := calculateHashOnSigLeaf(t, sigs[0], findLInCert(a, sigs[0], cert))
- leaf1 := calculateHashOnSigLeaf(t, sigs[1], findLInCert(a, sigs[1], cert))
- leaf2 := calculateHashOnSigLeaf(t, sigs[2], findLInCert(a, sigs[2], cert))
- leaf3 := calculateHashOnSigLeaf(t, sigs[3], findLInCert(a, sigs[3], cert))
+ leaf0 := calculateHashOnSigLeaf(t, sigs[0], findLInProof(a, sigs[0], sProof))
+ leaf1 := calculateHashOnSigLeaf(t, sigs[1], findLInProof(a, sigs[1], sProof))
+ leaf2 := calculateHashOnSigLeaf(t, sigs[2], findLInProof(a, sigs[2], sProof))
+ leaf3 := calculateHashOnSigLeaf(t, sigs[3], findLInProof(a, sigs[3], sProof))
// hash internal node according to the vector commitment indices
inner1 := calculateHashOnInternalNode(leaf0, leaf2)
@@ -284,11 +307,11 @@ func TestSignatureCommitmentBinaryFormat(t *testing.T) {
calcRoot := calculateHashOnInternalNode(inner1, inner2)
- a.Equal(cert.SigCommit, crypto.GenericDigest(calcRoot))
+ a.Equal(sProof.SigCommit, crypto.GenericDigest(calcRoot))
}
-// The aim of this test is to simulate how a SNARK circuit will verify a signature.(part of the overall compcatcert verification)
+// The aim of this test is to simulate how a SNARK circuit will verify a signature.(part of the overall stateproof verification)
// it includes parsing the signature's format (according to Algorand's spec) and binds it to a specific length.
// here we also expect the scheme to use Falcon signatures and nothing else.
func TestSimulateSignatureVerification(t *testing.T) {
@@ -297,17 +320,17 @@ func TestSimulateSignatureVerification(t *testing.T) {
signer := generateTestSigner(50, 100, 1, a)
sigRound := uint64(55)
- hashable := testMessage("testMessage")
- sig, err := signer.GetSigner(sigRound).Sign(hashable)
+ msg := testMessage("testMessage")
+ sig, err := signer.GetSigner(sigRound).SignBytes(msg)
a.NoError(err)
genericKey := signer.GetVerifier()
sigBytes, err := sig.GetFixedLengthHashableRepresentation()
a.NoError(err)
- checkSignature(a, sigBytes, genericKey, sigRound, hashable, 5, 6)
+ checkSignature(a, sigBytes, genericKey, sigRound, msg, 5, 6)
}
-// The aim of this test is to simulate how a SNARK circuit will verify a signature.(part of the overall compcatcert verification)
+// The aim of this test is to simulate how a SNARK circuit will verify a signature.(part of the overall stateproof verification)
// it includes parsing the signature's format (according to Algorand's spec) and binds it to a specific length.
// here we also expect the scheme to use Falcon signatures and nothing else.
func TestSimulateSignatureVerificationOneEphemeralKey(t *testing.T) {
@@ -315,20 +338,20 @@ func TestSimulateSignatureVerificationOneEphemeralKey(t *testing.T) {
a := require.New(t)
// we create one ephemeral key so the signature's proof should be with len 0
- signer := generateTestSigner(1, compactCertRoundsForTests, compactCertRoundsForTests, a)
+ signer := generateTestSigner(1, stateProofIntervalForTests, stateProofIntervalForTests, a)
- sigRound := uint64(compactCertRoundsForTests)
- hashable := testMessage("testMessage")
- sig, err := signer.GetSigner(sigRound).Sign(hashable)
+ sigRound := uint64(stateProofIntervalForTests)
+ msg := testMessage("testMessage")
+ sig, err := signer.GetSigner(sigRound).SignBytes(msg)
a.NoError(err)
genericKey := signer.GetVerifier()
sigBytes, err := sig.GetFixedLengthHashableRepresentation()
a.NoError(err)
- checkSignature(a, sigBytes, genericKey, sigRound, hashable, 0, 0)
+ checkSignature(a, sigBytes, genericKey, sigRound, msg, 0, 0)
}
-func checkSignature(a *require.Assertions, sigBytes []byte, verifier *merklesignature.Verifier, round uint64, message crypto.Hashable, expectedIndex uint64, expectedPathLen uint8) {
+func checkSignature(a *require.Assertions, sigBytes []byte, verifier *merklesignature.Verifier, round uint64, message []byte, expectedIndex uint64, expectedPathLen uint8) {
a.Equal(len(sigBytes), 4366)
parsedBytes := 0
@@ -354,7 +377,7 @@ func checkSignature(a *require.Assertions, sigBytes []byte, verifier *merklesign
leafHash = verifyMerklePath(idx, pathLe, sigBytes, parsedBytes, leafHash)
- a.Equal(leafHash, verifier[:])
+ a.Equal(leafHash, verifier.Commitment[:])
}
func verifyMerklePath(idx uint64, pathLe byte, sigBytes []byte, parsedBytes int, leafHash []byte) []byte {
@@ -396,7 +419,7 @@ func hashEphemeralPublicKeyLeaf(round uint64, falconPK [falcon.PublicKeySize]byt
return leafHash
}
-func verifyFalconSignature(a *require.Assertions, sigBytes []byte, parsedBytes int, message crypto.Hashable) (int, [falcon.PublicKeySize]byte) {
+func verifyFalconSignature(a *require.Assertions, sigBytes []byte, parsedBytes int, message []byte) (int, [falcon.PublicKeySize]byte) {
var falconSig [falcon.CTSignatureSize]byte
copy(falconSig[:], sigBytes[parsedBytes:parsedBytes+1538])
parsedBytes += 1538
@@ -407,15 +430,14 @@ func verifyFalconSignature(a *require.Assertions, sigBytes []byte, parsedBytes i
parsedBytes += 1793
ephemeralPk := falcon.PublicKey(falconPK)
- msgBytes := crypto.Hash(crypto.HashRep(message))
- err := ephemeralPk.VerifyCTSignature(ctSign, msgBytes[:])
+ err := ephemeralPk.VerifyCTSignature(ctSign, message)
a.NoError(err)
return parsedBytes, falconPK
}
-func findLInCert(a *require.Assertions, signature merklesignature.Signature, cert *Cert) uint64 {
- for _, t := range cert.Reveals {
- if bytes.Compare(t.SigSlot.Sig.Signature.Signature, signature.Signature) == 0 {
+func findLInProof(a *require.Assertions, signature merklesignature.Signature, proof *StateProof) uint64 {
+ for _, t := range proof.Reveals {
+ if bytes.Compare(t.SigSlot.Sig.Signature, signature.Signature) == 0 {
return t.SigSlot.L
}
}
@@ -423,32 +445,213 @@ func findLInCert(a *require.Assertions, signature merklesignature.Signature, cer
return 0
}
+func TestBuilder_AddRejectsInvalidSigVersion(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // setting up a builder
+ a := require.New(t)
+
+ totalWeight := 10000000
+ npartHi := 1
+ npartLo := 9
+
+ data := testMessage("hello world").IntoStateProofMessageHash()
+
+ key := generateTestSigner(0, uint64(stateProofIntervalForTests)*20+1, stateProofIntervalForTests, a)
+ var parts []basics.Participant
+ parts = append(parts, createParticipantSliceWithWeight(totalWeight, npartHi, key.GetVerifier())...)
+ parts = append(parts, createParticipantSliceWithWeight(totalWeight, npartLo, key.GetVerifier())...)
+
+ partcom, err := merklearray.BuildVectorCommitmentTree(basics.ParticipantsArray(parts), crypto.HashFactory{HashType: HashType})
+ a.NoError(err)
+
+ builder, err := MakeBuilder(data, stateProofIntervalForTests, uint64(totalWeight/2), parts, partcom, stateProofStrengthTargetForTests)
+ a.NoError(err)
+
+ // actual test:
+ signerInRound := key.GetSigner(stateProofIntervalForTests)
+ sig, err := signerInRound.SignBytes(data[:])
+ require.NoError(t, err, "failed to create keys")
+ // Corrupting the version of the signature:
+ sig.Signature[1]++
+
+ a.ErrorIs(builder.IsValid(0, &sig, true), merklesignature.ErrSignatureSaltVersionMismatch)
+}
+
+func TestBuildAndReady(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ totalWeight := 10000000
+ data := testMessage("hello world").IntoStateProofMessageHash()
+ var parts []basics.Participant
+
+ partcom, err := merklearray.BuildVectorCommitmentTree(basics.ParticipantsArray(parts), crypto.HashFactory{HashType: HashType})
+ a.NoError(err)
+
+ builder, err := MakeBuilder(data, stateProofIntervalForTests, uint64(totalWeight/2), parts, partcom, stateProofStrengthTargetForTests)
+ a.NoError(err)
+
+ a.False(builder.Ready())
+ _, err = builder.Build()
+ a.ErrorIs(err, ErrSignedWeightLessThanProvenWeight)
+
+ builder.signedWeight = builder.provenWeight
+ a.False(builder.Ready())
+ _, err = builder.Build()
+ a.ErrorIs(err, ErrSignedWeightLessThanProvenWeight)
+
+ builder.signedWeight = builder.provenWeight + 1
+ a.True(builder.Ready())
+ _, err = builder.Build()
+ a.NotErrorIs(err, ErrSignedWeightLessThanProvenWeight)
+
+}
+
+func TestErrorCases(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ builder := Builder{}
+ _, err := builder.Present(1)
+ a.ErrorIs(err, ErrPositionOutOfBound)
+
+ builder.participants = make([]basics.Participant, 1, 1)
+ builder.sigs = make([]sigslot, 1, 1)
+ err = builder.IsValid(1, &merklesignature.Signature{}, false)
+ a.ErrorIs(err, ErrPositionOutOfBound)
+
+ err = builder.IsValid(0, &merklesignature.Signature{}, false)
+ require.ErrorIs(t, err, ErrPositionWithZeroWeight)
+
+ builder.participants[0].Weight = 1
+ err = builder.IsValid(0, &merklesignature.Signature{}, true)
+ a.ErrorIs(err, merklesignature.ErrKeyLifetimeIsZero)
+
+ builder.participants[0].PK.KeyLifetime = 20
+ err = builder.IsValid(0, &merklesignature.Signature{}, true)
+ a.ErrorIs(err, merklesignature.ErrSignatureSchemeVerificationFailed)
+
+ builder.sigs[0].Weight = 1
+ err = builder.Add(1, merklesignature.Signature{})
+ a.ErrorIs(err, ErrPositionOutOfBound)
+
+ err = builder.Add(0, merklesignature.Signature{})
+ a.ErrorIs(err, ErrPositionAlreadyPresent)
+}
+
+func checkSigsArray(n int, a *require.Assertions) {
+ b := &Builder{
+ sigs: make([]sigslot, n),
+ }
+ for i := 0; i < n; i++ {
+ b.sigs[i].L = uint64(i)
+ b.sigs[i].Weight = 1
+ }
+ for i := 0; i < n; i++ {
+ pos, err := b.coinIndex(uint64(i))
+ a.NoError(err)
+ a.Equal(uint64(i), pos)
+ }
+}
+
+func TestCoinIndex(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ n := 1000
+ checkSigsArray(n, a)
+
+ n = 1
+ checkSigsArray(n, a)
+
+ n = 2
+ checkSigsArray(n, a)
+
+ n = 3
+ checkSigsArray(n, a)
+}
+
+func TestCoinIndexBetweenWeights(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ n := 1000
+ b := &Builder{
+ sigs: make([]sigslot, n),
+ }
+ for i := 0; i < n; i++ {
+ b.sigs[i].Weight = 2
+ }
+
+ b.sigs[0].L = 0
+ for i := 1; i < n; i++ {
+ b.sigs[i].L = b.sigs[i-1].L + b.sigs[i-1].Weight
+ }
+ for i := 0; i < 2*n; i++ {
+ pos, err := b.coinIndex(uint64(i))
+ a.NoError(err)
+ a.Equal(pos, uint64(i/2))
+ }
+
+ _, err := b.coinIndex(uint64(2*n + 1))
+ a.ErrorIs(err, ErrCoinIndexError)
+}
+
+func TestBuilderWithZeroProvenWeight(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ data := testMessage("hello world").IntoStateProofMessageHash()
+
+ _, err := MakeBuilder(data, stateProofIntervalForTests, 0, nil, nil, stateProofStrengthTargetForTests)
+ a.ErrorIs(err, ErrIllegalInputForLnApprox)
+
+}
+
+func TestBuilder_BuildStateProofCache(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ p := generateProofForTesting(a, true)
+ sp1 := &p.sp
+ sp2, err := p.builder.Build()
+ a.NoError(err)
+ a.Equal(sp1, sp2) // already built, no signatures added
+
+ err = p.builder.Add(p.numberOfParticipnets-1, p.sig)
+ a.NoError(err)
+ sp3, err := p.builder.Build()
+ a.NoError(err)
+ a.NotEqual(sp1, sp3) // better StateProof with added signature should have been built
+
+ sp4, err := p.builder.Build()
+ a.NoError(err)
+ a.Equal(sp3, sp4)
+
+ return
+}
+
func BenchmarkBuildVerify(b *testing.B) {
totalWeight := 1000000
- npart := 10000
+ npart := 1000
- currentRound := basics.Round(compactCertRoundsForTests)
a := require.New(b)
- param := Params{
- Msg: testMessage("hello world"),
- ProvenWeight: uint64(totalWeight / 2),
- SigRound: compactCertRoundsForTests,
- SecKQ: compactCertSecKQForTests,
- }
+ provenWeight := uint64(totalWeight / 2)
+ data := testMessage("hello world").IntoStateProofMessageHash()
var parts []basics.Participant
var partkeys []*merklesignature.Secrets
var sigs []merklesignature.Signature
for i := 0; i < npart; i++ {
- signer := generateTestSigner(0, compactCertRoundsForTests, compactCertRoundsForTests+1, a)
+ signer := generateTestSigner(0, stateProofIntervalForTests+1, stateProofIntervalForTests, a)
part := basics.Participant{
PK: *signer.GetVerifier(),
Weight: uint64(totalWeight / npart),
}
- signerInRound := signer.GetSigner(uint64(currentRound))
- sig, err := signerInRound.Sign(param.Msg)
+ signerInRound := signer.GetSigner(stateProofIntervalForTests)
+ sig, err := signerInRound.SignBytes(data[:])
require.NoError(b, err, "failed to create keys")
partkeys = append(partkeys, signer)
@@ -456,7 +659,7 @@ func BenchmarkBuildVerify(b *testing.B) {
parts = append(parts, part)
}
- var cert *Cert
+ var sp *StateProof
partcom, err := merklearray.BuildVectorCommitmentTree(basics.ParticipantsArray(parts), crypto.HashFactory{HashType: HashType})
if err != nil {
b.Error(err)
@@ -464,19 +667,18 @@ func BenchmarkBuildVerify(b *testing.B) {
b.Run("AddBuild", func(b *testing.B) {
for i := 0; i < b.N; i++ {
- builder, err := MkBuilder(param, parts, partcom)
+ builder, err := MakeBuilder(data, stateProofIntervalForTests, provenWeight, parts, partcom, stateProofStrengthTargetForTests)
if err != nil {
b.Error(err)
}
for i := 0; i < npart; i++ {
- err = builder.Add(uint64(i), sigs[i], true)
- if err != nil {
- b.Error(err)
- }
+ a.False(builder.Present(uint64(i)))
+ a.NoError(builder.IsValid(uint64(i), &sigs[i], true))
+ builder.Add(uint64(i), sigs[i])
}
- cert, err = builder.Build()
+ sp, err = builder.Build()
if err != nil {
b.Error(err)
}
@@ -485,31 +687,10 @@ func BenchmarkBuildVerify(b *testing.B) {
b.Run("Verify", func(b *testing.B) {
for i := 0; i < b.N; i++ {
- verif := MkVerifier(param, partcom.Root())
- if err = verif.Verify(cert); err != nil {
+ verif, _ := MkVerifier(partcom.Root(), provenWeight, stateProofStrengthTargetForTests)
+ if err = verif.Verify(stateProofIntervalForTests, data, sp); err != nil {
b.Error(err)
}
}
})
}
-
-func TestCoinIndex(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- n := 1000
- b := &Builder{
- sigs: make([]sigslot, n),
- sigsHasValidL: true,
- }
-
- for i := 0; i < n; i++ {
- b.sigs[i].L = uint64(i)
- b.sigs[i].Weight = 1
- }
-
- for i := 0; i < n; i++ {
- pos, err := b.coinIndex(uint64(i))
- require.NoError(t, err)
- require.Equal(t, pos, uint64(i))
- }
-}
diff --git a/crypto/stateproof/coinGenerator.go b/crypto/stateproof/coinGenerator.go
new file mode 100644
index 000000000..320232fba
--- /dev/null
+++ b/crypto/stateproof/coinGenerator.go
@@ -0,0 +1,125 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package stateproof
+
+import (
+ "encoding/binary"
+ "golang.org/x/crypto/sha3"
+ "math/big"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+// The coinChoiceSeed defines the randomness seed that will be given to an XOF function. This will be used for choosing
+// the index of the coin to reveal as part of the state proof.
+type coinChoiceSeed struct {
+ // the ToBeHashed function should be updated when fields are added to this structure
+ version byte
+ partCommitment crypto.GenericDigest
+ lnProvenWeight uint64
+ sigCommitment crypto.GenericDigest
+ signedWeight uint64
+ data MessageHash
+}
+
+// ToBeHashed returns a binary representation of the coinChoiceSeed structure.
+// Since this code is also implemented as a circuit in the stateproof SNARK prover we can't use
+// msgpack encoding since it may result in a variable length byte slice.
+// Alternatively, we serialize the fields in the structure in a specific format.
+func (cc *coinChoiceSeed) ToBeHashed() (protocol.HashID, []byte) {
+ var signedWtAsBytes [8]byte
+ binary.LittleEndian.PutUint64(signedWtAsBytes[:], cc.signedWeight)
+
+ var lnProvenWtAsBytes [8]byte
+ binary.LittleEndian.PutUint64(lnProvenWtAsBytes[:], cc.lnProvenWeight)
+
+ coinChoiceBytes := make([]byte, 0, 1+len(cc.partCommitment)+len(lnProvenWtAsBytes)+len(cc.sigCommitment)+len(signedWtAsBytes)+len(cc.data))
+ coinChoiceBytes = append(coinChoiceBytes, cc.version)
+ coinChoiceBytes = append(coinChoiceBytes, cc.partCommitment...)
+ coinChoiceBytes = append(coinChoiceBytes, lnProvenWtAsBytes[:]...)
+ coinChoiceBytes = append(coinChoiceBytes, cc.sigCommitment...)
+ coinChoiceBytes = append(coinChoiceBytes, signedWtAsBytes[:]...)
+ coinChoiceBytes = append(coinChoiceBytes, cc.data[:]...)
+
+ return protocol.StateProofCoin, coinChoiceBytes
+}
+
+// coinGenerator is used for extracting "randomized" 64 bits for coin flips
+type coinGenerator struct {
+ shkContext sha3.ShakeHash
+ signedWeight uint64
+ threshold *big.Int
+}
+
+// makeCoinGenerator creates a new CoinHash context.
+// it is used for squeezing 64 bits for coin flips.
+// the function inits the XOF function in the following manner
+// Shake(coinChoiceSeed)
+// we extract 64 bits from shake for each coin flip and divide it by signedWeight
+func makeCoinGenerator(choice *coinChoiceSeed) coinGenerator {
+ choice.version = VersionForCoinGenerator
+ rep := crypto.HashRep(choice)
+ shk := sha3.NewShake256()
+ shk.Write(rep)
+
+ threshold := prepareRejectionSamplingThreshold(choice.signedWeight)
+ return coinGenerator{shkContext: shk, signedWeight: choice.signedWeight, threshold: threshold}
+
+}
+
+func prepareRejectionSamplingThreshold(signedWeight uint64) *big.Int {
+ // we use rejection sampling in order to have a uniform random coin in [0,signedWeight).
+ // use b bits (b=64) per attempt.
+ // define k = roundDown( 2^b / signedWeight ) implemented as (2^b div signedWeight)
+ // and threshold = k*signedWeight
+ threshold := &big.Int{}
+ threshold.SetUint64(1)
+
+ const numberOfBitsPerAttempt = 64
+ threshold.Lsh(threshold, numberOfBitsPerAttempt)
+
+ signedWt := &big.Int{}
+ signedWt.SetUint64(signedWeight)
+
+ // k = 2^b / signedWeight
+ threshold.Div(threshold, signedWt)
+
+ threshold.Mul(threshold, signedWt)
+ return threshold
+}
+
+// getNextCoin returns the next 64bits integer which represents a number between [0,signedWeight)
+func (cg *coinGenerator) getNextCoin() uint64 {
+ // take b bits from the XOF and generate an integer z.
+ // we accept the sample if z < threshold
+ // else, we reject the sample and repeat the process.
+ var randNumFromXof uint64
+ for {
+ var shakeDigest [8]byte
+ cg.shkContext.Read(shakeDigest[:])
+ randNumFromXof = binary.LittleEndian.Uint64(shakeDigest[:])
+
+ z := &big.Int{}
+ z.SetUint64(randNumFromXof)
+ if z.Cmp(cg.threshold) == -1 {
+ break
+ }
+ }
+
+ return randNumFromXof % cg.signedWeight
+}
diff --git a/crypto/stateproof/coinGenerator_test.go b/crypto/stateproof/coinGenerator_test.go
new file mode 100644
index 000000000..39f3d760c
--- /dev/null
+++ b/crypto/stateproof/coinGenerator_test.go
@@ -0,0 +1,186 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package stateproof
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+// make sure that ToBeHashed function returns a specific length
+// If this test breaks we need to make sure to update the SNARK prover and verifier as well.
+func TestCoinFixedLengthHash(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ var sigcom = make(crypto.GenericDigest, HashSize)
+ var partcom = make(crypto.GenericDigest, HashSize)
+ var data MessageHash
+
+ crypto.RandBytes(sigcom[:])
+ crypto.RandBytes(partcom[:])
+ crypto.RandBytes(data[:])
+
+ choice := coinChoiceSeed{
+ partCommitment: partcom,
+ lnProvenWeight: 454197,
+ sigCommitment: sigcom,
+ signedWeight: 1 << 10,
+ data: data,
+ }
+ e := reflect.ValueOf(choice)
+ a.Equal(6, e.NumField())
+
+ rep := crypto.HashRep(&choice)
+ a.Equal(180, len(rep))
+}
+
+func TestHashCoin(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var slots [32]uint64
+ var sigcom = make(crypto.GenericDigest, HashSize)
+ var partcom = make(crypto.GenericDigest, HashSize)
+ var msgHash MessageHash
+
+ crypto.RandBytes(sigcom[:])
+ crypto.RandBytes(partcom[:])
+ crypto.RandBytes(msgHash[:])
+
+ choice := coinChoiceSeed{
+ signedWeight: uint64(len(slots)),
+ sigCommitment: sigcom,
+ partCommitment: partcom,
+ data: msgHash,
+ }
+ coinHash := makeCoinGenerator(&choice)
+
+ for j := uint64(0); j < 1000; j++ {
+ coin := coinHash.getNextCoin()
+ if coin >= uint64(len(slots)) {
+ t.Errorf("hashCoin out of bounds")
+ }
+
+ slots[coin]++
+ }
+
+ for i, count := range slots {
+ if count < 3 {
+ t.Errorf("slot %d too low: %d", i, count)
+ }
+ if count > 100 {
+ t.Errorf("slot %d too high: %d", i, count)
+ }
+ }
+}
+
+func BenchmarkHashCoin(b *testing.B) {
+ var sigcom = make(crypto.GenericDigest, HashSize)
+ var partcom = make(crypto.GenericDigest, HashSize)
+ var msgHash MessageHash
+
+ crypto.RandBytes(sigcom[:])
+ crypto.RandBytes(partcom[:])
+ crypto.RandBytes(msgHash[:])
+
+ choice := coinChoiceSeed{
+ signedWeight: 1025,
+ sigCommitment: sigcom,
+ partCommitment: partcom,
+ data: msgHash,
+ }
+ coinHash := makeCoinGenerator(&choice)
+
+ for i := 0; i < b.N; i++ {
+ coinHash.getNextCoin()
+ }
+}
+
+func BenchmarkHashCoinGenerate(b *testing.B) {
+ var sigcom = make(crypto.GenericDigest, HashSize)
+ var partcom = make(crypto.GenericDigest, HashSize)
+ var msgHash MessageHash
+
+ crypto.RandBytes(sigcom[:])
+ crypto.RandBytes(partcom[:])
+ crypto.RandBytes(msgHash[:])
+
+ choice := coinChoiceSeed{
+ signedWeight: 1025,
+ sigCommitment: sigcom,
+ partCommitment: partcom,
+ data: msgHash,
+ }
+
+ for i := 0; i < b.N; i++ {
+ makeCoinGenerator(&choice)
+ }
+}
+
+func TestGenerateCoinHashKATs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // This test produces MSS samples for the SNARK verifier.
+ // it will only run explicitly by:
+ //
+ // GEN_KATS=x go test -v . -run=GenerateKat -count=1
+ if os.Getenv("GEN_KATS") == "" {
+ t.Skip("Skipping; GEN_KATS not set")
+ }
+
+ const numReveals = 1000
+ const signedWt = 1 << 10
+ var coinslots [numReveals]uint64
+ var sigcom = make(crypto.GenericDigest, HashSize)
+ var partcom = make(crypto.GenericDigest, HashSize)
+ var data MessageHash
+
+ crypto.RandBytes(sigcom[:])
+ crypto.RandBytes(partcom[:])
+ crypto.RandBytes(data[:])
+
+ choice := coinChoiceSeed{
+ partCommitment: partcom,
+ lnProvenWeight: 454197,
+ sigCommitment: sigcom,
+ signedWeight: signedWt,
+ data: data,
+ }
+
+ coinHash := makeCoinGenerator(&choice)
+
+ for j := uint64(0); j < numReveals; j++ {
+ coinslots[j] = coinHash.getNextCoin()
+
+ }
+ fmt.Printf("signedWeight: %v \n", signedWt)
+ fmt.Printf("number of reveals: %v \n", numReveals)
+ concatString := fmt.Sprint(coinslots)
+ toPrint := strings.Join(strings.Split(concatString, " "), ", ")
+ fmt.Printf("coinvalues: %v \n", toPrint)
+ concatString = fmt.Sprint(crypto.HashRep(&choice))
+ toPrint = strings.Join(strings.Split(concatString, " "), ", ")
+ fmt.Printf("seed: %v \n", toPrint)
+}
diff --git a/crypto/compactcert/committableSignatureSlot.go b/crypto/stateproof/committableSignatureSlot.go
index 97b949cfe..78aef90ec 100644
--- a/crypto/compactcert/committableSignatureSlot.go
+++ b/crypto/stateproof/committableSignatureSlot.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package compactcert
+package stateproof
import (
"encoding/binary"
@@ -34,9 +34,9 @@ type committableSignatureSlot struct {
// ErrIndexOutOfBound returned when an index is out of the array's bound
var ErrIndexOutOfBound = errors.New("index is out of bound")
-// committableSignatureSlotArray is used to create a merkle tree on the compact cert's
+// committableSignatureSlotArray is used to create a merkle tree on the stateproof's
// signature array. it serializes the MSS signatures using a specific format
-// compact cert signature array.
+// state proof signature array.
//msgp:ignore committableSignatureSlotArray
type committableSignatureSlotArray []sigslot
@@ -58,9 +58,12 @@ func (sc committableSignatureSlotArray) Marshal(pos uint64) (crypto.Hashable, er
}
func buildCommittableSignature(sigCommit sigslotCommit) (*committableSignatureSlot, error) {
- if sigCommit.Sig.Signature.Signature == nil {
+ if sigCommit.Sig.MsgIsZero() { // Empty merkle signature
return &committableSignatureSlot{isEmptySlot: true}, nil
}
+ if sigCommit.Sig.Signature == nil { // Merkle signature is not empty, but falcon signature is (invalid case)
+ return nil, fmt.Errorf("buildCommittableSignature: Falcon signature is nil")
+ }
sigBytes, err := sigCommit.Sig.GetFixedLengthHashableRepresentation()
if err != nil {
return nil, err
@@ -74,14 +77,14 @@ func buildCommittableSignature(sigCommit sigslotCommit) (*committableSignatureSl
// be bad for creating SNARK
func (cs *committableSignatureSlot) ToBeHashed() (protocol.HashID, []byte) {
if cs.isEmptySlot {
- return protocol.CompactCertSig, []byte{}
+ return protocol.StateProofSig, []byte{}
}
- binaryLValue := make([]byte, 8)
- binary.LittleEndian.PutUint64(binaryLValue, cs.sigCommit.L)
+ var binaryLValue [8]byte
+ binary.LittleEndian.PutUint64(binaryLValue[:], cs.sigCommit.L)
- sigSlotCommitment := make([]byte, 0, len(binaryLValue)+len(cs.serializedSignature))
- sigSlotCommitment = append(sigSlotCommitment, binaryLValue...)
- sigSlotCommitment = append(sigSlotCommitment, cs.serializedSignature...)
+ sigSlotByteRepresentation := make([]byte, 0, len(binaryLValue)+len(cs.serializedSignature))
+ sigSlotByteRepresentation = append(sigSlotByteRepresentation, binaryLValue[:]...)
+ sigSlotByteRepresentation = append(sigSlotByteRepresentation, cs.serializedSignature...)
- return protocol.CompactCertSig, sigSlotCommitment
+ return protocol.StateProofSig, sigSlotByteRepresentation
}
diff --git a/crypto/compactcert/committableSignatureSlot_test.go b/crypto/stateproof/committableSignatureSlot_test.go
index e7cb31181..811c79c4d 100644
--- a/crypto/compactcert/committableSignatureSlot_test.go
+++ b/crypto/stateproof/committableSignatureSlot_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package compactcert
+package stateproof
import (
"encoding/binary"
@@ -34,29 +34,29 @@ func TestSignatureArrayWithEmptySlot(t *testing.T) {
a := require.New(t)
sigs := make([]sigslot, 2)
- key := generateTestSigner(0, uint64(compactCertRoundsForTests)*20+1, compactCertRoundsForTests, a)
+ key := generateTestSigner(0, uint64(stateProofIntervalForTests)*20+1, stateProofIntervalForTests, a)
message := testMessage("hello world")
- sig, err := key.GetSigner(uint64(256)).Sign(message)
+ sig, err := key.GetSigner(uint64(256)).SignBytes(message)
a.NoError(err)
sigs[0] = sigslot{
Weight: 60,
- sigslotCommit: sigslotCommit{Sig: CompactOneTimeSignature{Signature: sig}, L: 60},
+ sigslotCommit: sigslotCommit{Sig: sig, L: 60},
}
hfactory := crypto.HashFactory{HashType: HashType}
tree, err := merklearray.BuildVectorCommitmentTree(committableSignatureSlotArray(sigs), hfactory)
leftLeafHash := calculateHashOnSigLeaf(t, sig, 60)
- rightLeafHash := hashBytes(hfactory.NewHash(), []byte(protocol.CompactCertSig))
+ rightLeafHash := hashBytes(hfactory.NewHash(), []byte(protocol.StateProofSig))
a.Equal([]byte(tree.Root()), calculateHashOnInternalNode(leftLeafHash, rightLeafHash))
}
func calculateHashOnSigLeaf(t *testing.T, sig merklesignature.Signature, lValue uint64) []byte {
var sigCommitment []byte
- sigCommitment = append(sigCommitment, protocol.CompactCertSig...)
+ sigCommitment = append(sigCommitment, protocol.StateProofSig...)
binaryL := make([]byte, 8)
binary.LittleEndian.PutUint64(binaryL, lValue)
@@ -64,7 +64,7 @@ func calculateHashOnSigLeaf(t *testing.T, sig merklesignature.Signature, lValue
sigCommitment = append(sigCommitment, binaryL...)
//build the expected binary representation of the merkle signature
- serializedSig, err := sig.VerifyingKey.GetSignatureFixedLengthHashableRepresentation(sig.Signature)
+ serializedSig, err := sig.Signature.GetFixedLengthHashableRepresentation()
require.NoError(t, err)
schemeType := make([]byte, 2)
@@ -75,7 +75,7 @@ func calculateHashOnSigLeaf(t *testing.T, sig merklesignature.Signature, lValue
sigCommitment = append(sigCommitment, sig.VerifyingKey.GetFixedLengthHashableRepresentation()...)
treeIdxBytes := make([]byte, 8)
- binary.LittleEndian.PutUint64(treeIdxBytes, sig.MerkleArrayIndex)
+ binary.LittleEndian.PutUint64(treeIdxBytes, sig.VectorCommitmentIndex)
sigCommitment = append(sigCommitment, treeIdxBytes...)
//build the expected binary representation of the merkle signature proof
diff --git a/crypto/stateproof/const.go b/crypto/stateproof/const.go
new file mode 100644
index 000000000..a9dab2813
--- /dev/null
+++ b/crypto/stateproof/const.go
@@ -0,0 +1,37 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package stateproof
+
+import (
+ "github.com/algorand/go-algorand/crypto"
+)
+
+// HashType/ hashSize relate to the type of hash this package uses.
+const (
+ HashType = crypto.Sumhash
+ HashSize = crypto.SumhashDigestSize
+ precisionBits = uint8(16) // number of bits used for log approximation. This should not exceed 63
+ ln2IntApproximation = uint64(45427) // the value of the ln(2) with 16 bits of precision (i.e ln2IntApproximation = ceil( 2^precisionBits * ln(2) ))
+ MaxReveals = 640 // MaxReveals is a bound on allocation and on numReveals to limit log computation
+ // VersionForCoinGenerator is used as part of the seed for Fiat-Shamir. We would change this
+ // value if the state proof verifier algorithm changes. This will allow us to make different coins for different state proof verification algorithms
+ VersionForCoinGenerator = byte(0)
+ // MaxTreeDepth defines the maximal size of a merkle tree depth the state proof allows.
+ MaxTreeDepth = 20
+ // MessageHashType is the type of hash used to generate MessageHash
+ MessageHashType = crypto.Sha256
+)
diff --git a/crypto/compactcert/msgp_gen.go b/crypto/stateproof/msgp_gen.go
index 86fe2426b..93e383c42 100644
--- a/crypto/compactcert/msgp_gen.go
+++ b/crypto/stateproof/msgp_gen.go
@@ -1,4 +1,4 @@
-package compactcert
+package stateproof
// Code generated by github.com/algorand/msgp DO NOT EDIT.
@@ -9,21 +9,13 @@ import (
)
// The following msgp objects are implemented in this file:
-// Cert
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// CompactOneTimeSignature
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
+// MessageHash
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
//
// Reveal
// |-----> (*) MarshalMsg
@@ -33,7 +25,7 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
-// coinChoice
+// StateProof
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
// |-----> (*) UnmarshalMsg
@@ -51,454 +43,42 @@ import (
//
// MarshalMsg implements msgp.Marshaler
-func (z *Cert) MarshalMsg(b []byte) (o []byte) {
+func (z *MessageHash) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0003Len := uint32(5)
- var zb0003Mask uint8 /* 6 bits */
- if (*z).PartProofs.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x1
- }
- if (*z).SigProofs.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x2
- }
- if (*z).SigCommit.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x8
- }
- if len((*z).Reveals) == 0 {
- zb0003Len--
- zb0003Mask |= 0x10
- }
- if (*z).SignedWeight == 0 {
- zb0003Len--
- zb0003Mask |= 0x20
- }
- // variable map header, size zb0003Len
- o = append(o, 0x80|uint8(zb0003Len))
- if zb0003Len != 0 {
- if (zb0003Mask & 0x1) == 0 { // if not empty
- // string "P"
- o = append(o, 0xa1, 0x50)
- o = (*z).PartProofs.MarshalMsg(o)
- }
- if (zb0003Mask & 0x2) == 0 { // if not empty
- // string "S"
- o = append(o, 0xa1, 0x53)
- o = (*z).SigProofs.MarshalMsg(o)
- }
- if (zb0003Mask & 0x8) == 0 { // if not empty
- // string "c"
- o = append(o, 0xa1, 0x63)
- o = (*z).SigCommit.MarshalMsg(o)
- }
- if (zb0003Mask & 0x10) == 0 { // if not empty
- // string "r"
- o = append(o, 0xa1, 0x72)
- if (*z).Reveals == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendMapHeader(o, uint32(len((*z).Reveals)))
- }
- zb0001_keys := make([]uint64, 0, len((*z).Reveals))
- for zb0001 := range (*z).Reveals {
- zb0001_keys = append(zb0001_keys, zb0001)
- }
- sort.Sort(SortUint64(zb0001_keys))
- for _, zb0001 := range zb0001_keys {
- zb0002 := (*z).Reveals[zb0001]
- _ = zb0002
- o = msgp.AppendUint64(o, zb0001)
- o = zb0002.MarshalMsg(o)
- }
- }
- if (zb0003Mask & 0x20) == 0 { // if not empty
- // string "w"
- o = append(o, 0xa1, 0x77)
- o = msgp.AppendUint64(o, (*z).SignedWeight)
- }
- }
+ o = msgp.AppendBytes(o, (*z)[:])
return
}
-func (_ *Cert) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*Cert)
+func (_ *MessageHash) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*MessageHash)
return ok
}
// UnmarshalMsg implements msgp.Unmarshaler
-func (z *Cert) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0003 > 0 {
- zb0003--
- bts, err = (*z).SigCommit.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigCommit")
- return
- }
- }
- if zb0003 > 0 {
- zb0003--
- (*z).SignedWeight, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SignedWeight")
- return
- }
- }
- if zb0003 > 0 {
- zb0003--
- bts, err = (*z).SigProofs.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigProofs")
- return
- }
- }
- if zb0003 > 0 {
- zb0003--
- bts, err = (*z).PartProofs.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PartProofs")
- return
- }
- }
- if zb0003 > 0 {
- zb0003--
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals")
- return
- }
- if zb0005 > MaxReveals {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(MaxReveals))
- err = msgp.WrapError(err, "struct-from-array", "Reveals")
- return
- }
- if zb0006 {
- (*z).Reveals = nil
- } else if (*z).Reveals == nil {
- (*z).Reveals = make(map[uint64]Reveal, zb0005)
- }
- for zb0005 > 0 {
- var zb0001 uint64
- var zb0002 Reveal
- zb0005--
- zb0001, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals")
- return
- }
- bts, err = zb0002.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0001)
- return
- }
- (*z).Reveals[zb0001] = zb0002
- }
- }
- if zb0003 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0003)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0004 {
- (*z) = Cert{}
- }
- for zb0003 > 0 {
- zb0003--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "c":
- bts, err = (*z).SigCommit.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigCommit")
- return
- }
- case "w":
- (*z).SignedWeight, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SignedWeight")
- return
- }
- case "S":
- bts, err = (*z).SigProofs.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigProofs")
- return
- }
- case "P":
- bts, err = (*z).PartProofs.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartProofs")
- return
- }
- case "r":
- var zb0007 int
- var zb0008 bool
- zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals")
- return
- }
- if zb0007 > MaxReveals {
- err = msgp.ErrOverflow(uint64(zb0007), uint64(MaxReveals))
- err = msgp.WrapError(err, "Reveals")
- return
- }
- if zb0008 {
- (*z).Reveals = nil
- } else if (*z).Reveals == nil {
- (*z).Reveals = make(map[uint64]Reveal, zb0007)
- }
- for zb0007 > 0 {
- var zb0001 uint64
- var zb0002 Reveal
- zb0007--
- zb0001, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals")
- return
- }
- bts, err = zb0002.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals", zb0001)
- return
- }
- (*z).Reveals[zb0001] = zb0002
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
+func (z *MessageHash) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ bts, err = msgp.ReadExactBytes(bts, (*z)[:])
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
}
o = bts
return
}
-func (_ *Cert) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*Cert)
+func (_ *MessageHash) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*MessageHash)
return ok
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *Cert) Msgsize() (s int) {
- s = 1 + 2 + (*z).SigCommit.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).SigProofs.Msgsize() + 2 + (*z).PartProofs.Msgsize() + 2 + msgp.MapHeaderSize
- if (*z).Reveals != nil {
- for zb0001, zb0002 := range (*z).Reveals {
- _ = zb0001
- _ = zb0002
- s += 0 + msgp.Uint64Size + zb0002.Msgsize()
- }
- }
+func (z *MessageHash) Msgsize() (s int) {
+ s = msgp.ArrayHeaderSize + (32 * (msgp.ByteSize))
return
}
// MsgIsZero returns whether this is a zero value
-func (z *Cert) MsgIsZero() bool {
- return ((*z).SigCommit.MsgIsZero()) && ((*z).SignedWeight == 0) && ((*z).SigProofs.MsgIsZero()) && ((*z).PartProofs.MsgIsZero()) && (len((*z).Reveals) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *CompactOneTimeSignature) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0001Len := uint32(4)
- var zb0001Mask uint8 /* 6 bits */
- if (*z).Signature.MerkleArrayIndex == 0 {
- zb0001Len--
- zb0001Mask |= 0x4
- }
- if (*z).Signature.Proof.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x8
- }
- if (*z).Signature.Signature.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x10
- }
- if (*z).Signature.VerifyingKey.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x20
- }
- // variable map header, size zb0001Len
- o = append(o, 0x80|uint8(zb0001Len))
- if zb0001Len != 0 {
- if (zb0001Mask & 0x4) == 0 { // if not empty
- // string "idx"
- o = append(o, 0xa3, 0x69, 0x64, 0x78)
- o = msgp.AppendUint64(o, (*z).Signature.MerkleArrayIndex)
- }
- if (zb0001Mask & 0x8) == 0 { // if not empty
- // string "prf"
- o = append(o, 0xa3, 0x70, 0x72, 0x66)
- o = (*z).Signature.Proof.MarshalMsg(o)
- }
- if (zb0001Mask & 0x10) == 0 { // if not empty
- // string "sig"
- o = append(o, 0xa3, 0x73, 0x69, 0x67)
- o = (*z).Signature.Signature.MarshalMsg(o)
- }
- if (zb0001Mask & 0x20) == 0 { // if not empty
- // string "vkey"
- o = append(o, 0xa4, 0x76, 0x6b, 0x65, 0x79)
- o = (*z).Signature.VerifyingKey.MarshalMsg(o)
- }
- }
- return
-}
-
-func (_ *CompactOneTimeSignature) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*CompactOneTimeSignature)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *CompactOneTimeSignature) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0001 int
- var zb0002 bool
- zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).Signature.Signature.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Signature")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- (*z).Signature.MerkleArrayIndex, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "MerkleArrayIndex")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).Signature.Proof.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Proof")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).Signature.VerifyingKey.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VerifyingKey")
- return
- }
- }
- if zb0001 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0001)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 {
- (*z) = CompactOneTimeSignature{}
- }
- for zb0001 > 0 {
- zb0001--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "sig":
- bts, err = (*z).Signature.Signature.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Signature")
- return
- }
- case "idx":
- (*z).Signature.MerkleArrayIndex, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "MerkleArrayIndex")
- return
- }
- case "prf":
- bts, err = (*z).Signature.Proof.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Proof")
- return
- }
- case "vkey":
- bts, err = (*z).Signature.VerifyingKey.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "VerifyingKey")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *CompactOneTimeSignature) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*CompactOneTimeSignature)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *CompactOneTimeSignature) Msgsize() (s int) {
- s = 1 + 4 + (*z).Signature.Signature.Msgsize() + 4 + msgp.Uint64Size + 4 + (*z).Signature.Proof.Msgsize() + 5 + (*z).Signature.VerifyingKey.Msgsize()
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *CompactOneTimeSignature) MsgIsZero() bool {
- return ((*z).Signature.Signature.MsgIsZero()) && ((*z).Signature.MerkleArrayIndex == 0) && ((*z).Signature.Proof.MsgIsZero()) && ((*z).Signature.VerifyingKey.MsgIsZero())
+func (z *MessageHash) MsgIsZero() bool {
+ return (*z) == (MessageHash{})
}
// MarshalMsg implements msgp.Marshaler
@@ -781,140 +361,228 @@ func (z *Reveal) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
-func (z *coinChoice) MarshalMsg(b []byte) (o []byte) {
+func (z *StateProof) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0001Len := uint32(6)
- var zb0001Mask uint8 /* 7 bits */
- if (*z).J == 0 {
- zb0001Len--
- zb0001Mask |= 0x2
+ zb0004Len := uint32(7)
+ var zb0004Mask uint8 /* 8 bits */
+ if (*z).PartProofs.MsgIsZero() {
+ zb0004Len--
+ zb0004Mask |= 0x1
}
- if (*z).MsgHash.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x4
+ if (*z).SigProofs.MsgIsZero() {
+ zb0004Len--
+ zb0004Mask |= 0x2
}
- if (*z).Partcom.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x8
+ if (*z).SigCommit.MsgIsZero() {
+ zb0004Len--
+ zb0004Mask |= 0x8
}
- if (*z).ProvenWeight == 0 {
- zb0001Len--
- zb0001Mask |= 0x10
+ if len((*z).PositionsToReveal) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x10
}
- if (*z).Sigcom.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x20
+ if len((*z).Reveals) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x20
+ }
+ if (*z).MerkleSignatureSaltVersion == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x40
}
if (*z).SignedWeight == 0 {
- zb0001Len--
- zb0001Mask |= 0x40
+ zb0004Len--
+ zb0004Mask |= 0x80
}
- // variable map header, size zb0001Len
- o = append(o, 0x80|uint8(zb0001Len))
- if zb0001Len != 0 {
- if (zb0001Mask & 0x2) == 0 { // if not empty
- // string "j"
- o = append(o, 0xa1, 0x6a)
- o = msgp.AppendUint64(o, (*z).J)
+ // variable map header, size zb0004Len
+ o = append(o, 0x80|uint8(zb0004Len))
+ if zb0004Len != 0 {
+ if (zb0004Mask & 0x1) == 0 { // if not empty
+ // string "P"
+ o = append(o, 0xa1, 0x50)
+ o = (*z).PartProofs.MarshalMsg(o)
}
- if (zb0001Mask & 0x4) == 0 { // if not empty
- // string "msghash"
- o = append(o, 0xa7, 0x6d, 0x73, 0x67, 0x68, 0x61, 0x73, 0x68)
- o = (*z).MsgHash.MarshalMsg(o)
+ if (zb0004Mask & 0x2) == 0 { // if not empty
+ // string "S"
+ o = append(o, 0xa1, 0x53)
+ o = (*z).SigProofs.MarshalMsg(o)
}
- if (zb0001Mask & 0x8) == 0 { // if not empty
- // string "partcom"
- o = append(o, 0xa7, 0x70, 0x61, 0x72, 0x74, 0x63, 0x6f, 0x6d)
- o = (*z).Partcom.MarshalMsg(o)
+ if (zb0004Mask & 0x8) == 0 { // if not empty
+ // string "c"
+ o = append(o, 0xa1, 0x63)
+ o = (*z).SigCommit.MarshalMsg(o)
}
- if (zb0001Mask & 0x10) == 0 { // if not empty
- // string "provenweight"
- o = append(o, 0xac, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x6e, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74)
- o = msgp.AppendUint64(o, (*z).ProvenWeight)
+ if (zb0004Mask & 0x10) == 0 { // if not empty
+ // string "pr"
+ o = append(o, 0xa2, 0x70, 0x72)
+ if (*z).PositionsToReveal == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).PositionsToReveal)))
+ }
+ for zb0003 := range (*z).PositionsToReveal {
+ o = msgp.AppendUint64(o, (*z).PositionsToReveal[zb0003])
+ }
+ }
+ if (zb0004Mask & 0x20) == 0 { // if not empty
+ // string "r"
+ o = append(o, 0xa1, 0x72)
+ if (*z).Reveals == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Reveals)))
+ }
+ zb0001_keys := make([]uint64, 0, len((*z).Reveals))
+ for zb0001 := range (*z).Reveals {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortUint64(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Reveals[zb0001]
+ _ = zb0002
+ o = msgp.AppendUint64(o, zb0001)
+ o = zb0002.MarshalMsg(o)
+ }
}
- if (zb0001Mask & 0x20) == 0 { // if not empty
- // string "sigcom"
- o = append(o, 0xa6, 0x73, 0x69, 0x67, 0x63, 0x6f, 0x6d)
- o = (*z).Sigcom.MarshalMsg(o)
+ if (zb0004Mask & 0x40) == 0 { // if not empty
+ // string "v"
+ o = append(o, 0xa1, 0x76)
+ o = msgp.AppendByte(o, (*z).MerkleSignatureSaltVersion)
}
- if (zb0001Mask & 0x40) == 0 { // if not empty
- // string "sigweight"
- o = append(o, 0xa9, 0x73, 0x69, 0x67, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74)
+ if (zb0004Mask & 0x80) == 0 { // if not empty
+ // string "w"
+ o = append(o, 0xa1, 0x77)
o = msgp.AppendUint64(o, (*z).SignedWeight)
}
}
return
}
-func (_ *coinChoice) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*coinChoice)
+func (_ *StateProof) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*StateProof)
return ok
}
// UnmarshalMsg implements msgp.Unmarshaler
-func (z *coinChoice) UnmarshalMsg(bts []byte) (o []byte, err error) {
+func (z *StateProof) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0001 int
- var zb0002 bool
- zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0001 > 0 {
- zb0001--
- (*z).J, bts, err = msgp.ReadUint64Bytes(bts)
+ if zb0004 > 0 {
+ zb0004--
+ bts, err = (*z).SigCommit.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "J")
+ err = msgp.WrapError(err, "struct-from-array", "SigCommit")
return
}
}
- if zb0001 > 0 {
- zb0001--
+ if zb0004 > 0 {
+ zb0004--
(*z).SignedWeight, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "SignedWeight")
return
}
}
- if zb0001 > 0 {
- zb0001--
- (*z).ProvenWeight, bts, err = msgp.ReadUint64Bytes(bts)
+ if zb0004 > 0 {
+ zb0004--
+ bts, err = (*z).SigProofs.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ProvenWeight")
+ err = msgp.WrapError(err, "struct-from-array", "SigProofs")
return
}
}
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).Sigcom.UnmarshalMsg(bts)
+ if zb0004 > 0 {
+ zb0004--
+ bts, err = (*z).PartProofs.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Sigcom")
+ err = msgp.WrapError(err, "struct-from-array", "PartProofs")
return
}
}
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).Partcom.UnmarshalMsg(bts)
+ if zb0004 > 0 {
+ zb0004--
+ (*z).MerkleSignatureSaltVersion, bts, err = msgp.ReadByteBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Partcom")
+ err = msgp.WrapError(err, "struct-from-array", "MerkleSignatureSaltVersion")
return
}
}
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).MsgHash.UnmarshalMsg(bts)
+ if zb0004 > 0 {
+ zb0004--
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "MsgHash")
+ err = msgp.WrapError(err, "struct-from-array", "Reveals")
+ return
+ }
+ if zb0006 > MaxReveals {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(MaxReveals))
+ err = msgp.WrapError(err, "struct-from-array", "Reveals")
return
}
+ if zb0007 {
+ (*z).Reveals = nil
+ } else if (*z).Reveals == nil {
+ (*z).Reveals = make(map[uint64]Reveal, zb0006)
+ }
+ for zb0006 > 0 {
+ var zb0001 uint64
+ var zb0002 Reveal
+ zb0006--
+ zb0001, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Reveals")
+ return
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0001)
+ return
+ }
+ (*z).Reveals[zb0001] = zb0002
+ }
}
- if zb0001 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0001)
+ if zb0004 > 0 {
+ zb0004--
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "PositionsToReveal")
+ return
+ }
+ if zb0008 > MaxReveals {
+ err = msgp.ErrOverflow(uint64(zb0008), uint64(MaxReveals))
+ err = msgp.WrapError(err, "struct-from-array", "PositionsToReveal")
+ return
+ }
+ if zb0009 {
+ (*z).PositionsToReveal = nil
+ } else if (*z).PositionsToReveal != nil && cap((*z).PositionsToReveal) >= zb0008 {
+ (*z).PositionsToReveal = ((*z).PositionsToReveal)[:zb0008]
+ } else {
+ (*z).PositionsToReveal = make([]uint64, zb0008)
+ }
+ for zb0003 := range (*z).PositionsToReveal {
+ (*z).PositionsToReveal[zb0003], bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "PositionsToReveal", zb0003)
+ return
+ }
+ }
+ }
+ if zb0004 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0004)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -925,53 +593,108 @@ func (z *coinChoice) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err)
return
}
- if zb0002 {
- (*z) = coinChoice{}
+ if zb0005 {
+ (*z) = StateProof{}
}
- for zb0001 > 0 {
- zb0001--
+ for zb0004 > 0 {
+ zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch string(field) {
- case "j":
- (*z).J, bts, err = msgp.ReadUint64Bytes(bts)
+ case "c":
+ bts, err = (*z).SigCommit.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "J")
+ err = msgp.WrapError(err, "SigCommit")
return
}
- case "sigweight":
+ case "w":
(*z).SignedWeight, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "SignedWeight")
return
}
- case "provenweight":
- (*z).ProvenWeight, bts, err = msgp.ReadUint64Bytes(bts)
+ case "S":
+ bts, err = (*z).SigProofs.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "ProvenWeight")
+ err = msgp.WrapError(err, "SigProofs")
return
}
- case "sigcom":
- bts, err = (*z).Sigcom.UnmarshalMsg(bts)
+ case "P":
+ bts, err = (*z).PartProofs.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "Sigcom")
+ err = msgp.WrapError(err, "PartProofs")
return
}
- case "partcom":
- bts, err = (*z).Partcom.UnmarshalMsg(bts)
+ case "v":
+ (*z).MerkleSignatureSaltVersion, bts, err = msgp.ReadByteBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "Partcom")
+ err = msgp.WrapError(err, "MerkleSignatureSaltVersion")
return
}
- case "msghash":
- bts, err = (*z).MsgHash.UnmarshalMsg(bts)
+ case "r":
+ var zb0010 int
+ var zb0011 bool
+ zb0010, zb0011, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "MsgHash")
+ err = msgp.WrapError(err, "Reveals")
return
}
+ if zb0010 > MaxReveals {
+ err = msgp.ErrOverflow(uint64(zb0010), uint64(MaxReveals))
+ err = msgp.WrapError(err, "Reveals")
+ return
+ }
+ if zb0011 {
+ (*z).Reveals = nil
+ } else if (*z).Reveals == nil {
+ (*z).Reveals = make(map[uint64]Reveal, zb0010)
+ }
+ for zb0010 > 0 {
+ var zb0001 uint64
+ var zb0002 Reveal
+ zb0010--
+ zb0001, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Reveals")
+ return
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Reveals", zb0001)
+ return
+ }
+ (*z).Reveals[zb0001] = zb0002
+ }
+ case "pr":
+ var zb0012 int
+ var zb0013 bool
+ zb0012, zb0013, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "PositionsToReveal")
+ return
+ }
+ if zb0012 > MaxReveals {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(MaxReveals))
+ err = msgp.WrapError(err, "PositionsToReveal")
+ return
+ }
+ if zb0013 {
+ (*z).PositionsToReveal = nil
+ } else if (*z).PositionsToReveal != nil && cap((*z).PositionsToReveal) >= zb0012 {
+ (*z).PositionsToReveal = ((*z).PositionsToReveal)[:zb0012]
+ } else {
+ (*z).PositionsToReveal = make([]uint64, zb0012)
+ }
+ for zb0003 := range (*z).PositionsToReveal {
+ (*z).PositionsToReveal[zb0003], bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "PositionsToReveal", zb0003)
+ return
+ }
+ }
default:
err = msgp.ErrNoField(string(field))
if err != nil {
@@ -985,20 +708,28 @@ func (z *coinChoice) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
-func (_ *coinChoice) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*coinChoice)
+func (_ *StateProof) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*StateProof)
return ok
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *coinChoice) Msgsize() (s int) {
- s = 1 + 2 + msgp.Uint64Size + 10 + msgp.Uint64Size + 13 + msgp.Uint64Size + 7 + (*z).Sigcom.Msgsize() + 8 + (*z).Partcom.Msgsize() + 8 + (*z).MsgHash.Msgsize()
+func (z *StateProof) Msgsize() (s int) {
+ s = 1 + 2 + (*z).SigCommit.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).SigProofs.Msgsize() + 2 + (*z).PartProofs.Msgsize() + 2 + msgp.ByteSize + 2 + msgp.MapHeaderSize
+ if (*z).Reveals != nil {
+ for zb0001, zb0002 := range (*z).Reveals {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + msgp.Uint64Size + zb0002.Msgsize()
+ }
+ }
+ s += 3 + msgp.ArrayHeaderSize + (len((*z).PositionsToReveal) * (msgp.Uint64Size))
return
}
// MsgIsZero returns whether this is a zero value
-func (z *coinChoice) MsgIsZero() bool {
- return ((*z).J == 0) && ((*z).SignedWeight == 0) && ((*z).ProvenWeight == 0) && ((*z).Sigcom.MsgIsZero()) && ((*z).Partcom.MsgIsZero()) && ((*z).MsgHash.MsgIsZero())
+func (z *StateProof) MsgIsZero() bool {
+ return ((*z).SigCommit.MsgIsZero()) && ((*z).SignedWeight == 0) && ((*z).SigProofs.MsgIsZero()) && ((*z).PartProofs.MsgIsZero()) && ((*z).MerkleSignatureSaltVersion == 0) && (len((*z).Reveals) == 0) && (len((*z).PositionsToReveal) == 0)
}
// MarshalMsg implements msgp.Marshaler
diff --git a/crypto/compactcert/msgp_gen_test.go b/crypto/stateproof/msgp_gen_test.go
index 1e682b46e..30dd0b1d5 100644
--- a/crypto/compactcert/msgp_gen_test.go
+++ b/crypto/stateproof/msgp_gen_test.go
@@ -1,7 +1,7 @@
//go:build !skip_msgp_testing
// +build !skip_msgp_testing
-package compactcert
+package stateproof
// Code generated by github.com/algorand/msgp DO NOT EDIT.
@@ -14,9 +14,9 @@ import (
"github.com/algorand/go-algorand/test/partitiontest"
)
-func TestMarshalUnmarshalCert(t *testing.T) {
+func TestMarshalUnmarshalMessageHash(t *testing.T) {
partitiontest.PartitionTest(t)
- v := Cert{}
+ v := MessageHash{}
bts := v.MarshalMsg(nil)
left, err := v.UnmarshalMsg(bts)
if err != nil {
@@ -35,12 +35,12 @@ func TestMarshalUnmarshalCert(t *testing.T) {
}
}
-func TestRandomizedEncodingCert(t *testing.T) {
- protocol.RunEncodingTest(t, &Cert{})
+func TestRandomizedEncodingMessageHash(t *testing.T) {
+ protocol.RunEncodingTest(t, &MessageHash{})
}
-func BenchmarkMarshalMsgCert(b *testing.B) {
- v := Cert{}
+func BenchmarkMarshalMsgMessageHash(b *testing.B) {
+ v := MessageHash{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -48,8 +48,8 @@ func BenchmarkMarshalMsgCert(b *testing.B) {
}
}
-func BenchmarkAppendMsgCert(b *testing.B) {
- v := Cert{}
+func BenchmarkAppendMsgMessageHash(b *testing.B) {
+ v := MessageHash{}
bts := make([]byte, 0, v.Msgsize())
bts = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
@@ -60,68 +60,8 @@ func BenchmarkAppendMsgCert(b *testing.B) {
}
}
-func BenchmarkUnmarshalCert(b *testing.B) {
- v := Cert{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalCompactOneTimeSignature(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := CompactOneTimeSignature{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingCompactOneTimeSignature(t *testing.T) {
- protocol.RunEncodingTest(t, &CompactOneTimeSignature{})
-}
-
-func BenchmarkMarshalMsgCompactOneTimeSignature(b *testing.B) {
- v := CompactOneTimeSignature{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgCompactOneTimeSignature(b *testing.B) {
- v := CompactOneTimeSignature{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalCompactOneTimeSignature(b *testing.B) {
- v := CompactOneTimeSignature{}
+func BenchmarkUnmarshalMessageHash(b *testing.B) {
+ v := MessageHash{}
bts := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
@@ -194,9 +134,9 @@ func BenchmarkUnmarshalReveal(b *testing.B) {
}
}
-func TestMarshalUnmarshalcoinChoice(t *testing.T) {
+func TestMarshalUnmarshalStateProof(t *testing.T) {
partitiontest.PartitionTest(t)
- v := coinChoice{}
+ v := StateProof{}
bts := v.MarshalMsg(nil)
left, err := v.UnmarshalMsg(bts)
if err != nil {
@@ -215,12 +155,12 @@ func TestMarshalUnmarshalcoinChoice(t *testing.T) {
}
}
-func TestRandomizedEncodingcoinChoice(t *testing.T) {
- protocol.RunEncodingTest(t, &coinChoice{})
+func TestRandomizedEncodingStateProof(t *testing.T) {
+ protocol.RunEncodingTest(t, &StateProof{})
}
-func BenchmarkMarshalMsgcoinChoice(b *testing.B) {
- v := coinChoice{}
+func BenchmarkMarshalMsgStateProof(b *testing.B) {
+ v := StateProof{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -228,8 +168,8 @@ func BenchmarkMarshalMsgcoinChoice(b *testing.B) {
}
}
-func BenchmarkAppendMsgcoinChoice(b *testing.B) {
- v := coinChoice{}
+func BenchmarkAppendMsgStateProof(b *testing.B) {
+ v := StateProof{}
bts := make([]byte, 0, v.Msgsize())
bts = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
@@ -240,8 +180,8 @@ func BenchmarkAppendMsgcoinChoice(b *testing.B) {
}
}
-func BenchmarkUnmarshalcoinChoice(b *testing.B) {
- v := coinChoice{}
+func BenchmarkUnmarshalStateProof(b *testing.B) {
+ v := StateProof{}
bts := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
diff --git a/crypto/compactcert/structs.go b/crypto/stateproof/structs.go
index 9d3aaca16..d8e0b6883 100644
--- a/crypto/compactcert/structs.go
+++ b/crypto/stateproof/structs.go
@@ -14,35 +14,41 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package compactcert
+package stateproof
import (
+ "fmt"
+ "strconv"
+ "strings"
+
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklearray"
"github.com/algorand/go-algorand/crypto/merklesignature"
"github.com/algorand/go-algorand/data/basics"
)
-// Params defines common parameters for the verifier and builder.
-type Params struct {
- Msg crypto.Hashable // Message to be certified
- ProvenWeight uint64 // Weight threshold proven by the certificate
- SigRound basics.Round // The round for which the ephemeral key is committed to
- SecKQ uint64 // Security parameter (k+q) from analysis document
-}
+// MessageHash represents the message that a state proof will attest to.
+type MessageHash [32]byte
-// CompactOneTimeSignature is crypto.OneTimeSignature with omitempty
-type CompactOneTimeSignature struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
- merklesignature.Signature
+//msgp:ignore sigslot
+type sigslot struct {
+ // Weight is the weight of the participant signing this message.
+ // This information is tracked here for convenience, but it does
+ // not appear in the commitment to the sigs array; it comes from
+ // the Weight field of the corresponding participant.
+ Weight uint64
+
+ // Include the parts of the sigslot that form the commitment to
+ // the sigs array.
+ sigslotCommit
}
-// A sigslotCommit is a single slot in the sigs array that forms the certificate.
+// A sigslotCommit is a single slot in the sigs array that forms the state proof.
type sigslotCommit struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
// Sig is a signature by the participant on the expected message.
- Sig CompactOneTimeSignature `codec:"s"`
+ Sig merklesignature.Signature `codec:"s"`
// L is the total weight of signatures in lower-numbered slots.
// This is initialized once the builder has collected a sufficient
@@ -50,8 +56,8 @@ type sigslotCommit struct {
L uint64 `codec:"l"`
}
-// Reveal is a single array position revealed as part of a compact
-// certificate. It reveals an element of the signature array and
+// Reveal is a single array position revealed as part of a state
+// proof. It reveals an element of the signature array and
// the corresponding element of the participants array.
type Reveal struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
@@ -60,19 +66,42 @@ type Reveal struct {
Part basics.Participant `codec:"p"`
}
-// Cert represents a compact certificate.
-type Cert struct {
+// StateProof represents a proof on Algorand's state.
+type StateProof struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
- SigCommit crypto.GenericDigest `codec:"c"`
- SignedWeight uint64 `codec:"w"`
- SigProofs merklearray.Proof `codec:"S"`
- PartProofs merklearray.Proof `codec:"P"`
-
+ SigCommit crypto.GenericDigest `codec:"c"`
+ SignedWeight uint64 `codec:"w"`
+ SigProofs merklearray.Proof `codec:"S"`
+ PartProofs merklearray.Proof `codec:"P"`
+ MerkleSignatureSaltVersion byte `codec:"v"`
// Reveals is a sparse map from the position being revealed
// to the corresponding elements from the sigs and participants
// arrays.
- Reveals map[uint64]Reveal `codec:"r,allocbound=MaxReveals"`
+ Reveals map[uint64]Reveal `codec:"r,allocbound=MaxReveals"`
+ PositionsToReveal []uint64 `codec:"pr,allocbound=MaxReveals"`
+}
+
+func (s StateProof) stringBuild() (b strings.Builder) {
+ b.WriteString("StateProof: {")
+ defer b.WriteRune('}')
+
+ if s.MsgIsZero() {
+ return
+ }
+
+ b.WriteString(fmt.Sprintf("%v", s.SigCommit))
+ b.WriteString(", ")
+ b.WriteString(strconv.FormatUint(s.SignedWeight, 10))
+ b.WriteString(", ")
+ b.WriteString(strconv.Itoa(len(s.PositionsToReveal)))
+
+ return
+}
+
+func (s StateProof) String() string {
+ b := s.stringBuild()
+ return b.String()
}
// SortUint64 implements sorting by uint64 keys for
diff --git a/crypto/stateproof/verifier.go b/crypto/stateproof/verifier.go
new file mode 100644
index 000000000..892c9d477
--- /dev/null
+++ b/crypto/stateproof/verifier.go
@@ -0,0 +1,154 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package stateproof
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklearray"
+)
+
+// Errors for the StateProof verifier
+var (
+ ErrCoinNotInRange = errors.New("coin is not within slot weight range")
+ ErrNoRevealInPos = errors.New("no reveal for position")
+ ErrTreeDepthTooLarge = errors.New("tree depth is too large")
+)
+
+// Verifier is used to verify a state proof. those fields represent all the verifier's trusted data
+type Verifier struct {
+ strengthTarget uint64
+ lnProvenWeight uint64 // ln(provenWeight) as integer with 16 bits of precision
+ participantsCommitment crypto.GenericDigest
+}
+
+// MkVerifier constructs a verifier to check the state proof. the arguments for this function
+// represent all the verifier's trusted data
+func MkVerifier(partcom crypto.GenericDigest, provenWeight uint64, strengthTarget uint64) (*Verifier, error) {
+ lnProvenWt, err := LnIntApproximation(provenWeight)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Verifier{
+ strengthTarget: strengthTarget,
+ lnProvenWeight: lnProvenWt,
+ participantsCommitment: partcom,
+ }, nil
+}
+
+// MkVerifierWithLnProvenWeight constructs a verifier to check the state proof. the arguments for this function
+// represent all the verifier's trusted data. This function uses the Ln(provenWeight) approximation value
+func MkVerifierWithLnProvenWeight(partcom crypto.GenericDigest, lnProvenWt uint64, strengthTarget uint64) *Verifier {
+ return &Verifier{
+ strengthTarget: strengthTarget,
+ lnProvenWeight: lnProvenWt,
+ participantsCommitment: partcom,
+ }
+}
+
+// Verify checks if s is a valid state proof for the data on a round.
+// it uses the trusted data from the Verifier struct
+func (v *Verifier) Verify(round uint64, data MessageHash, s *StateProof) error {
+ if err := verifyStateProofTreesDepth(s); err != nil {
+ return err
+ }
+
+ nr := uint64(len(s.PositionsToReveal))
+ if err := verifyWeights(s.SignedWeight, v.lnProvenWeight, nr, v.strengthTarget); err != nil {
+ return err
+ }
+
+ version := s.MerkleSignatureSaltVersion
+ for _, reveal := range s.Reveals {
+ if err := reveal.SigSlot.Sig.ValidateSaltVersion(version); err != nil {
+ return err
+ }
+ }
+
+ sigs := make(map[uint64]crypto.Hashable)
+ parts := make(map[uint64]crypto.Hashable)
+
+ for pos, r := range s.Reveals {
+ sig, err := buildCommittableSignature(r.SigSlot)
+ if err != nil {
+ return err
+ }
+
+ sigs[pos] = sig
+ parts[pos] = r.Part
+
+ // verify that the msg and the signature is valid under the given participant's Pk
+ err = r.Part.PK.VerifyBytes(
+ round,
+ data[:],
+ &r.SigSlot.Sig,
+ )
+
+ if err != nil {
+ return fmt.Errorf("signature in reveal pos %d does not verify. error is %w", pos, err)
+ }
+ }
+
+ // verify all the reveals proofs on the signature commitment.
+ if err := merklearray.VerifyVectorCommitment(s.SigCommit[:], sigs, &s.SigProofs); err != nil {
+ return err
+ }
+
+ // verify all the reveals proofs on the participant commitment.
+ if err := merklearray.VerifyVectorCommitment(v.participantsCommitment[:], parts, &s.PartProofs); err != nil {
+ return err
+ }
+
+ choice := coinChoiceSeed{
+ partCommitment: v.participantsCommitment,
+ lnProvenWeight: v.lnProvenWeight,
+ sigCommitment: s.SigCommit,
+ signedWeight: s.SignedWeight,
+ data: data,
+ }
+
+ coinHash := makeCoinGenerator(&choice)
+ for j := uint64(0); j < nr; j++ {
+ pos := s.PositionsToReveal[j]
+ reveal, exists := s.Reveals[pos]
+ if !exists {
+ return fmt.Errorf("%w: %d", ErrNoRevealInPos, pos)
+ }
+
+ coin := coinHash.getNextCoin()
+ if !(reveal.SigSlot.L <= coin && coin < reveal.SigSlot.L+reveal.Part.Weight) {
+ return fmt.Errorf("%w: for reveal pos %d and coin %d, ", ErrCoinNotInRange, pos, coin)
+ }
+ }
+
+ return nil
+}
+
+func verifyStateProofTreesDepth(s *StateProof) error {
+ if s.SigProofs.TreeDepth > MaxTreeDepth {
+ return fmt.Errorf("%w. sigTree depth is %d", ErrTreeDepthTooLarge, s.SigProofs.TreeDepth)
+ }
+
+ if s.PartProofs.TreeDepth > MaxTreeDepth {
+ return fmt.Errorf("%w. partTree depth is %d", ErrTreeDepthTooLarge, s.PartProofs.TreeDepth)
+ }
+
+ return nil
+}
diff --git a/crypto/stateproof/verifier_test.go b/crypto/stateproof/verifier_test.go
new file mode 100644
index 000000000..91ee12311
--- /dev/null
+++ b/crypto/stateproof/verifier_test.go
@@ -0,0 +1,180 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package stateproof
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestVerifyRevelForEachPosition(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ p := generateProofForTesting(a, false)
+ sProof := p.sp
+
+ verifier, err := MkVerifier(p.partCommitment, p.provenWeight, stateProofStrengthTargetForTests)
+ a.NoError(err)
+
+ err = verifier.Verify(stateProofIntervalForTests, p.data, &sProof)
+ a.NoError(err)
+
+ for i := uint64(0); i < p.numberOfParticipnets; i++ {
+ _, ok := sProof.Reveals[i]
+ if !ok {
+ sProof.PositionsToReveal[0] = i
+ break
+ }
+ }
+
+ verifier, err = MkVerifier(p.partCommitment, p.provenWeight, stateProofStrengthTargetForTests)
+ a.NoError(err)
+
+ err = verifier.Verify(stateProofIntervalForTests, p.data, &sProof)
+ a.ErrorIs(err, ErrNoRevealInPos)
+
+}
+
+// TestVerifyWrongCoinSlot this test makes sure that the verifier uses PositionsToReveal array, and opens reveals in a specific order
+// In order to and trick the verifier we need to swap two positions in the PositionsToReveal so the coins will not match.
+func TestVerifyWrongCoinSlot(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ p := generateProofForTesting(a, false)
+ sProof := p.sp
+ verifier, err := MkVerifier(p.partCommitment, p.provenWeight, stateProofStrengthTargetForTests)
+ a.NoError(err)
+
+ err = verifier.Verify(stateProofIntervalForTests, p.data, &sProof)
+ a.NoError(err)
+
+ // we need to find a reveal that will not match the first coin.
+ // In order to accomplish that we will extract the first coin and find a reveals ( > 1, since index 0 will satisfy the verifier)
+ // that doesn't match
+ coinAt0 := sProof.PositionsToReveal[0]
+ choice := coinChoiceSeed{
+ partCommitment: verifier.participantsCommitment,
+ lnProvenWeight: verifier.lnProvenWeight,
+ sigCommitment: sProof.SigCommit,
+ signedWeight: sProof.SignedWeight,
+ data: p.data,
+ }
+ coinHash := makeCoinGenerator(&choice)
+ coin := coinHash.getNextCoin()
+ j := 1
+ for ; j < len(sProof.PositionsToReveal); j++ {
+ reveal := sProof.Reveals[sProof.PositionsToReveal[j]]
+ if !(reveal.SigSlot.L <= coin && coin < reveal.SigSlot.L+reveal.Part.Weight) {
+ break
+ }
+ }
+
+ sProof.PositionsToReveal[0] = sProof.PositionsToReveal[j]
+ sProof.PositionsToReveal[j] = coinAt0
+
+ verifier, err = MkVerifier(p.partCommitment, p.provenWeight, stateProofStrengthTargetForTests)
+ a.NoError(err)
+
+ err = verifier.Verify(stateProofIntervalForTests, p.data, &sProof)
+ a.ErrorIs(err, ErrCoinNotInRange)
+
+}
+
+func TestVerifyBadSignature(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ p := generateProofForTesting(a, false)
+ sProof := p.sp
+
+ verifier, err := MkVerifier(p.partCommitment, p.provenWeight, stateProofStrengthTargetForTests)
+ a.NoError(err)
+ err = verifier.Verify(stateProofIntervalForTests, p.data, &sProof)
+ a.NoError(err)
+
+ key := generateTestSigner(0, uint64(stateProofIntervalForTests)*20+1, stateProofIntervalForTests, a)
+ signerInRound := key.GetSigner(stateProofIntervalForTests)
+ newSig, err := signerInRound.SignBytes([]byte{0x1, 0x2})
+ a.NoError(err)
+
+ rev := sProof.Reveals[0]
+ rev.SigSlot.Sig = newSig
+ sProof.Reveals[0] = rev
+
+ verifier, err = MkVerifier(p.partCommitment, p.provenWeight, stateProofStrengthTargetForTests)
+ a.NoError(err)
+ err = verifier.Verify(stateProofIntervalForTests, p.data, &sProof)
+ a.ErrorIs(err, merklesignature.ErrSignatureSchemeVerificationFailed)
+
+}
+
+func TestVerifyZeroProvenWeight(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ partcommit := crypto.GenericDigest{}
+ _, err := MkVerifier(partcommit, 0, stateProofStrengthTargetForTests)
+ a.ErrorIs(err, ErrIllegalInputForLnApprox)
+}
+
+func TestEqualVerifiers(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ p := generateProofForTesting(a, false)
+ sProof := p.sp
+
+ verifier, err := MkVerifier(p.partCommitment, p.provenWeight, stateProofStrengthTargetForTests)
+ a.NoError(err)
+ err = verifier.Verify(stateProofIntervalForTests, p.data, &sProof)
+ a.NoError(err)
+
+ lnProvenWeight, err := LnIntApproximation(p.provenWeight)
+ verifierLnP := MkVerifierWithLnProvenWeight(p.partCommitment, lnProvenWeight, stateProofStrengthTargetForTests)
+
+ a.Equal(verifierLnP, verifier)
+}
+
+func TestTreeDepth(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ p := generateProofForTesting(a, false)
+ sProof := p.sp
+
+ verifier, err := MkVerifier(p.partCommitment, p.provenWeight, stateProofStrengthTargetForTests)
+ a.NoError(err)
+
+ tmp := sProof.PartProofs.TreeDepth
+ sProof.PartProofs.TreeDepth = MaxTreeDepth + 1
+ a.ErrorIs(verifier.Verify(stateProofIntervalForTests, p.data, &sProof), ErrTreeDepthTooLarge)
+ sProof.PartProofs.TreeDepth = tmp
+
+ tmp = sProof.SigProofs.TreeDepth
+ sProof.SigProofs.TreeDepth = MaxTreeDepth + 1
+ a.ErrorIs(verifier.Verify(stateProofIntervalForTests, p.data, &sProof), ErrTreeDepthTooLarge)
+ sProof.SigProofs.TreeDepth = tmp
+
+ a.NoError(verifier.Verify(stateProofIntervalForTests, p.data, &sProof))
+}
diff --git a/crypto/stateproof/weights.go b/crypto/stateproof/weights.go
new file mode 100644
index 000000000..8d0bdd13d
--- /dev/null
+++ b/crypto/stateproof/weights.go
@@ -0,0 +1,193 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package stateproof
+
+import (
+ "errors"
+ "math"
+ "math/big"
+ "math/bits"
+)
+
+// errors for the weights verification
+var (
+ ErrSignedWeightLessThanProvenWeight = errors.New("signed weight is less than or equal to proven weight")
+ ErrTooManyReveals = errors.New("too many reveals in state proof")
+ ErrZeroSignedWeight = errors.New("signed weight cannot be zero")
+ ErrIllegalInputForLnApprox = errors.New("cannot calculate a ln integer value for 0")
+ ErrInsufficientSignedWeight = errors.New("the number of reveals is not large enough to prove that the desired weight signed, with the desired security level")
+ ErrNegativeNumOfRevealsEquation = errors.New("state proof creation failed: weights will not be able to satisfy the verification equation")
+)
+
+func bigInt(num uint64) *big.Int {
+ return (&big.Int{}).SetUint64(num)
+}
+
+// LnIntApproximation returns a uint64 approximation
+func LnIntApproximation(x uint64) (uint64, error) {
+ if x == 0 {
+ return 0, ErrIllegalInputForLnApprox
+ }
+ result := math.Log(float64(x))
+ precision := uint64(1 << precisionBits)
+ expandWithPrecision := result * float64(precision)
+ return uint64(math.Ceil(expandWithPrecision)), nil
+
+}
+
+// verifyWeights makes sure that the number of reveals in the state proof is correct with respect
+// to the signedWeight and a provenWeight upper bound.
+// This function checks that the following inequality is satisfied
+//
+// numReveals * (3 * 2^b * (signedWeight^2 - 2^2d) + d * (T-1) * Y) >= ((strengthTarget) * T + numReveals * P) * Y
+//
+// where signedWeight/(2^d) >=1 for some integer d>=0, p = P/(2^b) >= ln(provenWeight), t = T/(2^b) >= ln(2) >= (T-1)/(2^b)
+// for some integers P,T >= 0 and b=16.
+//
+// T and b are defined in the code as the constants ln2IntApproximation and precisionBits respectively.
+// P is set to lnProvenWeight argument
+// more details can be found on the Algorand's spec
+func verifyWeights(signedWeight uint64, lnProvenWeight uint64, numOfReveals uint64, strengthTarget uint64) error {
+ if numOfReveals > MaxReveals {
+ return ErrTooManyReveals
+ }
+
+ if signedWeight == 0 {
+ return ErrZeroSignedWeight
+ }
+
+ // in order to make the code more readable and reusable we will define the following expressions:
+ // y = signedWeight^2 + 2^(d + 2) * signedWeight + 2^2d
+ // x = 3 * 2^b * (signedWeight^2 - 2^2d)
+ // w = d * (T - 1)
+ //
+ // numReveals * (3 * 2^b * (signedWeight^2 - 2^2d) + d * (T-1) * Y) >= ((strengthTarget) * T + numReveals * P) * Y
+ // /\
+ // ||
+ // \/
+ // numReveals * (x + w * y) >= ((strengthTarget) * T + numReveals * P) * y
+ y, x, w := getSubExpressions(signedWeight)
+ lhs := &big.Int{}
+ lhs.Set(w).
+ Mul(lhs, y).
+ Add(x, lhs).
+ Mul(bigInt(numOfReveals), lhs)
+
+ revealsTimesP := &big.Int{}
+ revealsTimesP.Set(bigInt(numOfReveals)).Mul(revealsTimesP, bigInt(lnProvenWeight))
+
+ rhs := &big.Int{}
+ rhs.Set(bigInt(strengthTarget))
+ rhs.Mul(rhs, bigInt(ln2IntApproximation)).
+ Add(rhs, revealsTimesP).
+ Mul(rhs, y)
+
+ if lhs.Cmp(rhs) < 0 {
+ return ErrInsufficientSignedWeight
+ }
+
+ return nil
+}
+
+// numReveals computes the number of reveals necessary to achieve the desired
+// security target. We search for small integer that will satisfy the verification
+// inequality checked by the verifyWeights function.
+// In order to make sure the number will satisfy the verifier we will use the following inequality
+//
+// numReveals >= ((strengthTarget) * T * Y / (3 * 2^b * (signedWeight^2 - 2^2d) + (d * (T - 1) - P) * Y))
+// where signedWeight/(2^d) >=1 for some integer d>=0, p = P/(2^b) >= ln(provenWeight), t = T/(2^b) >= ln(2) >= (T-1)/(2^b)
+// for some integers P,T >= 0 and b=16.
+//
+// T and b are defined in the code as the constants ln2IntApproximation and precisionBits respectively,
+// and P is set to lnProvenWeight argument.
+//
+//
+// more details can be found on the Algorand's spec
+func numReveals(signedWeight uint64, lnProvenWeight uint64, strengthTarget uint64) (uint64, error) {
+ // in order to make the code more readable and reusable we will define the following expressions:
+ // y = signedWeight^2 + 2^(d + 2) * signedWeight + 2^2d
+ // x = 3 * 2^b * (signedWeight^2 - 2^2d)
+ // w = d * (T - 1)
+ //
+ // numReveals >= ((strengthTarget) * T * Y / (3 * 2^b * (signedWeight^2 - 2^2d) + (d * (T - 1) - P) * Y))
+ // /\
+ // ||
+ // \/
+ // numReveals >= ((strengthTarget) * T * y / (x + (w - P) * y))
+ y, x, w := getSubExpressions(signedWeight)
+
+ // numerator = strengthTarget * ln2IntApproximation * y
+ numerator := bigInt(strengthTarget)
+ numerator.Mul(numerator, bigInt(ln2IntApproximation)).
+ Mul(numerator, y)
+
+ // denom = x + (w - lnProvenWeight) * y
+ denom := w
+ denom.Sub(denom, bigInt(lnProvenWeight)).
+ Mul(denom, y).
+ Add(x, denom)
+
+ if denom.Sign() <= 0 {
+ return 0, ErrNegativeNumOfRevealsEquation
+ }
+
+ // numberReveals = (numerator / denom) + 1
+ // by adding 1 we guarantee that the return value satisfy the inequality and therefore
+ // will satisfy the verifier.
+ // + 1 to account for the decimal point value loss due to integer division
+ res := numerator.Div(numerator, denom).Uint64() + 1
+ if res > MaxReveals {
+ return 0, ErrTooManyReveals
+ }
+ return res, nil
+}
+
+// getSubExpressions calculate the following expression to make the code more readable and reusable
+// y = signedWeight^2 + 2^(d + 2) * signedWeight + 2^2d
+// x = 3 * 2^b * (signedWeight^2 - 2^2d)
+// w = d * (T - 1)
+func getSubExpressions(signedWeight uint64) (y *big.Int, x *big.Int, w *big.Int) {
+ // find d s.t 2^(d+1) >= signedWeight >= 2^(d)
+ d := uint(bits.Len64(signedWeight)) - 1
+
+ signedWtPower2 := bigInt(signedWeight)
+ signedWtPower2.Mul(signedWtPower2, signedWtPower2)
+
+ //tmp = 2^(d+2)*signedWt
+ tmp := bigInt(1)
+ tmp.Lsh(tmp, d+2).
+ Mul(tmp, bigInt(signedWeight))
+
+ // Y = signedWeight^2 + 2^(d+2)*signedWeight +2^2d == signedWeight^2 + tmp +2^2d
+ y = bigInt(1)
+ y.Lsh(y, 2*d).
+ Add(y, tmp).
+ Add(y, signedWtPower2)
+
+ // x = 3*2^b*(signedWeight^2-2^2d)
+ x = bigInt(1)
+ x.Lsh(x, 2*d).
+ Sub(signedWtPower2, x).
+ Mul(x, bigInt(3)).
+ Mul(x, bigInt(1<<precisionBits))
+
+ // w = d*(T-1)
+ w = bigInt(uint64(d))
+ w.Mul(w, bigInt(ln2IntApproximation-1))
+
+ return
+}
diff --git a/crypto/stateproof/weights_test.go b/crypto/stateproof/weights_test.go
new file mode 100644
index 000000000..d7a694897
--- /dev/null
+++ b/crypto/stateproof/weights_test.go
@@ -0,0 +1,226 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package stateproof
+
+import (
+ "fmt"
+ "math"
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMaxNumberOfRevealsInVerify(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ signedWeight := uint64(10)
+ provenWeight := uint64(10)
+ lnProvenWt, err := LnIntApproximation(provenWeight)
+ a.NoError(err)
+
+ err = verifyWeights(signedWeight, lnProvenWt, MaxReveals+1, stateProofStrengthTargetForTests)
+ a.ErrorIs(err, ErrTooManyReveals)
+}
+
+func TestMaxNumberOfReveals(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ signedWeight := uint64(1<<10 + 1)
+ provenWeight := uint64(1 << 10)
+ lnProvenWt, err := LnIntApproximation(provenWeight)
+ a.NoError(err)
+
+ _, err = numReveals(signedWeight, lnProvenWt, stateProofStrengthTargetForTests)
+ a.ErrorIs(err, ErrTooManyReveals)
+}
+
+func TestVerifyProvenWeight(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ signedWeight := uint64(1 << 11)
+ provenWeight := uint64(1 << 10)
+ lnProvenWt, err := LnIntApproximation(provenWeight)
+ a.NoError(err)
+
+ numOfReveals, err := numReveals(signedWeight, lnProvenWt, stateProofStrengthTargetForTests)
+ a.NoError(err)
+
+ err = verifyWeights(signedWeight, lnProvenWt, numOfReveals, stateProofStrengthTargetForTests)
+ a.NoError(err)
+
+ err = verifyWeights(signedWeight, lnProvenWt, numOfReveals-1, stateProofStrengthTargetForTests)
+ a.ErrorIs(err, ErrInsufficientSignedWeight)
+}
+
+func TestVerifyZeroNumberOfRevealsEquation(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ signedWeight := uint64(1<<15 + 1)
+ provenWeight := uint64(1 << 15)
+ lnProvenWt, err := LnIntApproximation(provenWeight)
+ a.NoError(err)
+
+ _, err = numReveals(signedWeight, lnProvenWt, stateProofStrengthTargetForTests)
+ a.ErrorIs(err, ErrNegativeNumOfRevealsEquation)
+}
+
+func TestLnWithPrecision(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ val, err := LnIntApproximation(2)
+ a.NoError(err)
+
+ // check that precisionBits will not overflow
+ exp := 1 << precisionBits
+ a.Less(precisionBits, uint8(64))
+
+ a.GreaterOrEqual(float64(val)/float64(exp), math.Log(2))
+ a.Greater(math.Log(2), float64(val-1)/float64(exp))
+
+ ln2, err := LnIntApproximation(2)
+ a.NoError(err)
+ a.Equal(ln2IntApproximation, ln2)
+}
+
+func TestVerifyLimits(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ signedWeight := uint64(0)
+ provenWeight := uint64(1<<10 - 1)
+ lnProvenWt, err := LnIntApproximation(provenWeight)
+ a.NoError(err)
+
+ err = verifyWeights(signedWeight, lnProvenWt, MaxReveals-1, stateProofStrengthTargetForTests)
+ a.ErrorIs(err, ErrZeroSignedWeight)
+}
+
+func TestNumRevealsApproxBound(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ // In order to create a valid state proof we need to be bound to a MaxNumberOfReveals.
+ // according to SNARK-friendly weight-verification formula there would be a ratio signedWt/provenWt > 1
+ // that we would not be able to generate proof since the MaxReveals would be too high.
+ // This test points out on the minimal ratio signedWt/provenWt we would ever prdouce.
+
+ for j := 0; j < 10; j++ {
+ sigWt := uint64(1<<(40-j) - 1)
+ // we check the ratios = signedWt/provenWt {3, 2.99, 2.98...1}
+ // ratio = 1.33 (i==167) would give 625 would be the lower bound we can expect
+ for i := 0; i < 168; i++ {
+ a.NoError(checkRatio(i, sigWt, stateProofStrengthTargetForTests))
+ }
+ a.ErrorIs(checkRatio(168, sigWt, stateProofStrengthTargetForTests), ErrTooManyReveals)
+ }
+}
+
+func checkRatio(i int, sigWt uint64, secParam uint64) error {
+ provenWtRatio := 3 - (float64(i) / 100)
+ provenWt := uint64(float64(sigWt) / (provenWtRatio))
+ lnProvenWt, err := LnIntApproximation(provenWt)
+ if err != nil {
+ return err
+ }
+
+ numOfReveals, err := numReveals(sigWt, lnProvenWt, secParam)
+ if err != nil {
+ return fmt.Errorf("failed on sigWt %v provenWt %d ratio is %v i %v err: %w", sigWt, provenWt, provenWtRatio, i, err)
+ }
+
+ log2Sig := math.Log(float64(sigWt)) / math.Log(2)
+ log2Prov := math.Log(float64(provenWt)) / math.Log(2)
+ nr := float64(secParam) / (log2Sig - log2Prov)
+ if 1.01 < float64(numOfReveals)/nr {
+ return fmt.Errorf("approximated number of reveals exceeds limit "+
+ "limit %v, signedWeight: %v provenWeight %v, "+
+ "appox numberOfReveals: %v, real numberOfReveals %v ratio is %v", 1.01, sigWt, provenWt, numOfReveals, nr, provenWtRatio)
+ }
+ return nil
+}
+
+func TestNumReveals(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ billion := uint64(1000 * 1000 * 1000)
+ microalgo := uint64(1000 * 1000)
+ provenWeight := 2 * billion * microalgo
+ strengthTarget := uint64(stateProofStrengthTargetForTests)
+ lnProvenWt, err := LnIntApproximation(provenWeight)
+ a.NoError(err)
+
+ for i := uint64(3); i < 10; i++ {
+ signedWeight := i * billion * microalgo
+ n, err := numReveals(signedWeight, lnProvenWt, strengthTarget)
+ a.NoError(err)
+ if n < 50 || n > 500 {
+ t.Errorf("numReveals(%d, %d, %d) = %d looks suspect",
+ signedWeight, provenWeight, strengthTarget, n)
+ }
+
+ err = verifyWeights(signedWeight, lnProvenWt, n, stateProofStrengthTargetForTests)
+ a.NoError(err)
+ }
+}
+
+func BenchmarkVerifyWeights(b *testing.B) {
+ billion := uint64(1000 * 1000 * 1000)
+ microalgo := uint64(1000 * 1000)
+ provenWeight := 100 * billion * microalgo
+ signedWeight := 110 * billion * microalgo
+ strengthTarget := uint64(stateProofStrengthTargetForTests)
+
+ lnProvenWt, err := LnIntApproximation(provenWeight)
+ require.NoError(b, err)
+
+ nr, err := numReveals(signedWeight, lnProvenWt, strengthTarget)
+ if nr < 900 {
+ b.Errorf("numReveals(%d, %d, %d) = %d < 900", signedWeight, provenWeight, strengthTarget, nr)
+ }
+ require.NoError(b, err)
+
+ for i := 0; i < b.N; i++ {
+ verifyWeights(signedWeight, lnProvenWt, nr, strengthTarget)
+ }
+}
+
+func BenchmarkNumReveals(b *testing.B) {
+ billion := uint64(1000 * 1000 * 1000)
+ microalgo := uint64(1000 * 1000)
+ provenWeight := 100 * billion * microalgo
+ signedWeight := 110 * billion * microalgo
+ strengthTarget := uint64(stateProofStrengthTargetForTests)
+ lnProvenWt, err := LnIntApproximation(provenWeight)
+ require.NoError(b, err)
+
+ nr, err := numReveals(signedWeight, lnProvenWt, strengthTarget)
+ if nr < 900 {
+ b.Errorf("numReveals(%d, %d, %d) = %d < 900", signedWeight, provenWeight, strengthTarget, nr)
+ }
+ require.NoError(b, err)
+
+ for i := 0; i < b.N; i++ {
+ numReveals(signedWeight, lnProvenWt, strengthTarget)
+ }
+}
diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json
index 8e2c69e54..c95c0dbd1 100644
--- a/daemon/algod/api/algod.oas2.json
+++ b/daemon/algod/api/algod.oas2.json
@@ -531,8 +531,8 @@
"schemes": [
"http"
],
- "summary": "Get a Merkle proof for a transaction in a block.",
- "operationId": "GetProof",
+ "summary": "Get a proof for a transaction in a block.",
+ "operationId": "GetTransactionProof",
"parameters": [
{
"type": "integer",
@@ -566,7 +566,7 @@
],
"responses": {
"200": {
- "$ref": "#/responses/ProofResponse"
+ "$ref": "#/responses/TransactionProofResponse"
},
"400": {
"description": "Malformed round number or transaction ID",
@@ -587,7 +587,7 @@
}
},
"500": {
- "description": "Internal error, including protocol not supporting Merkle proofs.",
+ "description": "Internal error, including protocol not supporting proofs.",
"schema": {
"$ref": "#/definitions/ErrorResponse"
}
@@ -1246,6 +1246,130 @@
}
]
},
+ "/v2/stateproofs/{round}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Get a state proof that covers a given round",
+ "operationId": "GetStateProof",
+ "parameters": [
+ {
+ "type": "integer",
+ "description": "The round for which a state proof is desired.",
+ "name": "round",
+ "in": "path",
+ "required": true,
+ "minimum": 0
+ }
+ ],
+ "responses": {
+ "200": {
+ "$ref": "#/responses/StateProofResponse"
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "404": {
+ "description": "Could not find a state proof that covers a given round",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "503": {
+ "description": "Service Temporarily Unavailable",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+ },
+ "parameters": [
+ {
+ "type": "integer",
+ "name": "round",
+ "in": "path",
+ "required": true
+ }
+ ]
+ },
+ "/v2/blocks/{round}/lightheader/proof": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Gets a proof for a given light block header inside a state proof commitment",
+ "operationId": "GetLightBlockHeaderProof",
+ "parameters": [
+ {
+ "type": "integer",
+ "description": "The round to which the light block header belongs.",
+ "name": "round",
+ "in": "path",
+ "required": true,
+ "minimum": 0
+ }
+ ],
+ "responses": {
+ "200": {
+ "$ref": "#/responses/LightBlockHeaderProofResponse"
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "404": {
+ "description": "Could not create proof since some data is missing",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "503": {
+ "description": "Service Temporarily Unavailable",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+ },
+ "parameters": [
+ {
+ "type": "integer",
+ "name": "round",
+ "in": "path",
+ "required": true
+ }
+ ]
+ },
"/v2/applications/{application-id}": {
"get": {
"description": "Given a application ID, it returns application information including creator, approval and clear programs, global and local schemas, and global state.",
@@ -1306,7 +1430,7 @@
"name": "application-id",
"in": "path",
"required": true
- }
+ }
]
},
"/v2/assets/{asset-id}": {
@@ -1369,7 +1493,7 @@
"name": "asset-id",
"in": "path",
"required": true
- }
+ }
]
},
"/v2/teal/compile": {
@@ -1396,7 +1520,7 @@
"type": "string",
"format": "binary"
}
- },
+ },
{
"name": "sourcemap",
"description": "When set to `true`, returns the source map of the program as a JSON. Defaults to `false`.",
@@ -1851,8 +1975,8 @@
"properties": {
"amount": {
"description": "\\[a\\] number of units held.",
- "type": "integer",
- "x-algorand-format": "uint64"
+ "type": "integer",
+ "x-algorand-format": "uint64"
},
"asset-id": {
"description": "Asset ID of the holding.",
@@ -2349,7 +2473,7 @@
"accounts": {
"type": "array",
"items": {
- "$ref": "#/definitions/Account"
+ "$ref": "#/definitions/Account"
}
},
"apps": {
@@ -2484,7 +2608,7 @@
"asset-index": {
"description": "The asset index if the transaction was found and it created an asset.",
"type": "integer"
- },
+ },
"application-index": {
"description": "The application index if the transaction was found and it created an application.",
"type": "integer"
@@ -2545,10 +2669,87 @@
},
"txn": {
"description": "The raw signed transaction.",
- "type": "object",
+ "type": "object",
"x-algorand-format": "SignedTransaction"
}
}
+ },
+ "StateProof": {
+ "description": "Represents a state proof and its corresponding message",
+ "type": "object",
+ "required": [
+ "Message",
+ "StateProof"
+ ],
+ "properties": {
+ "Message": {
+ "description": "Represents the message that the state proofs are attesting to.",
+ "type": "object",
+ "required": [
+ "BlockHeadersCommitment",
+ "VotersCommitment",
+ "LnProvenWeight",
+ "FirstAttestedRound",
+ "LastAttestedRound"
+ ],
+ "properties": {
+ "BlockHeadersCommitment": {
+ "description": "The vector commitment root on all light block headers within a state proof interval.",
+ "type": "string",
+ "format": "byte"
+ },
+ "VotersCommitment": {
+ "description": "The vector commitment root of the top N accounts to sign the next StateProof.",
+ "type": "string",
+ "format": "byte"
+ },
+ "LnProvenWeight": {
+ "description": "An integer value representing the natural log of the proven weight with 16 bits of precision. This value would be used to verify the next state proof.",
+ "type": "integer",
+ "x-algorand-format": "uint64"
+ },
+ "FirstAttestedRound": {
+ "description": "The first round the message attests to.",
+ "type": "integer",
+ "x-algorand-format": "uint64"
+ },
+ "LastAttestedRound": {
+ "description": "The last round the message attests to.",
+ "type": "integer",
+ "x-algorand-format": "uint64"
+ }
+ }
+ },
+ "StateProof": {
+ "description": "The encoded StateProof for the message.",
+ "type": "string",
+ "format": "byte"
+ }
+ }
+ },
+ "LightBlockHeaderProof": {
+ "description": "Proof of membership and position of a light block header.",
+ "type": "object",
+ "required": [
+ "index",
+ "treedepth",
+ "proof"
+ ],
+ "properties": {
+ "index": {
+ "description": "The index of the light block header in the vector commitment tree",
+ "type": "integer"
+ },
+ "treedepth": {
+ "description": "Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.",
+ "type": "integer"
+ },
+ "proof": {
+ "description": "The encoded proof.",
+ "type": "string",
+ "format": "byte"
+ }
+ }
}
},
"parameters": {
@@ -2714,7 +2915,8 @@
"acfg",
"axfer",
"afrz",
- "appl"
+ "appl",
+ "stpf"
],
"type": "string",
"name": "tx-type",
@@ -2722,6 +2924,18 @@
}
},
"responses": {
+ "LightBlockHeaderProofResponse": {
+ "description": "Proof of a light block header.",
+ "schema": {
+ "$ref": "#/definitions/LightBlockHeaderProof"
+ }
+ },
+ "StateProofResponse": {
+ "description": "StateProofResponse wraps the StateProof type in a response.",
+ "schema": {
+ "$ref": "#/definitions/StateProof"
+ }
+ },
"AccountResponse": {
"description": "AccountResponse wraps the Account type in a response.",
"schema": {
@@ -2762,7 +2976,7 @@
"round": {
"description": "The round for which this information is relevant.",
"type": "integer"
- },
+ },
"app-local-state": {
"description": "\\[appl\\] the application local data stored in this account.\n\nThe raw account uses `AppLocalState` for this type.",
"$ref": "#/definitions/ApplicationLocalState"
@@ -2795,7 +3009,7 @@
}
}
},
- "ProofResponse": {
+ "TransactionProofResponse": {
"description": "Proof of transaction in a block.",
"schema": {
"type": "object",
@@ -2808,7 +3022,7 @@
],
"properties": {
"proof": {
- "description": "Merkle proof of transaction membership.",
+ "description": "Proof of transaction membership.",
"type": "string",
"format": "byte"
},
@@ -2854,7 +3068,7 @@
}
}
},
- "CatchpointAbortResponse":{
+ "CatchpointAbortResponse": {
"tags": [
"private"
],
@@ -2990,12 +3204,12 @@
"$ref": "#/definitions/ParticipationKey"
}
},
- "PostParticipationResponse" : {
+ "PostParticipationResponse": {
"description": "Participation ID of the submission",
"schema": {
"type": "object",
"required": [
- "partId"
+ "partId"
],
"properties": {
"partId": {
@@ -3004,9 +3218,7 @@
}
}
}
-
},
-
"PostTransactionsResponse": {
"description": "Transaction ID of the submission.",
"schema": {
diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml
index 0fa09dd55..44d4d1b95 100644
--- a/daemon/algod/api/algod.oas3.yml
+++ b/daemon/algod/api/algod.oas3.yml
@@ -216,7 +216,8 @@
"acfg",
"axfer",
"afrz",
- "appl"
+ "appl",
+ "stpf"
],
"type": "string"
}
@@ -446,6 +447,16 @@
},
"description": "DryrunResponse contains per-txn debug information from a dryrun."
},
+ "LightBlockHeaderProofResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/LightBlockHeaderProof"
+ }
+ }
+ },
+ "description": "Proof of a light block header."
+ },
"NodeStatusResponse": {
"content": {
"application/json": {
@@ -619,52 +630,15 @@
},
"description": "Transaction ID of the submission."
},
- "ProofResponse": {
+ "StateProofResponse": {
"content": {
"application/json": {
"schema": {
- "properties": {
- "hashtype": {
- "description": "The type of hash function used to create the proof, must be one of: \n* sha512_256 \n* sha256",
- "enum": [
- "sha512_256",
- "sha256"
- ],
- "type": "string"
- },
- "idx": {
- "description": "Index of the transaction in the block's payset.",
- "type": "integer"
- },
- "proof": {
- "description": "Merkle proof of transaction membership.",
- "format": "byte",
- "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
- "type": "string"
- },
- "stibhash": {
- "description": "Hash of SignedTxnInBlock for verifying proof.",
- "format": "byte",
- "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
- "type": "string"
- },
- "treedepth": {
- "description": "Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.",
- "type": "integer"
- }
- },
- "required": [
- "hashtype",
- "idx",
- "proof",
- "stibhash",
- "treedepth"
- ],
- "type": "object"
+ "$ref": "#/components/schemas/StateProof"
}
}
},
- "description": "Proof of transaction in a block."
+ "description": "StateProofResponse wraps the StateProof type in a response."
},
"SupplyResponse": {
"content": {
@@ -744,6 +718,53 @@
},
"description": "TransactionParams contains the parameters that help a client construct a new transaction."
},
+ "TransactionProofResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "hashtype": {
+ "description": "The type of hash function used to create the proof, must be one of: \n* sha512_256 \n* sha256",
+ "enum": [
+ "sha512_256",
+ "sha256"
+ ],
+ "type": "string"
+ },
+ "idx": {
+ "description": "Index of the transaction in the block's payset.",
+ "type": "integer"
+ },
+ "proof": {
+ "description": "Proof of transaction membership.",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ },
+ "stibhash": {
+ "description": "Hash of SignedTxnInBlock for verifying proof.",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ },
+ "treedepth": {
+ "description": "Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.",
+ "type": "integer"
+ }
+ },
+ "required": [
+ "hashtype",
+ "idx",
+ "proof",
+ "stibhash",
+ "treedepth"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Proof of transaction in a block."
+ },
"VersionsResponse": {
"content": {
"application/json": {
@@ -1443,6 +1464,31 @@
],
"type": "object"
},
+ "LightBlockHeaderProof": {
+ "description": "Proof of membership and position of a light block header.",
+ "properties": {
+ "index": {
+ "description": "The index of the light block header in the vector commitment tree",
+ "type": "integer"
+ },
+ "proof": {
+ "description": "The encoded proof.",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ },
+ "treedepth": {
+ "description": "Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.",
+ "type": "integer"
+ }
+ },
+ "required": [
+ "index",
+ "proof",
+ "treedepth"
+ ],
+ "type": "object"
+ },
"ParticipationKey": {
"description": "Represents a participation key used by the node.",
"properties": {
@@ -1573,6 +1619,62 @@
},
"type": "array"
},
+ "StateProof": {
+ "description": "Represents a state proof and its corresponding message",
+ "properties": {
+ "Message": {
+ "description": "Represents the message that the state proofs are attesting to.",
+ "properties": {
+ "BlockHeadersCommitment": {
+ "description": "The vector commitment root on all light block headers within a state proof interval.",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ },
+ "FirstAttestedRound": {
+ "description": "The first round the message attests to.",
+ "type": "integer",
+ "x-algorand-format": "uint64"
+ },
+ "LastAttestedRound": {
+ "description": "The last round the message attests to.",
+ "type": "integer",
+ "x-algorand-format": "uint64"
+ },
+ "LnProvenWeight": {
+ "description": "An integer value representing the natural log of the proven weight with 16 bits of precision. This value would be used to verify the next state proof.",
+ "type": "integer",
+ "x-algorand-format": "uint64"
+ },
+ "VotersCommitment": {
+ "description": "The vector commitment root of the top N accounts to sign the next StateProof.",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ }
+ },
+ "required": [
+ "BlockHeadersCommitment",
+ "FirstAttestedRound",
+ "LastAttestedRound",
+ "LnProvenWeight",
+ "VotersCommitment"
+ ],
+ "type": "object"
+ },
+ "StateProof": {
+ "description": "The encoded StateProof for the message.",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ }
+ },
+ "required": [
+ "Message",
+ "StateProof"
+ ],
+ "type": "object"
+ },
"TealKeyValue": {
"description": "Represents a key-value pair in an application store.",
"properties": {
@@ -2596,9 +2698,83 @@
"summary": "Get the block for the given round."
}
},
+ "/v2/blocks/{round}/lightheader/proof": {
+ "get": {
+ "operationId": "GetLightBlockHeaderProof",
+ "parameters": [
+ {
+ "description": "The round to which the light block header belongs.",
+ "in": "path",
+ "name": "round",
+ "required": true,
+ "schema": {
+ "minimum": 0,
+ "type": "integer"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/LightBlockHeaderProof"
+ }
+ }
+ },
+ "description": "Proof of a light block header."
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "404": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Could not create proof since some data is missing"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "503": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Service Temporarily Unavailable"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Gets a proof for a given light block header inside a state proof commitment"
+ }
+ },
"/v2/blocks/{round}/transactions/{txid}/proof": {
"get": {
- "operationId": "GetProof",
+ "operationId": "GetTransactionProof",
"parameters": [
{
"description": "The round in which the transaction appears.",
@@ -2663,7 +2839,7 @@
"type": "integer"
},
"proof": {
- "description": "Merkle proof of transaction membership.",
+ "description": "Proof of transaction membership.",
"format": "byte",
"pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
"type": "string"
@@ -2730,14 +2906,14 @@
}
}
},
- "description": "Internal error, including protocol not supporting Merkle proofs."
+ "description": "Internal error, including protocol not supporting proofs."
},
"default": {
"content": {},
"description": "Unknown error"
}
},
- "summary": "Get a Merkle proof for a transaction in a block."
+ "summary": "Get a proof for a transaction in a block."
}
},
"/v2/catchup/{catchpoint}": {
@@ -3394,6 +3570,80 @@
]
}
},
+ "/v2/stateproofs/{round}": {
+ "get": {
+ "operationId": "GetStateProof",
+ "parameters": [
+ {
+ "description": "The round for which a state proof is desired.",
+ "in": "path",
+ "name": "round",
+ "required": true,
+ "schema": {
+ "minimum": 0,
+ "type": "integer"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/StateProof"
+ }
+ }
+ },
+ "description": "StateProofResponse wraps the StateProof type in a response."
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "404": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Could not find a state proof that covers a given round"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "503": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Service Temporarily Unavailable"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Get a state proof that covers a given round"
+ }
+ },
"/v2/status": {
"get": {
"operationId": "GetStatus",
diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go
index ecaafc636..b5c309756 100644
--- a/daemon/algod/api/client/restClient.go
+++ b/daemon/algod/api/client/restClient.go
@@ -654,8 +654,14 @@ func (client RestClient) RawDryrun(data []byte) (response []byte, err error) {
return
}
-// Proof gets a Merkle proof for a transaction in a block.
-func (client RestClient) Proof(txid string, round uint64, hashType crypto.HashType) (response generatedV2.ProofResponse, err error) {
+// LightBlockHeaderProof gets a Merkle proof for the light block header of a given round.
+func (client RestClient) LightBlockHeaderProof(round uint64) (response generatedV2.LightBlockHeaderProofResponse, err error) {
+ err = client.get(&response, fmt.Sprintf("/v2/blocks/%d/lightheader/proof", round), nil)
+ return
+}
+
+// TransactionProof gets a Merkle proof for a transaction in a block.
+func (client RestClient) TransactionProof(txid string, round uint64, hashType crypto.HashType) (response generatedV2.TransactionProofResponse, err error) {
txid = stripTransaction(txid)
err = client.get(&response, fmt.Sprintf("/v2/blocks/%d/transactions/%s/proof", round, txid), proofParams{HashType: hashType.String()})
return
diff --git a/daemon/algod/api/server/lib/bundledSpecInject.go b/daemon/algod/api/server/lib/bundledSpecInject.go
index fefff9416..50fc14c2e 100644
--- a/daemon/algod/api/server/lib/bundledSpecInject.go
+++ b/daemon/algod/api/server/lib/bundledSpecInject.go
@@ -2384,740 +2384,386 @@ func init() {
0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x65, 0x72, 0x69, 0x6F,
0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x78, 0x6E,
0x52, 0x6F, 0x6F, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x74, 0x69, 0x6D, 0x65, 0x73, 0x74, 0x61, 0x6D, 0x70, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x43, 0x65, 0x72, 0x74,
- 0x56, 0x6F, 0x74, 0x65, 0x72, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x43, 0x65, 0x72, 0x74, 0x56, 0x6F, 0x74,
- 0x65, 0x72, 0x73, 0x54, 0x6F, 0x74, 0x61, 0x6C, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x43, 0x65, 0x72, 0x74, 0x4E,
- 0x65, 0x78, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72,
- 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x43, 0x65, 0x72, 0x74, 0x4E, 0x65, 0x78,
- 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F,
- 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x43, 0x65, 0x72, 0x74,
- 0x4E, 0x65, 0x78, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65,
- 0x20, 0x6E, 0x65, 0x78, 0x74, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x66, 0x6F, 0x72, 0x20,
- 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x61, 0x20, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x20,
- 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x20, 0x69, 0x73, 0x5C, 0x6E,
- 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69,
- 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69,
- 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43,
- 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x43, 0x65, 0x72, 0x74, 0x4E, 0x65, 0x78, 0x74, 0x52, 0x6F,
- 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74,
- 0x43, 0x65, 0x72, 0x74, 0x56, 0x6F, 0x74, 0x65, 0x72, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74,
- 0x43, 0x65, 0x72, 0x74, 0x56, 0x6F, 0x74, 0x65, 0x72, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68,
- 0x65, 0x20, 0x72, 0x6F, 0x6F, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6D, 0x65,
- 0x72, 0x6B, 0x6C, 0x65, 0x20, 0x74, 0x72, 0x65, 0x65, 0x20, 0x6F, 0x66, 0x20, 0x76, 0x6F, 0x74,
- 0x65, 0x72, 0x73, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x20,
- 0x63, 0x65, 0x72, 0x74, 0x73, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69,
- 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F,
- 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74,
- 0x43, 0x65, 0x72, 0x74, 0x56, 0x6F, 0x74, 0x65, 0x72, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x43, 0x65, 0x72, 0x74, 0x56, 0x6F, 0x74, 0x65, 0x72,
- 0x73, 0x54, 0x6F, 0x74, 0x61, 0x6C, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F,
- 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x43, 0x65, 0x72, 0x74,
- 0x56, 0x6F, 0x74, 0x65, 0x72, 0x73, 0x54, 0x6F, 0x74, 0x61, 0x6C, 0x20, 0x69, 0x73, 0x20, 0x74,
- 0x68, 0x65, 0x20, 0x74, 0x6F, 0x74, 0x61, 0x6C, 0x20, 0x61, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x20,
- 0x6F, 0x66, 0x20, 0x6D, 0x69, 0x63, 0x72, 0x6F, 0x61, 0x6C, 0x67, 0x6F, 0x73, 0x20, 0x68, 0x65,
- 0x6C, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, 0x20, 0x76, 0x6F, 0x74, 0x65, 0x72, 0x73,
- 0x20, 0x69, 0x6E, 0x5C, 0x6E, 0x74, 0x68, 0x65, 0x20, 0x43, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74,
- 0x43, 0x65, 0x72, 0x74, 0x56, 0x6F, 0x74, 0x65, 0x72, 0x73, 0x20, 0x6D, 0x65, 0x72, 0x6B, 0x6C,
- 0x65, 0x20, 0x74, 0x72, 0x65, 0x65, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74,
- 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74,
- 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6F, 0x6D,
- 0x70, 0x61, 0x63, 0x74, 0x43, 0x65, 0x72, 0x74, 0x56, 0x6F, 0x74, 0x65, 0x72, 0x73, 0x54, 0x6F,
- 0x74, 0x61, 0x6C, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74,
- 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x50, 0x72,
- 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x73, 0x74, 0x72, 0x69,
- 0x6E, 0x67, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6E,
- 0x74, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x70,
- 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72,
- 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x75,
- 0x72, 0x72, 0x65, 0x6E, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x22, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x66, 0x72, 0x61, 0x63, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
- 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x68, 0x65, 0x20, 0x6E, 0x75, 0x6D, 0x62, 0x65, 0x72,
- 0x20, 0x6F, 0x66, 0x20, 0x6C, 0x65, 0x66, 0x74, 0x6F, 0x76, 0x65, 0x72, 0x20, 0x4D, 0x69, 0x63,
- 0x72, 0x6F, 0x41, 0x6C, 0x67, 0x6F, 0x73, 0x20, 0x61, 0x66, 0x74, 0x65, 0x72, 0x20, 0x74, 0x68,
- 0x65, 0x20, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x6F,
- 0x66, 0x20, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x52, 0x61, 0x74, 0x65, 0x2F, 0x72, 0x65,
- 0x77, 0x61, 0x72, 0x64, 0x55, 0x6E, 0x69, 0x74, 0x73, 0x5C, 0x6E, 0x4D, 0x69, 0x63, 0x72, 0x6F,
- 0x41, 0x6C, 0x67, 0x6F, 0x73, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x65, 0x76, 0x65, 0x72, 0x79, 0x20,
- 0x72, 0x65, 0x77, 0x61, 0x72, 0x64, 0x20, 0x75, 0x6E, 0x69, 0x74, 0x20, 0x69, 0x6E, 0x20, 0x74,
- 0x68, 0x65, 0x20, 0x6E, 0x65, 0x78, 0x74, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x2E, 0x22, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65,
- 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22,
- 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65,
- 0x22, 0x3A, 0x20, 0x22, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x69, 0x64,
- 0x75, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x68, 0x61, 0x73, 0x68, 0x22, 0x3A, 0x20, 0x7B,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x48, 0x61, 0x73, 0x68, 0x20,
- 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x62,
- 0x6C, 0x6F, 0x63, 0x6B, 0x20, 0x68, 0x61, 0x73, 0x68, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73,
- 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22,
- 0x48, 0x61, 0x73, 0x68, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6E, 0x65, 0x78, 0x74, 0x50, 0x72,
- 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F,
- 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F,
- 0x6C, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x20, 0x74, 0x68,
- 0x61, 0x74, 0x20, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6E, 0x74, 0x73, 0x20, 0x74, 0x68,
- 0x65, 0x20, 0x6E, 0x65, 0x78, 0x74, 0x20, 0x70, 0x72, 0x6F, 0x70, 0x6F, 0x73, 0x65, 0x64, 0x20,
- 0x70, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74,
- 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4E,
- 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x22, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x6E, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x41, 0x70, 0x70,
- 0x72, 0x6F, 0x76, 0x61, 0x6C, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F,
- 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F,
- 0x6C, 0x41, 0x70, 0x70, 0x72, 0x6F, 0x76, 0x61, 0x6C, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68,
- 0x65, 0x20, 0x6E, 0x75, 0x6D, 0x62, 0x65, 0x72, 0x20, 0x6F, 0x66, 0x20, 0x62, 0x6C, 0x6F, 0x63,
- 0x6B, 0x73, 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x61, 0x70, 0x70, 0x72, 0x6F, 0x76, 0x65,
- 0x64, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x20, 0x75,
- 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65,
- 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36,
- 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78,
- 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74,
- 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x41, 0x70, 0x70, 0x72, 0x6F, 0x76, 0x61, 0x6C,
- 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6E, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F,
- 0x63, 0x6F, 0x6C, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x4F, 0x6E, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x50, 0x72,
- 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x4F, 0x6E, 0x20, 0x69,
- 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x6F, 0x6E, 0x20, 0x77,
- 0x68, 0x69, 0x63, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F,
- 0x6C, 0x20, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x20, 0x77, 0x69, 0x6C, 0x6C, 0x20, 0x74,
- 0x61, 0x6B, 0x65, 0x20, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22,
- 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75,
- 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22,
- 0x4E, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x53, 0x77, 0x69, 0x74,
- 0x63, 0x68, 0x4F, 0x6E, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6E, 0x65, 0x78, 0x74, 0x50, 0x72,
- 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x56, 0x6F, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6F, 0x72, 0x65,
- 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E,
- 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x56, 0x6F, 0x74, 0x65, 0x42,
- 0x65, 0x66, 0x6F, 0x72, 0x65, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x65, 0x61,
- 0x64, 0x6C, 0x69, 0x6E, 0x65, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x66, 0x6F, 0x72, 0x20,
- 0x74, 0x68, 0x69, 0x73, 0x20, 0x70, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x20, 0x75, 0x70,
- 0x67, 0x72, 0x61, 0x64, 0x65, 0x20, 0x28, 0x4E, 0x6F, 0x20, 0x76, 0x6F, 0x74, 0x65, 0x73, 0x20,
- 0x77, 0x69, 0x6C, 0x6C, 0x20, 0x62, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x73, 0x69, 0x64, 0x65, 0x72,
- 0x20, 0x61, 0x66, 0x74, 0x65, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x72, 0x6F, 0x75, 0x6E,
- 0x64, 0x29, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72,
- 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D,
- 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F, 0x74,
- 0x6F, 0x63, 0x6F, 0x6C, 0x56, 0x6F, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6F, 0x72, 0x65, 0x22, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x70, 0x65, 0x72, 0x69, 0x6F, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x50, 0x65, 0x72, 0x69, 0x6F, 0x64, 0x20,
- 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x65, 0x72, 0x69, 0x6F, 0x64, 0x20, 0x6F, 0x6E,
- 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B,
- 0x20, 0x77, 0x61, 0x73, 0x20, 0x63, 0x6F, 0x6E, 0x66, 0x69, 0x72, 0x6D, 0x65, 0x64, 0x22, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65,
- 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22,
- 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65,
- 0x22, 0x3A, 0x20, 0x22, 0x50, 0x65, 0x72, 0x69, 0x6F, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x70, 0x72, 0x65, 0x76, 0x69, 0x6F, 0x75, 0x73, 0x42, 0x6C, 0x6F, 0x63, 0x6B, 0x48, 0x61, 0x73,
- 0x68, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
- 0x50, 0x72, 0x65, 0x76, 0x69, 0x6F, 0x75, 0x73, 0x42, 0x6C, 0x6F, 0x63, 0x6B, 0x48, 0x61, 0x73,
- 0x68, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6F, 0x75,
- 0x73, 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x20, 0x68, 0x61, 0x73, 0x68, 0x22, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
- 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22,
- 0x3A, 0x20, 0x22, 0x50, 0x72, 0x65, 0x76, 0x69, 0x6F, 0x75, 0x73, 0x42, 0x6C, 0x6F, 0x63, 0x6B,
- 0x48, 0x61, 0x73, 0x68, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x6F, 0x73,
- 0x65, 0x72, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x74, 0x69, 0x6D, 0x65, 0x73, 0x74, 0x61, 0x6D, 0x70, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65,
+ 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63,
+ 0x6F, 0x6C, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20,
- 0x22, 0x50, 0x72, 0x6F, 0x70, 0x6F, 0x73, 0x65, 0x72, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65,
- 0x20, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x68, 0x69, 0x73,
- 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x20, 0x70, 0x72, 0x6F, 0x70, 0x6F, 0x73, 0x65, 0x72, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70,
- 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61,
- 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x50, 0x72, 0x6F, 0x70, 0x6F, 0x73, 0x65, 0x72, 0x22, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x72, 0x61, 0x74, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x68, 0x65, 0x20, 0x6E, 0x75, 0x6D, 0x62, 0x65,
- 0x72, 0x20, 0x6F, 0x66, 0x20, 0x6E, 0x65, 0x77, 0x20, 0x4D, 0x69, 0x63, 0x72, 0x6F, 0x41, 0x6C,
- 0x67, 0x6F, 0x73, 0x20, 0x61, 0x64, 0x64, 0x65, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x74, 0x68, 0x65,
- 0x20, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x73,
- 0x74, 0x61, 0x6B, 0x65, 0x20, 0x66, 0x72, 0x6F, 0x6D, 0x20, 0x72, 0x65, 0x77, 0x61, 0x72, 0x64,
- 0x73, 0x20, 0x61, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6E, 0x65, 0x78, 0x74, 0x20, 0x72, 0x6F,
- 0x75, 0x6E, 0x64, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65,
- 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66,
- 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67,
- 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64,
- 0x73, 0x52, 0x61, 0x74, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x77, 0x61, 0x72,
- 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
- 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x4C, 0x65, 0x76, 0x65, 0x6C, 0x20, 0x73, 0x70, 0x65,
- 0x63, 0x69, 0x66, 0x69, 0x65, 0x73, 0x20, 0x68, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E, 0x79, 0x20,
- 0x72, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x2C, 0x20, 0x69, 0x6E, 0x20, 0x4D, 0x69, 0x63, 0x72,
- 0x6F, 0x41, 0x6C, 0x67, 0x6F, 0x73, 0x2C, 0x5C, 0x6E, 0x68, 0x61, 0x76, 0x65, 0x20, 0x62, 0x65,
- 0x65, 0x6E, 0x20, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x20, 0x74,
- 0x6F, 0x20, 0x65, 0x61, 0x63, 0x68, 0x20, 0x63, 0x6F, 0x6E, 0x66, 0x69, 0x67, 0x2E, 0x50, 0x72,
- 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x2E, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x55, 0x6E, 0x69,
- 0x74, 0x5C, 0x6E, 0x6F, 0x66, 0x20, 0x4D, 0x69, 0x63, 0x72, 0x6F, 0x41, 0x6C, 0x67, 0x6F, 0x73,
- 0x20, 0x73, 0x69, 0x6E, 0x63, 0x65, 0x20, 0x67, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x2E, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70,
- 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74,
- 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D,
- 0x65, 0x22, 0x3A, 0x20, 0x22, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x4C, 0x65, 0x76, 0x65,
- 0x6C, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x3A, 0x20, 0x7B,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x52, 0x6F, 0x75, 0x6E, 0x64,
- 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x20,
- 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x6F, 0x6E, 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x74,
- 0x68, 0x69, 0x73, 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x20, 0x77, 0x61, 0x73, 0x20, 0x61, 0x70,
- 0x70, 0x65, 0x6E, 0x64, 0x65, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x68,
- 0x61, 0x69, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F,
- 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F,
- 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x73, 0x65, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x53, 0x65, 0x65, 0x64, 0x20, 0x69, 0x73, 0x20, 0x74,
- 0x68, 0x65, 0x20, 0x73, 0x6F, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x73, 0x65, 0x65,
- 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74,
+ 0x22, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C,
+ 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x20, 0x74, 0x68, 0x61,
+ 0x74, 0x20, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6E, 0x74, 0x73, 0x20, 0x74, 0x68, 0x65,
+ 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x70, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F,
+ 0x6C, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74,
0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D,
- 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x53, 0x65, 0x65, 0x64, 0x22, 0x0A, 0x20, 0x20,
+ 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x50,
+ 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x72, 0x61,
+ 0x63, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
+ 0x54, 0x68, 0x65, 0x20, 0x6E, 0x75, 0x6D, 0x62, 0x65, 0x72, 0x20, 0x6F, 0x66, 0x20, 0x6C, 0x65,
+ 0x66, 0x74, 0x6F, 0x76, 0x65, 0x72, 0x20, 0x4D, 0x69, 0x63, 0x72, 0x6F, 0x41, 0x6C, 0x67, 0x6F,
+ 0x73, 0x20, 0x61, 0x66, 0x74, 0x65, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x69, 0x73, 0x74,
+ 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x6F, 0x66, 0x20, 0x52, 0x65, 0x77, 0x61,
+ 0x72, 0x64, 0x73, 0x52, 0x61, 0x74, 0x65, 0x2F, 0x72, 0x65, 0x77, 0x61, 0x72, 0x64, 0x55, 0x6E,
+ 0x69, 0x74, 0x73, 0x5C, 0x6E, 0x4D, 0x69, 0x63, 0x72, 0x6F, 0x41, 0x6C, 0x67, 0x6F, 0x73, 0x20,
+ 0x66, 0x6F, 0x72, 0x20, 0x65, 0x76, 0x65, 0x72, 0x79, 0x20, 0x72, 0x65, 0x77, 0x61, 0x72, 0x64,
+ 0x20, 0x75, 0x6E, 0x69, 0x74, 0x20, 0x69, 0x6E, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6E, 0x65, 0x78,
+ 0x74, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E,
+ 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E,
+ 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x52, 0x65,
+ 0x77, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x69, 0x64, 0x75, 0x65, 0x22, 0x0A, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x74, 0x69, 0x6D, 0x65, 0x73, 0x74, 0x61, 0x6D, 0x70, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x69, 0x6D, 0x65, 0x53, 0x74,
- 0x61, 0x6D, 0x70, 0x20, 0x69, 0x6E, 0x20, 0x73, 0x65, 0x63, 0x6F, 0x6E, 0x64, 0x73, 0x20, 0x73,
- 0x69, 0x6E, 0x63, 0x65, 0x20, 0x65, 0x70, 0x6F, 0x63, 0x68, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22,
- 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x69,
- 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x54,
- 0x69, 0x6D, 0x65, 0x73, 0x74, 0x61, 0x6D, 0x70, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x78,
- 0x6E, 0x52, 0x6F, 0x6F, 0x74, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E,
- 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73,
- 0x52, 0x6F, 0x6F, 0x74, 0x20, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6E, 0x74, 0x69, 0x63, 0x61, 0x74,
- 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x72,
- 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x20, 0x61, 0x70, 0x70, 0x65, 0x61,
- 0x72, 0x69, 0x6E, 0x67, 0x20, 0x69, 0x6E, 0x20, 0x74, 0x68, 0x65, 0x20, 0x62, 0x6C, 0x6F, 0x63,
- 0x6B, 0x2E, 0x5C, 0x6E, 0x4D, 0x6F, 0x72, 0x65, 0x20, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x6C, 0x6C, 0x79, 0x2C, 0x20, 0x69, 0x74, 0x27, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20,
- 0x72, 0x6F, 0x6F, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x61, 0x20, 0x6D, 0x65, 0x72, 0x6B, 0x6C, 0x65,
- 0x20, 0x74, 0x72, 0x65, 0x65, 0x20, 0x77, 0x68, 0x6F, 0x73, 0x65, 0x20, 0x6C, 0x65, 0x61, 0x76,
- 0x65, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B,
- 0x27, 0x73, 0x20, 0x54, 0x78, 0x69, 0x64, 0x73, 0x2C, 0x20, 0x69, 0x6E, 0x20, 0x6C, 0x65, 0x78,
- 0x69, 0x63, 0x6F, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x20, 0x6F, 0x72, 0x64, 0x65, 0x72,
- 0x2E, 0x5C, 0x6E, 0x46, 0x6F, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x6D, 0x70, 0x74, 0x79,
- 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x2C, 0x20, 0x69, 0x74, 0x27, 0x73, 0x20, 0x30, 0x2E, 0x5C,
- 0x6E, 0x4E, 0x6F, 0x74, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x54,
- 0x78, 0x6E, 0x43, 0x6F, 0x6D, 0x6D, 0x69, 0x74, 0x6D, 0x65, 0x6E, 0x74, 0x73, 0x20, 0x64, 0x6F,
- 0x65, 0x73, 0x20, 0x6E, 0x6F, 0x74, 0x20, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6E, 0x74, 0x69, 0x63,
- 0x61, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x69, 0x67, 0x6E, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x73, 0x20, 0x6F, 0x6E, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61,
- 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2C, 0x20, 0x6F, 0x6E, 0x6C, 0x79, 0x20, 0x74, 0x68, 0x65,
- 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x20, 0x74, 0x68,
- 0x65, 0x6D, 0x73, 0x65, 0x6C, 0x76, 0x65, 0x73, 0x2E, 0x5C, 0x6E, 0x54, 0x77, 0x6F, 0x20, 0x62,
- 0x6C, 0x6F, 0x63, 0x6B, 0x73, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73,
- 0x61, 0x6D, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73,
- 0x20, 0x62, 0x75, 0x74, 0x20, 0x69, 0x6E, 0x20, 0x61, 0x20, 0x64, 0x69, 0x66, 0x66, 0x65, 0x72,
- 0x65, 0x6E, 0x74, 0x20, 0x6F, 0x72, 0x64, 0x65, 0x72, 0x20, 0x61, 0x6E, 0x64, 0x20, 0x77, 0x69,
- 0x74, 0x68, 0x20, 0x64, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x73, 0x69, 0x67,
- 0x6E, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x20, 0x77, 0x69, 0x6C, 0x6C, 0x20, 0x68, 0x61, 0x76,
- 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x61, 0x6D, 0x65, 0x20, 0x54, 0x78, 0x6E, 0x43, 0x6F,
- 0x6D, 0x6D, 0x69, 0x74, 0x6D, 0x65, 0x6E, 0x74, 0x73, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22,
- 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20,
- 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x52, 0x6F, 0x6F,
- 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x78, 0x6E, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22,
- 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73,
- 0x2F, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x4C, 0x69, 0x73, 0x74,
- 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x41, 0x70, 0x70,
- 0x72, 0x6F, 0x76, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22,
- 0x3A, 0x20, 0x22, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x41, 0x70, 0x70, 0x72, 0x6F, 0x76,
- 0x65, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x20, 0x79, 0x65,
- 0x73, 0x20, 0x76, 0x6F, 0x74, 0x65, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63,
- 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x70, 0x72, 0x6F, 0x70, 0x6F, 0x73, 0x61, 0x6C, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70,
- 0x65, 0x22, 0x3A, 0x20, 0x22, 0x62, 0x6F, 0x6F, 0x6C, 0x65, 0x61, 0x6E, 0x22, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E,
- 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x41, 0x70,
- 0x70, 0x72, 0x6F, 0x76, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x75, 0x70, 0x67, 0x72, 0x61,
- 0x64, 0x65, 0x50, 0x72, 0x6F, 0x70, 0x6F, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x50,
- 0x72, 0x6F, 0x70, 0x6F, 0x73, 0x65, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73,
- 0x20, 0x61, 0x20, 0x70, 0x72, 0x6F, 0x70, 0x6F, 0x73, 0x65, 0x64, 0x20, 0x75, 0x70, 0x67, 0x72,
- 0x61, 0x64, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x68, 0x61, 0x73, 0x68, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F,
+ 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x48, 0x61, 0x73, 0x68, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65,
+ 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x20, 0x68,
+ 0x61, 0x73, 0x68, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22,
0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67,
- 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64,
- 0x65, 0x50, 0x72, 0x6F, 0x70, 0x6F, 0x73, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65,
- 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61,
- 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72,
- 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64,
- 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x42, 0x75, 0x69, 0x6C, 0x64, 0x56,
- 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x69, 0x74, 0x6C, 0x65, 0x22,
- 0x3A, 0x20, 0x22, 0x42, 0x75, 0x69, 0x6C, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x20,
- 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x75, 0x72,
- 0x72, 0x65, 0x6E, 0x74, 0x20, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x20, 0x62, 0x75, 0x69, 0x6C, 0x64,
- 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61,
- 0x74, 0x69, 0x6F, 0x6E, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72,
- 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x6D, 0x61, 0x6A, 0x6F, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6D, 0x69, 0x6E, 0x6F, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x62, 0x75, 0x69, 0x6C, 0x64, 0x5F, 0x6E, 0x75, 0x6D,
- 0x62, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63,
- 0x6F, 0x6D, 0x6D, 0x69, 0x74, 0x5F, 0x68, 0x61, 0x73, 0x68, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x62, 0x72, 0x61, 0x6E, 0x63, 0x68, 0x22, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x68, 0x61, 0x6E, 0x6E, 0x65, 0x6C, 0x22,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x62, 0x72, 0x61, 0x6E, 0x63, 0x68, 0x22,
- 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74,
- 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D,
- 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x42, 0x72, 0x61, 0x6E, 0x63, 0x68, 0x22, 0x0A,
+ 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x48, 0x61, 0x73, 0x68, 0x22, 0x0A,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x62, 0x75, 0x69, 0x6C, 0x64, 0x5F, 0x6E, 0x75, 0x6D, 0x62, 0x65, 0x72,
- 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72,
- 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E,
- 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x42, 0x75, 0x69, 0x6C, 0x64, 0x4E, 0x75, 0x6D, 0x62,
- 0x65, 0x72, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x68, 0x61, 0x6E, 0x6E, 0x65, 0x6C, 0x22,
- 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74,
- 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D,
- 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x68, 0x61, 0x6E, 0x6E, 0x65, 0x6C, 0x22,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x6F, 0x6D, 0x6D, 0x69, 0x74, 0x5F, 0x68, 0x61, 0x73, 0x68,
+ 0x20, 0x20, 0x20, 0x22, 0x6E, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C,
0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E,
+ 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x20, 0x69, 0x73, 0x20, 0x61,
+ 0x20, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x72, 0x65, 0x70,
+ 0x72, 0x65, 0x73, 0x65, 0x6E, 0x74, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6E, 0x65, 0x78, 0x74,
+ 0x20, 0x70, 0x72, 0x6F, 0x70, 0x6F, 0x73, 0x65, 0x64, 0x20, 0x70, 0x72, 0x6F, 0x74, 0x6F, 0x63,
+ 0x6F, 0x6C, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C,
0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F,
- 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6F, 0x6D, 0x6D, 0x69, 0x74, 0x48,
- 0x61, 0x73, 0x68, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6D, 0x61, 0x6A, 0x6F, 0x72, 0x22, 0x3A,
- 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79,
- 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61,
- 0x74, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D,
- 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4D, 0x61, 0x6A, 0x6F, 0x72, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x6D, 0x69, 0x6E, 0x6F, 0x72, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74,
- 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x36,
- 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78,
- 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4D, 0x69, 0x6E, 0x6F,
- 0x72, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67,
- 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74,
- 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64,
- 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65,
- 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70,
- 0x65, 0x63, 0x2F, 0x63, 0x6F, 0x6D, 0x6D, 0x6F, 0x6E, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x43, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x43, 0x65,
- 0x72, 0x74, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70,
- 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6F, 0x6D, 0x70,
- 0x61, 0x63, 0x74, 0x43, 0x65, 0x72, 0x74, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20,
- 0x74, 0x68, 0x65, 0x20, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x61, 0x6C, 0x20, 0x66,
- 0x69, 0x65, 0x6C, 0x64, 0x73, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x61, 0x20, 0x63, 0x6F, 0x6D, 0x70,
- 0x61, 0x63, 0x74, 0x20, 0x63, 0x65, 0x72, 0x74, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63,
- 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79,
- 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A,
- 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x6E, 0x64, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x65, 0x72, 0x74, 0x22,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x65, 0x72, 0x74, 0x22, 0x3A, 0x20,
- 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x65, 0x72, 0x74,
- 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6D, 0x73, 0x67, 0x70, 0x61, 0x63, 0x6B, 0x20,
- 0x65, 0x6E, 0x63, 0x6F, 0x64, 0x69, 0x6E, 0x67, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20,
- 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x20, 0x63, 0x65, 0x72, 0x74, 0x2E, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22,
- 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20,
- 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22,
- 0x43, 0x65, 0x72, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x6E, 0x64, 0x22, 0x3A, 0x20,
- 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x65, 0x72, 0x74,
- 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x6F, 0x75,
- 0x6E, 0x64, 0x20, 0x77, 0x68, 0x6F, 0x73, 0x65, 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x20, 0x74,
- 0x68, 0x69, 0x73, 0x20, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x20, 0x63, 0x65, 0x72, 0x74,
- 0x20, 0x72, 0x65, 0x66, 0x65, 0x72, 0x73, 0x20, 0x74, 0x6F, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20,
- 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22,
- 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20,
- 0x22, 0x43, 0x65, 0x72, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61,
- 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D,
- 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67,
- 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67,
- 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x4B, 0x65, 0x79, 0x72,
- 0x65, 0x67, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70,
- 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4B, 0x65, 0x79, 0x72,
- 0x65, 0x67, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70,
- 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61,
- 0x64, 0x64, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x61, 0x6C, 0x20, 0x66, 0x69, 0x65, 0x6C, 0x64, 0x73,
- 0x20, 0x66, 0x6F, 0x72, 0x20, 0x61, 0x20, 0x6B, 0x65, 0x79, 0x72, 0x65, 0x67, 0x20, 0x54, 0x72,
- 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63,
- 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65,
- 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x73, 0x65, 0x6C, 0x6B, 0x65, 0x79, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x53, 0x65, 0x6C, 0x65, 0x63, 0x74, 0x69, 0x6F,
- 0x6E, 0x50, 0x4B, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x56, 0x52, 0x46, 0x20, 0x70,
- 0x75, 0x62, 0x6C, 0x69, 0x63, 0x20, 0x6B, 0x65, 0x79, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x69,
- 0x6E, 0x20, 0x6B, 0x65, 0x79, 0x20, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69,
- 0x6F, 0x6E, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70,
- 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22,
- 0x3A, 0x20, 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A,
- 0x20, 0x22, 0x53, 0x65, 0x6C, 0x65, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x50, 0x4B, 0x22, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x76, 0x6F, 0x74, 0x65, 0x66, 0x73, 0x74, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x46, 0x69, 0x72,
- 0x73, 0x74, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x69, 0x72, 0x73, 0x74, 0x20,
- 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x70, 0x61, 0x72, 0x74, 0x69,
- 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x6B, 0x65, 0x79, 0x20, 0x69, 0x73, 0x20,
- 0x76, 0x61, 0x6C, 0x69, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67,
- 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D,
- 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x46,
- 0x69, 0x72, 0x73, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x76, 0x6F, 0x74, 0x65, 0x6B, 0x64,
+ 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F,
+ 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6E, 0x65, 0x78, 0x74, 0x50,
+ 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x41, 0x70, 0x70, 0x72, 0x6F, 0x76, 0x61, 0x6C, 0x73,
0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x56,
- 0x6F, 0x74, 0x65, 0x4B, 0x65, 0x79, 0x44, 0x69, 0x6C, 0x75, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x69,
- 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x69, 0x6C, 0x75, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x66,
- 0x6F, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x32, 0x2D, 0x6C, 0x65, 0x76, 0x65, 0x6C, 0x20, 0x70,
- 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x6B, 0x65, 0x79,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E,
+ 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x41, 0x70, 0x70, 0x72, 0x6F,
+ 0x76, 0x61, 0x6C, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6E, 0x75, 0x6D, 0x62,
+ 0x65, 0x72, 0x20, 0x6F, 0x66, 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x73, 0x20, 0x77, 0x68, 0x69,
+ 0x63, 0x68, 0x20, 0x61, 0x70, 0x70, 0x72, 0x6F, 0x76, 0x65, 0x64, 0x20, 0x74, 0x68, 0x65, 0x20,
+ 0x70, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x20, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65,
0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79,
0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61,
0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61,
- 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x4B, 0x65, 0x79, 0x44, 0x69, 0x6C,
- 0x75, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x76, 0x6F, 0x74, 0x65, 0x6B,
- 0x65, 0x79, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20,
- 0x22, 0x56, 0x6F, 0x74, 0x65, 0x50, 0x4B, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70,
- 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x70, 0x75, 0x62,
- 0x6C, 0x69, 0x63, 0x20, 0x6B, 0x65, 0x79, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6E, 0x20,
- 0x6B, 0x65, 0x79, 0x20, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6F, 0x6E,
- 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22,
- 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20,
- 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22,
- 0x56, 0x6F, 0x74, 0x65, 0x50, 0x4B, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x76, 0x6F, 0x74, 0x65,
- 0x6C, 0x73, 0x74, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A,
- 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x4C, 0x61, 0x73, 0x74, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68,
- 0x65, 0x20, 0x6C, 0x61, 0x73, 0x74, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x74, 0x68, 0x69,
- 0x73, 0x20, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20,
- 0x6B, 0x65, 0x79, 0x20, 0x69, 0x73, 0x20, 0x76, 0x61, 0x6C, 0x69, 0x64, 0x22, 0x2C, 0x0A, 0x20,
+ 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63,
+ 0x6F, 0x6C, 0x41, 0x70, 0x70, 0x72, 0x6F, 0x76, 0x61, 0x6C, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x6E, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x53, 0x77, 0x69,
+ 0x74, 0x63, 0x68, 0x4F, 0x6E, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E,
+ 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C,
+ 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x4F, 0x6E, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20,
+ 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x6F, 0x6E, 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x74,
+ 0x68, 0x65, 0x20, 0x70, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x20, 0x75, 0x70, 0x67, 0x72,
+ 0x61, 0x64, 0x65, 0x20, 0x77, 0x69, 0x6C, 0x6C, 0x20, 0x74, 0x61, 0x6B, 0x65, 0x20, 0x65, 0x66,
+ 0x66, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65,
+ 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66,
+ 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67,
+ 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x50, 0x72,
+ 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x4F, 0x6E, 0x22, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x6E, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C,
+ 0x56, 0x6F, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6F, 0x72, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F,
+ 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x56, 0x6F, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6F, 0x72, 0x65, 0x20,
+ 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x65, 0x61, 0x64, 0x6C, 0x69, 0x6E, 0x65, 0x20,
+ 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x70,
+ 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x20, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x20,
+ 0x28, 0x4E, 0x6F, 0x20, 0x76, 0x6F, 0x74, 0x65, 0x73, 0x20, 0x77, 0x69, 0x6C, 0x6C, 0x20, 0x62,
+ 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x73, 0x69, 0x64, 0x65, 0x72, 0x20, 0x61, 0x66, 0x74, 0x65, 0x72,
+ 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x29, 0x22, 0x2C, 0x0A, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20,
0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A,
- 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x4C, 0x61, 0x73, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61,
- 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D,
- 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67,
- 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67,
- 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x4E, 0x6F, 0x64, 0x65,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20,
- 0x22, 0x4E, 0x6F, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x20, 0x63, 0x6F, 0x6E, 0x74,
- 0x61, 0x69, 0x6E, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61,
- 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x61, 0x62, 0x6F, 0x75, 0x74, 0x20, 0x61, 0x20, 0x6E, 0x6F, 0x64,
- 0x65, 0x20, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72,
- 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x6C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x6C, 0x61, 0x73, 0x74, 0x43, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73,
- 0x75, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x6E, 0x65, 0x78, 0x74, 0x43, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73,
- 0x75, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x6E, 0x65, 0x78, 0x74, 0x43, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73,
- 0x75, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6E, 0x65, 0x78, 0x74, 0x43, 0x6F,
- 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x53, 0x75,
- 0x70, 0x70, 0x6F, 0x72, 0x74, 0x65, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x74, 0x69, 0x6D, 0x65, 0x53, 0x69, 0x6E, 0x63, 0x65, 0x4C, 0x61, 0x73, 0x74,
- 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x63, 0x61, 0x74, 0x63, 0x68, 0x75, 0x70, 0x54, 0x69, 0x6D, 0x65, 0x22, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x68, 0x61, 0x73, 0x53, 0x79, 0x6E, 0x63, 0x65,
- 0x64, 0x53, 0x69, 0x6E, 0x63, 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x75, 0x70, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x74, 0x6F, 0x70, 0x70, 0x65, 0x64,
- 0x41, 0x74, 0x55, 0x6E, 0x73, 0x75, 0x70, 0x70, 0x6F, 0x72, 0x74, 0x65, 0x64, 0x52, 0x6F, 0x75,
- 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A,
- 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x61, 0x74, 0x63,
- 0x68, 0x75, 0x70, 0x54, 0x69, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
- 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x61, 0x74, 0x63, 0x68, 0x75, 0x70, 0x54, 0x69, 0x6D,
- 0x65, 0x20, 0x69, 0x6E, 0x20, 0x6E, 0x61, 0x6E, 0x6F, 0x73, 0x65, 0x63, 0x6F, 0x6E, 0x64, 0x73,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79,
- 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61,
- 0x74, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D,
- 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x61, 0x74, 0x63, 0x68, 0x75, 0x70, 0x54, 0x69, 0x6D, 0x65,
- 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x68, 0x61, 0x73, 0x53, 0x79, 0x6E, 0x63, 0x65, 0x64, 0x53,
- 0x69, 0x6E, 0x63, 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x75, 0x70, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
+ 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x56, 0x6F,
+ 0x74, 0x65, 0x42, 0x65, 0x66, 0x6F, 0x72, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x65,
+ 0x72, 0x69, 0x6F, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22,
+ 0x3A, 0x20, 0x22, 0x50, 0x65, 0x72, 0x69, 0x6F, 0x64, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65,
+ 0x20, 0x70, 0x65, 0x72, 0x69, 0x6F, 0x64, 0x20, 0x6F, 0x6E, 0x20, 0x77, 0x68, 0x69, 0x63, 0x68,
+ 0x20, 0x74, 0x68, 0x65, 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x20, 0x77, 0x61, 0x73, 0x20, 0x63,
+ 0x6F, 0x6E, 0x66, 0x69, 0x72, 0x6D, 0x65, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E,
+ 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E,
+ 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x50, 0x65,
+ 0x72, 0x69, 0x6F, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6F,
+ 0x75, 0x73, 0x42, 0x6C, 0x6F, 0x63, 0x6B, 0x48, 0x61, 0x73, 0x68, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x48, 0x61, 0x73, 0x53, 0x79, 0x6E,
- 0x63, 0x65, 0x64, 0x53, 0x69, 0x6E, 0x63, 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x75, 0x70, 0x20,
- 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x77, 0x68, 0x65, 0x74, 0x68, 0x65,
- 0x72, 0x20, 0x61, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x68, 0x61, 0x73, 0x20, 0x63, 0x6F,
- 0x6D, 0x70, 0x6C, 0x65, 0x74, 0x65, 0x64, 0x20, 0x73, 0x69, 0x6E, 0x63, 0x65, 0x20, 0x73, 0x74,
- 0x61, 0x72, 0x74, 0x75, 0x70, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x62, 0x6F, 0x6F, 0x6C, 0x65,
- 0x61, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x48, 0x61, 0x73,
- 0x53, 0x79, 0x6E, 0x63, 0x65, 0x64, 0x53, 0x69, 0x6E, 0x63, 0x65, 0x53, 0x74, 0x61, 0x72, 0x74,
- 0x75, 0x70, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6C, 0x61, 0x73, 0x74, 0x43, 0x6F, 0x6E, 0x73,
- 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x7B,
+ 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x50, 0x72, 0x65, 0x76, 0x69, 0x6F,
+ 0x75, 0x73, 0x42, 0x6C, 0x6F, 0x63, 0x6B, 0x48, 0x61, 0x73, 0x68, 0x20, 0x69, 0x73, 0x20, 0x74,
+ 0x68, 0x65, 0x20, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6F, 0x75, 0x73, 0x20, 0x62, 0x6C, 0x6F, 0x63,
+ 0x6B, 0x20, 0x68, 0x61, 0x73, 0x68, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69,
+ 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x50, 0x72, 0x65,
+ 0x76, 0x69, 0x6F, 0x75, 0x73, 0x42, 0x6C, 0x6F, 0x63, 0x6B, 0x48, 0x61, 0x73, 0x68, 0x22, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x6F, 0x73, 0x65, 0x72, 0x22, 0x3A, 0x20, 0x7B,
0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4C, 0x61, 0x73, 0x74, 0x56,
- 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73,
- 0x20, 0x74, 0x68, 0x65, 0x20, 0x6C, 0x61, 0x73, 0x74, 0x20, 0x63, 0x6F, 0x6E, 0x73, 0x65, 0x6E,
- 0x73, 0x75, 0x73, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x73, 0x75, 0x70, 0x70,
- 0x6F, 0x72, 0x74, 0x65, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E,
- 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78,
- 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4C, 0x61, 0x73, 0x74,
- 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6C, 0x61, 0x73,
- 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x50, 0x72, 0x6F, 0x70, 0x6F,
+ 0x73, 0x65, 0x72, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x64, 0x64, 0x72, 0x65,
+ 0x73, 0x73, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B,
+ 0x20, 0x70, 0x72, 0x6F, 0x70, 0x6F, 0x73, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73,
+ 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x50, 0x72, 0x6F, 0x70, 0x6F, 0x73, 0x65, 0x72, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x61,
+ 0x74, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20,
+ 0x22, 0x54, 0x68, 0x65, 0x20, 0x6E, 0x75, 0x6D, 0x62, 0x65, 0x72, 0x20, 0x6F, 0x66, 0x20, 0x6E,
+ 0x65, 0x77, 0x20, 0x4D, 0x69, 0x63, 0x72, 0x6F, 0x41, 0x6C, 0x67, 0x6F, 0x73, 0x20, 0x61, 0x64,
+ 0x64, 0x65, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x61, 0x72, 0x74, 0x69,
+ 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x73, 0x74, 0x61, 0x6B, 0x65, 0x20, 0x66,
+ 0x72, 0x6F, 0x6D, 0x20, 0x72, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x20, 0x61, 0x74, 0x20, 0x74,
+ 0x68, 0x65, 0x20, 0x6E, 0x65, 0x78, 0x74, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x2E, 0x22, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65,
+ 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22,
+ 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65,
+ 0x22, 0x3A, 0x20, 0x22, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x52, 0x61, 0x74, 0x65, 0x22,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x77, 0x61, 0x72, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64,
+ 0x73, 0x4C, 0x65, 0x76, 0x65, 0x6C, 0x20, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x73,
+ 0x20, 0x68, 0x6F, 0x77, 0x20, 0x6D, 0x61, 0x6E, 0x79, 0x20, 0x72, 0x65, 0x77, 0x61, 0x72, 0x64,
+ 0x73, 0x2C, 0x20, 0x69, 0x6E, 0x20, 0x4D, 0x69, 0x63, 0x72, 0x6F, 0x41, 0x6C, 0x67, 0x6F, 0x73,
+ 0x2C, 0x5C, 0x6E, 0x68, 0x61, 0x76, 0x65, 0x20, 0x62, 0x65, 0x65, 0x6E, 0x20, 0x64, 0x69, 0x73,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x65, 0x61, 0x63, 0x68,
+ 0x20, 0x63, 0x6F, 0x6E, 0x66, 0x69, 0x67, 0x2E, 0x50, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C,
+ 0x2E, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x55, 0x6E, 0x69, 0x74, 0x5C, 0x6E, 0x6F, 0x66, 0x20,
+ 0x4D, 0x69, 0x63, 0x72, 0x6F, 0x41, 0x6C, 0x67, 0x6F, 0x73, 0x20, 0x73, 0x69, 0x6E, 0x63, 0x65,
+ 0x20, 0x67, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69,
+ 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69,
+ 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x52,
+ 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x4C, 0x65, 0x76, 0x65, 0x6C, 0x22, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F,
- 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x69,
- 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6C, 0x61, 0x73,
- 0x74, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x73, 0x65, 0x65, 0x6E, 0x22, 0x2C, 0x0A, 0x20,
+ 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68,
+ 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20,
+ 0x6F, 0x6E, 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x62, 0x6C,
+ 0x6F, 0x63, 0x6B, 0x20, 0x77, 0x61, 0x73, 0x20, 0x61, 0x70, 0x70, 0x65, 0x6E, 0x64, 0x65, 0x64,
+ 0x20, 0x74, 0x6F, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x68, 0x61, 0x69, 0x6E, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22,
+ 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A,
+ 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22,
+ 0x3A, 0x20, 0x22, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x65,
+ 0x65, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20,
+ 0x22, 0x53, 0x65, 0x65, 0x64, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x6F, 0x72,
+ 0x74, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x73, 0x65, 0x65, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20,
+ 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A,
+ 0x20, 0x22, 0x53, 0x65, 0x65, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x69, 0x6D, 0x65,
+ 0x73, 0x74, 0x61, 0x6D, 0x70, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E,
+ 0x22, 0x3A, 0x20, 0x22, 0x54, 0x69, 0x6D, 0x65, 0x53, 0x74, 0x61, 0x6D, 0x70, 0x20, 0x69, 0x6E,
+ 0x20, 0x73, 0x65, 0x63, 0x6F, 0x6E, 0x64, 0x73, 0x20, 0x73, 0x69, 0x6E, 0x63, 0x65, 0x20, 0x65,
+ 0x70, 0x6F, 0x63, 0x68, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65,
+ 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66,
+ 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F,
+ 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x69, 0x6D, 0x65, 0x73, 0x74, 0x61,
+ 0x6D, 0x70, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x78, 0x6E, 0x52, 0x6F, 0x6F, 0x74, 0x22,
+ 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72,
+ 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x52, 0x6F, 0x6F, 0x74, 0x20, 0x61,
+ 0x75, 0x74, 0x68, 0x65, 0x6E, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65,
+ 0x20, 0x73, 0x65, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74,
+ 0x69, 0x6F, 0x6E, 0x73, 0x20, 0x61, 0x70, 0x70, 0x65, 0x61, 0x72, 0x69, 0x6E, 0x67, 0x20, 0x69,
+ 0x6E, 0x20, 0x74, 0x68, 0x65, 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x2E, 0x5C, 0x6E, 0x4D, 0x6F,
+ 0x72, 0x65, 0x20, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x6C, 0x6C, 0x79, 0x2C,
+ 0x20, 0x69, 0x74, 0x27, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x6F, 0x6F, 0x74, 0x20, 0x6F,
+ 0x66, 0x20, 0x61, 0x20, 0x6D, 0x65, 0x72, 0x6B, 0x6C, 0x65, 0x20, 0x74, 0x72, 0x65, 0x65, 0x20,
+ 0x77, 0x68, 0x6F, 0x73, 0x65, 0x20, 0x6C, 0x65, 0x61, 0x76, 0x65, 0x73, 0x20, 0x61, 0x72, 0x65,
+ 0x20, 0x74, 0x68, 0x65, 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x27, 0x73, 0x20, 0x54, 0x78, 0x69,
+ 0x64, 0x73, 0x2C, 0x20, 0x69, 0x6E, 0x20, 0x6C, 0x65, 0x78, 0x69, 0x63, 0x6F, 0x67, 0x72, 0x61,
+ 0x70, 0x68, 0x69, 0x63, 0x20, 0x6F, 0x72, 0x64, 0x65, 0x72, 0x2E, 0x5C, 0x6E, 0x46, 0x6F, 0x72,
+ 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x6D, 0x70, 0x74, 0x79, 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B,
+ 0x2C, 0x20, 0x69, 0x74, 0x27, 0x73, 0x20, 0x30, 0x2E, 0x5C, 0x6E, 0x4E, 0x6F, 0x74, 0x65, 0x20,
+ 0x74, 0x68, 0x61, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x54, 0x78, 0x6E, 0x43, 0x6F, 0x6D, 0x6D,
+ 0x69, 0x74, 0x6D, 0x65, 0x6E, 0x74, 0x73, 0x20, 0x64, 0x6F, 0x65, 0x73, 0x20, 0x6E, 0x6F, 0x74,
+ 0x20, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6E, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x20, 0x74, 0x68,
+ 0x65, 0x20, 0x73, 0x69, 0x67, 0x6E, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x20, 0x6F, 0x6E, 0x20,
+ 0x74, 0x68, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73,
+ 0x2C, 0x20, 0x6F, 0x6E, 0x6C, 0x79, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73,
+ 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x20, 0x74, 0x68, 0x65, 0x6D, 0x73, 0x65, 0x6C, 0x76,
+ 0x65, 0x73, 0x2E, 0x5C, 0x6E, 0x54, 0x77, 0x6F, 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x73, 0x20,
+ 0x77, 0x69, 0x74, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x61, 0x6D, 0x65, 0x20, 0x74, 0x72,
+ 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x20, 0x62, 0x75, 0x74, 0x20, 0x69,
+ 0x6E, 0x20, 0x61, 0x20, 0x64, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x6F, 0x72,
+ 0x64, 0x65, 0x72, 0x20, 0x61, 0x6E, 0x64, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x64, 0x69, 0x66,
+ 0x66, 0x65, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x73, 0x69, 0x67, 0x6E, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x73, 0x20, 0x77, 0x69, 0x6C, 0x6C, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20,
+ 0x73, 0x61, 0x6D, 0x65, 0x20, 0x54, 0x78, 0x6E, 0x43, 0x6F, 0x6D, 0x6D, 0x69, 0x74, 0x6D, 0x65,
+ 0x6E, 0x74, 0x73, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67,
+ 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D,
+ 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73,
+ 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x52, 0x6F, 0x6F, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x74, 0x78, 0x6E, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64,
+ 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x54, 0x72, 0x61, 0x6E, 0x73,
+ 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x4C, 0x69, 0x73, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x41, 0x70, 0x70, 0x72, 0x6F, 0x76, 0x65, 0x22, 0x3A,
+ 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x55, 0x70, 0x67,
+ 0x72, 0x61, 0x64, 0x65, 0x41, 0x70, 0x70, 0x72, 0x6F, 0x76, 0x65, 0x20, 0x69, 0x6E, 0x64, 0x69,
+ 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x20, 0x79, 0x65, 0x73, 0x20, 0x76, 0x6F, 0x74, 0x65,
+ 0x20, 0x66, 0x6F, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74,
+ 0x20, 0x70, 0x72, 0x6F, 0x70, 0x6F, 0x73, 0x61, 0x6C, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x62,
+ 0x6F, 0x6F, 0x6C, 0x65, 0x61, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20,
+ 0x22, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x41, 0x70, 0x70, 0x72, 0x6F, 0x76, 0x65, 0x22,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x50, 0x72, 0x6F, 0x70,
+ 0x6F, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A,
+ 0x20, 0x22, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x50, 0x72, 0x6F, 0x70, 0x6F, 0x73, 0x65,
+ 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x20, 0x70, 0x72, 0x6F,
+ 0x70, 0x6F, 0x73, 0x65, 0x64, 0x20, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22,
+ 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65,
+ 0x22, 0x3A, 0x20, 0x22, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x50, 0x72, 0x6F, 0x70, 0x6F,
+ 0x73, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D,
+ 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E,
+ 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61,
+ 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73,
+ 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x42, 0x75, 0x69, 0x6C, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E,
+ 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65,
+ 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x74, 0x69, 0x74, 0x6C, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x42, 0x75, 0x69,
+ 0x6C, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69,
+ 0x6E, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x61,
+ 0x6C, 0x67, 0x6F, 0x64, 0x20, 0x62, 0x75, 0x69, 0x6C, 0x64, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6F, 0x6E, 0x20, 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x2E, 0x22,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65,
+ 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6D,
+ 0x61, 0x6A, 0x6F, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x6D, 0x69, 0x6E, 0x6F, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x62, 0x75, 0x69, 0x6C, 0x64, 0x5F, 0x6E, 0x75, 0x6D, 0x62, 0x65, 0x72, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x6F, 0x6D, 0x6D, 0x69, 0x74, 0x5F,
+ 0x68, 0x61, 0x73, 0x68, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x62, 0x72, 0x61, 0x6E, 0x63, 0x68, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x63, 0x68, 0x61, 0x6E, 0x6E, 0x65, 0x6C, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65,
+ 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x62, 0x72, 0x61, 0x6E, 0x63, 0x68, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20,
+ 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A,
+ 0x20, 0x22, 0x42, 0x72, 0x61, 0x6E, 0x63, 0x68, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x62, 0x75,
+ 0x69, 0x6C, 0x64, 0x5F, 0x6E, 0x75, 0x6D, 0x62, 0x65, 0x72, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20,
- 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20,
+ 0x22, 0x42, 0x75, 0x69, 0x6C, 0x64, 0x4E, 0x75, 0x6D, 0x62, 0x65, 0x72, 0x22, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x63, 0x68, 0x61, 0x6E, 0x6E, 0x65, 0x6C, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20,
+ 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A,
- 0x20, 0x22, 0x4C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x6E, 0x65, 0x78, 0x74, 0x43, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65,
- 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E,
- 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x20,
- 0x6F, 0x66, 0x20, 0x63, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x20, 0x70, 0x72, 0x6F,
- 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x20, 0x74, 0x6F, 0x20, 0x75, 0x73, 0x65, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x22, 0x43, 0x68, 0x61, 0x6E, 0x6E, 0x65, 0x6C, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63,
+ 0x6F, 0x6D, 0x6D, 0x69, 0x74, 0x5F, 0x68, 0x61, 0x73, 0x68, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22,
- 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x6E, 0x65, 0x78, 0x74, 0x43, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75,
- 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x3A, 0x20,
- 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74,
- 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x69, 0x73, 0x20,
- 0x74, 0x68, 0x65, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x61, 0x74, 0x20, 0x77, 0x68, 0x69,
- 0x63, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6E, 0x65, 0x78, 0x74, 0x20, 0x63, 0x6F, 0x6E, 0x73,
- 0x65, 0x6E, 0x73, 0x75, 0x73, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x77, 0x69,
- 0x6C, 0x6C, 0x20, 0x61, 0x70, 0x70, 0x6C, 0x79, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E,
- 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E,
- 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65,
- 0x78, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x6E, 0x65, 0x78, 0x74, 0x43, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75,
- 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x53, 0x75, 0x70, 0x70, 0x6F, 0x72, 0x74, 0x65,
- 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
- 0x4E, 0x65, 0x78, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x53, 0x75, 0x70, 0x70, 0x6F,
- 0x72, 0x74, 0x65, 0x64, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x77,
- 0x68, 0x65, 0x74, 0x68, 0x65, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6E, 0x65, 0x78, 0x74, 0x20,
- 0x63, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6F,
- 0x6E, 0x20, 0x69, 0x73, 0x20, 0x73, 0x75, 0x70, 0x70, 0x6F, 0x72, 0x74, 0x65, 0x64, 0x20, 0x62,
- 0x79, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6E, 0x6F, 0x64, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20,
- 0x22, 0x62, 0x6F, 0x6F, 0x6C, 0x65, 0x61, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22,
- 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x53, 0x75,
- 0x70, 0x70, 0x6F, 0x72, 0x74, 0x65, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x74, 0x6F,
- 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x55, 0x6E, 0x73, 0x75, 0x70, 0x70, 0x6F, 0x72, 0x74, 0x65,
- 0x64, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F,
- 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x53, 0x74, 0x6F, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x55, 0x6E,
- 0x73, 0x75, 0x70, 0x70, 0x6F, 0x72, 0x74, 0x65, 0x64, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x69,
- 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x74, 0x68,
- 0x65, 0x20, 0x6E, 0x6F, 0x64, 0x65, 0x20, 0x64, 0x6F, 0x65, 0x73, 0x20, 0x6E, 0x6F, 0x74, 0x20,
- 0x73, 0x75, 0x70, 0x70, 0x6F, 0x72, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6E, 0x65, 0x77, 0x20,
- 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x73, 0x20, 0x61, 0x6E, 0x64, 0x20, 0x68, 0x61, 0x73, 0x20, 0x73,
- 0x74, 0x6F, 0x70, 0x70, 0x65, 0x64, 0x20, 0x6D, 0x61, 0x6B, 0x69, 0x6E, 0x67, 0x20, 0x70, 0x72,
- 0x6F, 0x67, 0x72, 0x65, 0x73, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x62, 0x6F, 0x6F, 0x6C,
- 0x65, 0x61, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x53, 0x74,
- 0x6F, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x55, 0x6E, 0x73, 0x75, 0x70, 0x70, 0x6F, 0x72, 0x74,
- 0x65, 0x64, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x69, 0x6D,
- 0x65, 0x53, 0x69, 0x6E, 0x63, 0x65, 0x4C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22,
+ 0x3A, 0x20, 0x22, 0x43, 0x6F, 0x6D, 0x6D, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x22, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x6D, 0x61, 0x6A, 0x6F, 0x72, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x69,
+ 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4D,
+ 0x61, 0x6A, 0x6F, 0x72, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6D, 0x69, 0x6E, 0x6F, 0x72, 0x22,
+ 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74,
+ 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D,
+ 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61,
+ 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4D, 0x69, 0x6E, 0x6F, 0x72, 0x22, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B,
+ 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F,
+ 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C,
+ 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C,
+ 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x63, 0x6F, 0x6D,
+ 0x6D, 0x6F, 0x6E, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x4B, 0x65, 0x79, 0x72, 0x65, 0x67, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
+ 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20,
+ 0x22, 0x4B, 0x65, 0x79, 0x72, 0x65, 0x67, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
+ 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20,
+ 0x74, 0x68, 0x65, 0x20, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x61, 0x6C, 0x20, 0x66,
+ 0x69, 0x65, 0x6C, 0x64, 0x73, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x61, 0x20, 0x6B, 0x65, 0x79, 0x72,
+ 0x65, 0x67, 0x20, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x65, 0x6C, 0x6B, 0x65, 0x79, 0x22, 0x3A,
+ 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x53, 0x65, 0x6C,
+ 0x65, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x50, 0x4B, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20,
+ 0x56, 0x52, 0x46, 0x20, 0x70, 0x75, 0x62, 0x6C, 0x69, 0x63, 0x20, 0x6B, 0x65, 0x79, 0x20, 0x75,
+ 0x73, 0x65, 0x64, 0x20, 0x69, 0x6E, 0x20, 0x6B, 0x65, 0x79, 0x20, 0x72, 0x65, 0x67, 0x69, 0x73,
+ 0x74, 0x72, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74,
+ 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67,
+ 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F,
+ 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E,
+ 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x53, 0x65, 0x6C, 0x65, 0x63, 0x74, 0x69, 0x6F, 0x6E,
+ 0x50, 0x4B, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x76, 0x6F, 0x74, 0x65, 0x66, 0x73, 0x74, 0x22,
0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x69,
- 0x6D, 0x65, 0x53, 0x69, 0x6E, 0x63, 0x65, 0x4C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64,
- 0x20, 0x69, 0x6E, 0x20, 0x6E, 0x61, 0x6E, 0x6F, 0x73, 0x65, 0x63, 0x6F, 0x6E, 0x64, 0x73, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70,
- 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74,
- 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65,
- 0x22, 0x3A, 0x20, 0x22, 0x54, 0x69, 0x6D, 0x65, 0x53, 0x69, 0x6E, 0x63, 0x65, 0x4C, 0x61, 0x73,
- 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A,
- 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67,
- 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E,
- 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61,
- 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x50, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70,
- 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
- 0x50, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x44, 0x65,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63,
- 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x71, 0x75, 0x69,
- 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x70, 0x61, 0x72, 0x74, 0x70, 0x6B, 0x62, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x76, 0x72, 0x66, 0x70, 0x6B, 0x62, 0x36, 0x34, 0x22, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x76, 0x6F, 0x74, 0x65, 0x66, 0x73,
- 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x76, 0x6F, 0x74,
- 0x65, 0x6C, 0x73, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x76, 0x6F, 0x74, 0x65, 0x6B, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69,
- 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x70, 0x61, 0x72, 0x74, 0x70, 0x6B, 0x62, 0x36, 0x34, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x50, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70,
- 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x50, 0x4B, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72,
- 0x6F, 0x6F, 0x74, 0x20, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F,
- 0x6E, 0x20, 0x70, 0x75, 0x62, 0x6C, 0x69, 0x63, 0x20, 0x6B, 0x65, 0x79, 0x20, 0x28, 0x69, 0x66,
- 0x20, 0x61, 0x6E, 0x79, 0x29, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x6C, 0x79, 0x20,
- 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x74,
- 0x68, 0x69, 0x73, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73,
- 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x62, 0x79, 0x74,
- 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78,
- 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x50, 0x61, 0x72, 0x74,
- 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x50, 0x4B, 0x22, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x76, 0x6F, 0x74, 0x65, 0x66, 0x73, 0x74, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x46, 0x69, 0x72, 0x73, 0x74,
- 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x69, 0x72, 0x73, 0x74, 0x20, 0x72, 0x6F,
- 0x75, 0x6E, 0x64, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x74, 0x68,
- 0x69, 0x73, 0x20, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F, 0x6E,
- 0x20, 0x69, 0x73, 0x20, 0x76, 0x61, 0x6C, 0x69, 0x64, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F,
+ 0x74, 0x65, 0x46, 0x69, 0x72, 0x73, 0x74, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66,
+ 0x69, 0x72, 0x73, 0x74, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20,
+ 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x6B, 0x65,
+ 0x79, 0x20, 0x69, 0x73, 0x20, 0x76, 0x61, 0x6C, 0x69, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22,
0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75,
@@ -3128,11 +2774,10 @@ func init() {
0x6F, 0x74, 0x65, 0x6B, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E,
0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x4B, 0x65, 0x79, 0x44, 0x69, 0x6C, 0x75, 0x74,
- 0x69, 0x6F, 0x6E, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6E, 0x75, 0x6D, 0x62, 0x65,
- 0x72, 0x20, 0x6F, 0x66, 0x20, 0x73, 0x75, 0x62, 0x6B, 0x65, 0x79, 0x73, 0x20, 0x69, 0x6E, 0x20,
- 0x66, 0x6F, 0x72, 0x20, 0x65, 0x61, 0x63, 0x68, 0x20, 0x62, 0x61, 0x74, 0x63, 0x68, 0x20, 0x6F,
- 0x66, 0x20, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20,
- 0x6B, 0x65, 0x79, 0x73, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x69, 0x6F, 0x6E, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x69, 0x6C, 0x75, 0x74,
+ 0x69, 0x6F, 0x6E, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x32, 0x2D, 0x6C, 0x65,
+ 0x76, 0x65, 0x6C, 0x20, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F,
+ 0x6E, 0x20, 0x6B, 0x65, 0x79, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67,
0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34,
@@ -3140,696 +2785,699 @@ func init() {
0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x4B,
0x65, 0x79, 0x44, 0x69, 0x6C, 0x75, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x76, 0x6F, 0x74, 0x65, 0x6C, 0x73, 0x74, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x76, 0x6F, 0x74, 0x65, 0x6B, 0x65, 0x79, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
- 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x4C, 0x61, 0x73, 0x74, 0x20, 0x69,
- 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6C, 0x61, 0x73, 0x74, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64,
- 0x20, 0x66, 0x6F, 0x72, 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20,
- 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x69, 0x73,
- 0x20, 0x76, 0x61, 0x6C, 0x69, 0x64, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74,
- 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74,
- 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F, 0x74,
- 0x65, 0x4C, 0x61, 0x73, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x76, 0x72, 0x66, 0x70, 0x6B,
- 0x62, 0x36, 0x34, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A,
- 0x20, 0x22, 0x56, 0x52, 0x46, 0x50, 0x4B, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73,
- 0x65, 0x6C, 0x65, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x70, 0x75, 0x62, 0x6C, 0x69, 0x63, 0x20,
- 0x6B, 0x65, 0x79, 0x20, 0x28, 0x69, 0x66, 0x20, 0x61, 0x6E, 0x79, 0x29, 0x20, 0x63, 0x75, 0x72,
- 0x72, 0x65, 0x6E, 0x74, 0x6C, 0x79, 0x20, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65,
- 0x64, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79,
- 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74,
- 0x22, 0x3A, 0x20, 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22,
- 0x3A, 0x20, 0x22, 0x56, 0x52, 0x46, 0x50, 0x4B, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65,
- 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61,
- 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72,
- 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64,
- 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x50, 0x61, 0x79, 0x6D, 0x65, 0x6E,
- 0x74, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65,
- 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x50, 0x61, 0x79, 0x6D, 0x65,
- 0x6E, 0x74, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70,
- 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61,
- 0x64, 0x64, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x61, 0x6C, 0x20, 0x66, 0x69, 0x65, 0x6C, 0x64, 0x73,
- 0x20, 0x66, 0x6F, 0x72, 0x20, 0x61, 0x20, 0x70, 0x61, 0x79, 0x6D, 0x65, 0x6E, 0x74, 0x20, 0x54,
- 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65,
- 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x71, 0x75,
- 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x74, 0x6F, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x61, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69,
- 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x61, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F,
- 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x41, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x69, 0x73, 0x20, 0x74,
- 0x68, 0x65, 0x20, 0x61, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x4D, 0x69, 0x63,
- 0x72, 0x6F, 0x41, 0x6C, 0x67, 0x6F, 0x73, 0x20, 0x69, 0x6E, 0x74, 0x65, 0x6E, 0x64, 0x65, 0x64,
- 0x20, 0x74, 0x6F, 0x20, 0x62, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x66, 0x65, 0x72, 0x72,
- 0x65, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72,
- 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D,
- 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x41, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x22, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x63, 0x6C, 0x6F, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6C, 0x6F, 0x73, 0x65, 0x52, 0x65, 0x6D,
- 0x61, 0x69, 0x6E, 0x64, 0x65, 0x72, 0x54, 0x6F, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20,
- 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x6E, 0x64,
- 0x65, 0x72, 0x20, 0x63, 0x6C, 0x6F, 0x73, 0x65, 0x64, 0x20, 0x74, 0x6F, 0x22, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
- 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22,
- 0x3A, 0x20, 0x22, 0x43, 0x6C, 0x6F, 0x73, 0x65, 0x52, 0x65, 0x6D, 0x61, 0x69, 0x6E, 0x64, 0x65,
- 0x72, 0x54, 0x6F, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x6C, 0x6F, 0x73, 0x65, 0x61, 0x6D,
- 0x6F, 0x75, 0x6E, 0x74, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22,
- 0x3A, 0x20, 0x22, 0x43, 0x6C, 0x6F, 0x73, 0x65, 0x41, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x69,
- 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x73, 0x65, 0x6E,
- 0x74, 0x20, 0x74, 0x6F, 0x20, 0x43, 0x6C, 0x6F, 0x73, 0x65, 0x52, 0x65, 0x6D, 0x61, 0x69, 0x6E,
- 0x64, 0x65, 0x72, 0x54, 0x6F, 0x2C, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x63, 0x6F, 0x6D, 0x6D, 0x69,
- 0x74, 0x74, 0x65, 0x64, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79,
- 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61,
- 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61,
- 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6C, 0x6F, 0x73, 0x65, 0x41, 0x6D, 0x6F, 0x75, 0x6E,
- 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x6C, 0x6F, 0x73, 0x65, 0x72, 0x65, 0x77, 0x61,
- 0x72, 0x64, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A,
- 0x20, 0x22, 0x43, 0x6C, 0x6F, 0x73, 0x65, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x20, 0x69,
- 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x6F, 0x66, 0x20,
- 0x70, 0x65, 0x6E, 0x64, 0x69, 0x6E, 0x67, 0x20, 0x72, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x20,
- 0x61, 0x70, 0x70, 0x6C, 0x69, 0x65, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x74, 0x68, 0x65, 0x20, 0x43,
- 0x6C, 0x6F, 0x73, 0x65, 0x52, 0x65, 0x6D, 0x61, 0x69, 0x6E, 0x64, 0x65, 0x72, 0x54, 0x6F, 0x5C,
- 0x6E, 0x61, 0x63, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x61, 0x73, 0x20, 0x70, 0x61, 0x72, 0x74,
- 0x20, 0x6F, 0x66, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63,
- 0x74, 0x69, 0x6F, 0x6E, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x50, 0x4B, 0x20, 0x69, 0x73, 0x20,
+ 0x74, 0x68, 0x65, 0x20, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F,
+ 0x6E, 0x20, 0x70, 0x75, 0x62, 0x6C, 0x69, 0x63, 0x20, 0x6B, 0x65, 0x79, 0x20, 0x75, 0x73, 0x65,
+ 0x64, 0x20, 0x69, 0x6E, 0x20, 0x6B, 0x65, 0x79, 0x20, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72,
+ 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F,
+ 0x6E, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D,
+ 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D,
+ 0x65, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x50, 0x4B, 0x22, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x76, 0x6F, 0x74, 0x65, 0x6C, 0x73, 0x74, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x4C, 0x61, 0x73, 0x74, 0x20,
+ 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6C, 0x61, 0x73, 0x74, 0x20, 0x72, 0x6F, 0x75, 0x6E,
+ 0x64, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61,
+ 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x6B, 0x65, 0x79, 0x20, 0x69, 0x73, 0x20, 0x76, 0x61, 0x6C, 0x69,
+ 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74,
+ 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D,
+ 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E,
+ 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x4C, 0x61, 0x73, 0x74, 0x22,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D,
+ 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67,
+ 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F,
+ 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63,
+ 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x4E, 0x6F, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
+ 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x6F, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x69, 0x6E,
+ 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x61, 0x62, 0x6F, 0x75, 0x74, 0x20,
+ 0x61, 0x20, 0x6E, 0x6F, 0x64, 0x65, 0x20, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F,
+ 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72,
+ 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x6C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6C, 0x61, 0x73, 0x74, 0x43, 0x6F,
+ 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6E, 0x65, 0x78, 0x74, 0x43, 0x6F,
+ 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6E, 0x65, 0x78, 0x74, 0x43, 0x6F,
+ 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x52, 0x6F,
+ 0x75, 0x6E, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6E,
+ 0x65, 0x78, 0x74, 0x43, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6F, 0x6E, 0x53, 0x75, 0x70, 0x70, 0x6F, 0x72, 0x74, 0x65, 0x64, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x69, 0x6D, 0x65, 0x53, 0x69, 0x6E, 0x63,
+ 0x65, 0x4C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x61, 0x74, 0x63, 0x68, 0x75, 0x70, 0x54, 0x69, 0x6D,
+ 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x68, 0x61, 0x73,
+ 0x53, 0x79, 0x6E, 0x63, 0x65, 0x64, 0x53, 0x69, 0x6E, 0x63, 0x65, 0x53, 0x74, 0x61, 0x72, 0x74,
+ 0x75, 0x70, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x74,
+ 0x6F, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x55, 0x6E, 0x73, 0x75, 0x70, 0x70, 0x6F, 0x72, 0x74,
+ 0x65, 0x64, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74,
+ 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x63, 0x61, 0x74, 0x63, 0x68, 0x75, 0x70, 0x54, 0x69, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x7B,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x61, 0x74, 0x63, 0x68,
+ 0x75, 0x70, 0x54, 0x69, 0x6D, 0x65, 0x20, 0x69, 0x6E, 0x20, 0x6E, 0x61, 0x6E, 0x6F, 0x73, 0x65,
+ 0x63, 0x6F, 0x6E, 0x64, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67,
0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D,
- 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6C, 0x6F, 0x73, 0x65,
- 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x6F, 0x22,
- 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x6F,
- 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72,
- 0x27, 0x73, 0x20, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67,
+ 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x61, 0x74, 0x63, 0x68, 0x75,
+ 0x70, 0x54, 0x69, 0x6D, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x68, 0x61, 0x73, 0x53, 0x79,
+ 0x6E, 0x63, 0x65, 0x64, 0x53, 0x69, 0x6E, 0x63, 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x75, 0x70,
+ 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x48,
+ 0x61, 0x73, 0x53, 0x79, 0x6E, 0x63, 0x65, 0x64, 0x53, 0x69, 0x6E, 0x63, 0x65, 0x53, 0x74, 0x61,
+ 0x72, 0x74, 0x75, 0x70, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x77,
+ 0x68, 0x65, 0x74, 0x68, 0x65, 0x72, 0x20, 0x61, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x68,
+ 0x61, 0x73, 0x20, 0x63, 0x6F, 0x6D, 0x70, 0x6C, 0x65, 0x74, 0x65, 0x64, 0x20, 0x73, 0x69, 0x6E,
+ 0x63, 0x65, 0x20, 0x73, 0x74, 0x61, 0x72, 0x74, 0x75, 0x70, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x62, 0x6F, 0x6F, 0x6C, 0x65, 0x61, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A,
+ 0x20, 0x22, 0x48, 0x61, 0x73, 0x53, 0x79, 0x6E, 0x63, 0x65, 0x64, 0x53, 0x69, 0x6E, 0x63, 0x65,
+ 0x53, 0x74, 0x61, 0x72, 0x74, 0x75, 0x70, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6C, 0x61, 0x73,
+ 0x74, 0x43, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F,
+ 0x6E, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
+ 0x4C, 0x61, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x69, 0x6E, 0x64, 0x69,
+ 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6C, 0x61, 0x73, 0x74, 0x20, 0x63,
+ 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E,
+ 0x20, 0x73, 0x75, 0x70, 0x70, 0x6F, 0x72, 0x74, 0x65, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22,
0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20,
- 0x22, 0x54, 0x6F, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x6F, 0x72, 0x65, 0x77, 0x61, 0x72,
- 0x64, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x4C, 0x61, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x6C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4C, 0x61, 0x73, 0x74, 0x52, 0x6F,
+ 0x75, 0x6E, 0x64, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x68,
+ 0x65, 0x20, 0x6C, 0x61, 0x73, 0x74, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x73, 0x65, 0x65,
+ 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74,
+ 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D,
+ 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E,
+ 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64,
+ 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6E, 0x65, 0x78, 0x74, 0x43, 0x6F, 0x6E, 0x73, 0x65, 0x6E,
+ 0x73, 0x75, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x6F, 0x66, 0x20, 0x63, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75,
+ 0x73, 0x20, 0x70, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x20, 0x74, 0x6F, 0x20, 0x75, 0x73,
+ 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74,
+ 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D,
+ 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6F, 0x6E, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6E, 0x65, 0x78, 0x74, 0x43, 0x6F, 0x6E,
+ 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x52, 0x6F, 0x75,
+ 0x6E, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20,
- 0x22, 0x54, 0x6F, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68,
- 0x65, 0x20, 0x61, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x70, 0x65, 0x6E, 0x64,
- 0x69, 0x6E, 0x67, 0x20, 0x72, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x20, 0x61, 0x70, 0x70, 0x6C,
- 0x69, 0x65, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x74, 0x68, 0x65, 0x20, 0x54, 0x6F, 0x20, 0x61, 0x63,
- 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x5C, 0x6E, 0x61, 0x73, 0x20, 0x70, 0x61, 0x72, 0x74, 0x20, 0x6F,
- 0x66, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6F, 0x6E, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F,
- 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F,
- 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x6F, 0x52, 0x65, 0x77, 0x61, 0x72,
- 0x64, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D,
- 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69,
- 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E,
- 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61,
- 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73,
- 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x50, 0x65, 0x6E, 0x64, 0x69, 0x6E, 0x67, 0x54, 0x72, 0x61, 0x6E, 0x73,
- 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A,
- 0x20, 0x22, 0x50, 0x65, 0x6E, 0x64, 0x69, 0x6E, 0x67, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63,
- 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x20, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6E, 0x74, 0x73,
- 0x20, 0x61, 0x20, 0x70, 0x6F, 0x74, 0x65, 0x6E, 0x74, 0x69, 0x61, 0x6C, 0x6C, 0x79, 0x20, 0x74,
- 0x72, 0x75, 0x6E, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x6C, 0x69, 0x73, 0x74, 0x20, 0x6F, 0x66,
- 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x20, 0x63, 0x75,
- 0x72, 0x72, 0x65, 0x6E, 0x74, 0x6C, 0x79, 0x20, 0x69, 0x6E, 0x20, 0x74, 0x68, 0x65, 0x5C, 0x6E,
- 0x6E, 0x6F, 0x64, 0x65, 0x27, 0x73, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6F, 0x6E, 0x20, 0x70, 0x6F, 0x6F, 0x6C, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72,
- 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x74, 0x72, 0x75, 0x6E, 0x63, 0x61, 0x74, 0x65, 0x64, 0x54, 0x78, 0x6E, 0x73, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x6F, 0x74, 0x61, 0x6C, 0x54, 0x78,
- 0x6E, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A,
- 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x6F, 0x74, 0x61,
- 0x6C, 0x54, 0x78, 0x6E, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E,
- 0x22, 0x3A, 0x20, 0x22, 0x54, 0x6F, 0x74, 0x61, 0x6C, 0x54, 0x78, 0x6E, 0x73, 0x22, 0x2C, 0x0A,
+ 0x22, 0x4E, 0x65, 0x78, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x52, 0x6F, 0x75, 0x6E,
+ 0x64, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x61,
+ 0x74, 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6E, 0x65, 0x78, 0x74,
+ 0x20, 0x63, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6F, 0x6E, 0x20, 0x77, 0x69, 0x6C, 0x6C, 0x20, 0x61, 0x70, 0x70, 0x6C, 0x79, 0x22, 0x2C, 0x0A,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22,
0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A,
0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22,
- 0x3A, 0x20, 0x22, 0x54, 0x6F, 0x74, 0x61, 0x6C, 0x54, 0x78, 0x6E, 0x73, 0x22, 0x0A, 0x20, 0x20,
+ 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x52, 0x6F,
+ 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6E, 0x65, 0x78, 0x74, 0x43, 0x6F, 0x6E,
+ 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x53, 0x75, 0x70,
+ 0x70, 0x6F, 0x72, 0x74, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F,
+ 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E,
+ 0x53, 0x75, 0x70, 0x70, 0x6F, 0x72, 0x74, 0x65, 0x64, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x73, 0x20, 0x77, 0x68, 0x65, 0x74, 0x68, 0x65, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20,
+ 0x6E, 0x65, 0x78, 0x74, 0x20, 0x63, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x20, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x69, 0x73, 0x20, 0x73, 0x75, 0x70, 0x70, 0x6F, 0x72,
+ 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6E, 0x6F, 0x64, 0x65,
+ 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79,
+ 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x62, 0x6F, 0x6F, 0x6C, 0x65, 0x61, 0x6E, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D,
+ 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x65, 0x78, 0x74, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6F, 0x6E, 0x53, 0x75, 0x70, 0x70, 0x6F, 0x72, 0x74, 0x65, 0x64, 0x22, 0x0A, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x74, 0x72, 0x75, 0x6E, 0x63, 0x61, 0x74, 0x65, 0x64, 0x54, 0x78, 0x6E, 0x73, 0x22,
- 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24,
- 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74,
- 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E,
- 0x4C, 0x69, 0x73, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22,
- 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72,
- 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F,
- 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69,
- 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6D,
- 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x77, 0x61, 0x67,
- 0x67, 0x65, 0x72, 0x3A, 0x20, 0x6D, 0x6F, 0x64, 0x65, 0x6C, 0x20, 0x53, 0x74, 0x61, 0x74, 0x65,
- 0x53, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x69, 0x74, 0x6C, 0x65, 0x22, 0x3A, 0x20,
- 0x22, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x20, 0x72, 0x65, 0x70,
- 0x72, 0x65, 0x73, 0x65, 0x6E, 0x74, 0x73, 0x20, 0x61, 0x20, 0x4C, 0x6F, 0x63, 0x61, 0x6C, 0x53,
- 0x74, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x20, 0x6F, 0x72, 0x20, 0x47, 0x6C,
- 0x6F, 0x62, 0x61, 0x6C, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x2E,
- 0x20, 0x54, 0x68, 0x65, 0x73, 0x65, 0x5C, 0x6E, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x73, 0x20,
- 0x64, 0x65, 0x74, 0x65, 0x72, 0x6D, 0x69, 0x6E, 0x65, 0x20, 0x68, 0x6F, 0x77, 0x20, 0x6D, 0x75,
- 0x63, 0x68, 0x20, 0x73, 0x74, 0x6F, 0x72, 0x61, 0x67, 0x65, 0x20, 0x6D, 0x61, 0x79, 0x20, 0x62,
- 0x65, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6E, 0x20, 0x61, 0x20, 0x4C, 0x6F, 0x63, 0x61,
- 0x6C, 0x53, 0x74, 0x61, 0x74, 0x65, 0x20, 0x6F, 0x72, 0x5C, 0x6E, 0x47, 0x6C, 0x6F, 0x62, 0x61,
- 0x6C, 0x53, 0x74, 0x61, 0x74, 0x65, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x61, 0x6E, 0x20, 0x61, 0x70,
- 0x70, 0x6C, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x2E, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6D,
- 0x6F, 0x72, 0x65, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x75, 0x73, 0x65, 0x64, 0x2C, 0x20,
- 0x74, 0x68, 0x65, 0x20, 0x6C, 0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x6D, 0x69, 0x6E, 0x69, 0x6D,
- 0x75, 0x6D, 0x5C, 0x6E, 0x62, 0x61, 0x6C, 0x61, 0x6E, 0x63, 0x65, 0x20, 0x6D, 0x75, 0x73, 0x74,
- 0x20, 0x62, 0x65, 0x20, 0x6D, 0x61, 0x69, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x65, 0x64, 0x20, 0x69,
- 0x6E, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x63, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x68, 0x6F,
- 0x6C, 0x64, 0x69, 0x6E, 0x67, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x61, 0x74, 0x61, 0x2E, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65,
- 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x75,
- 0x69, 0x6E, 0x74, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x62, 0x79, 0x74, 0x65, 0x73, 0x6C, 0x69, 0x63, 0x65, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70,
- 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x62, 0x79, 0x74, 0x65, 0x73, 0x6C, 0x69, 0x63, 0x65, 0x73, 0x22, 0x3A,
+ 0x20, 0x22, 0x73, 0x74, 0x6F, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x55, 0x6E, 0x73, 0x75, 0x70,
+ 0x70, 0x6F, 0x72, 0x74, 0x65, 0x64, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x53, 0x74, 0x6F, 0x70, 0x70, 0x65,
+ 0x64, 0x41, 0x74, 0x55, 0x6E, 0x73, 0x75, 0x70, 0x70, 0x6F, 0x72, 0x74, 0x65, 0x64, 0x52, 0x6F,
+ 0x75, 0x6E, 0x64, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x68,
+ 0x61, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6E, 0x6F, 0x64, 0x65, 0x20, 0x64, 0x6F, 0x65, 0x73,
+ 0x20, 0x6E, 0x6F, 0x74, 0x20, 0x73, 0x75, 0x70, 0x70, 0x6F, 0x72, 0x74, 0x20, 0x74, 0x68, 0x65,
+ 0x20, 0x6E, 0x65, 0x77, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x73, 0x20, 0x61, 0x6E, 0x64, 0x20,
+ 0x68, 0x61, 0x73, 0x20, 0x73, 0x74, 0x6F, 0x70, 0x70, 0x65, 0x64, 0x20, 0x6D, 0x61, 0x6B, 0x69,
+ 0x6E, 0x67, 0x20, 0x70, 0x72, 0x6F, 0x67, 0x72, 0x65, 0x73, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20,
+ 0x22, 0x62, 0x6F, 0x6F, 0x6C, 0x65, 0x61, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22,
+ 0x3A, 0x20, 0x22, 0x53, 0x74, 0x6F, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x55, 0x6E, 0x73, 0x75,
+ 0x70, 0x70, 0x6F, 0x72, 0x74, 0x65, 0x64, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x74, 0x69, 0x6D, 0x65, 0x53, 0x69, 0x6E, 0x63, 0x65, 0x4C, 0x61, 0x73, 0x74, 0x52,
+ 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22,
+ 0x3A, 0x20, 0x22, 0x54, 0x69, 0x6D, 0x65, 0x53, 0x69, 0x6E, 0x63, 0x65, 0x4C, 0x61, 0x73, 0x74,
+ 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x69, 0x6E, 0x20, 0x6E, 0x61, 0x6E, 0x6F, 0x73, 0x65, 0x63,
+ 0x6F, 0x6E, 0x64, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65,
+ 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66,
+ 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F,
+ 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x69, 0x6D, 0x65, 0x53, 0x69, 0x6E,
+ 0x63, 0x65, 0x4C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B,
+ 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F,
+ 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C,
+ 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C,
+ 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x50, 0x61, 0x72,
+ 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F,
+ 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x50, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69,
+ 0x6F, 0x6E, 0x20, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x61, 0x72, 0x74, 0x70, 0x6B, 0x62, 0x36, 0x34, 0x22,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x76, 0x72, 0x66, 0x70, 0x6B,
+ 0x62, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x76,
+ 0x6F, 0x74, 0x65, 0x66, 0x73, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x76, 0x6F, 0x74, 0x65, 0x6C, 0x73, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x76, 0x6F, 0x74, 0x65, 0x6B, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F,
+ 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x61, 0x72, 0x74, 0x70, 0x6B, 0x62, 0x36, 0x34, 0x22, 0x3A,
0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x75, 0x6D,
- 0x42, 0x79, 0x74, 0x65, 0x53, 0x6C, 0x69, 0x63, 0x65, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65,
- 0x20, 0x6D, 0x61, 0x78, 0x69, 0x6D, 0x75, 0x6D, 0x20, 0x6E, 0x75, 0x6D, 0x62, 0x65, 0x72, 0x20,
- 0x6F, 0x66, 0x20, 0x54, 0x45, 0x41, 0x4C, 0x20, 0x62, 0x79, 0x74, 0x65, 0x20, 0x73, 0x6C, 0x69,
- 0x63, 0x65, 0x73, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x6D, 0x61, 0x79, 0x20, 0x62, 0x65, 0x5C,
- 0x6E, 0x73, 0x74, 0x6F, 0x72, 0x65, 0x64, 0x20, 0x69, 0x6E, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6B,
- 0x65, 0x79, 0x2F, 0x76, 0x61, 0x6C, 0x75, 0x65, 0x20, 0x73, 0x74, 0x6F, 0x72, 0x65, 0x22, 0x2C,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x50, 0x61, 0x72,
+ 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x50, 0x4B, 0x20, 0x69, 0x73, 0x20,
+ 0x74, 0x68, 0x65, 0x20, 0x72, 0x6F, 0x6F, 0x74, 0x20, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69,
+ 0x70, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x70, 0x75, 0x62, 0x6C, 0x69, 0x63, 0x20, 0x6B, 0x65,
+ 0x79, 0x20, 0x28, 0x69, 0x66, 0x20, 0x61, 0x6E, 0x79, 0x29, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65,
+ 0x6E, 0x74, 0x6C, 0x79, 0x20, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x20,
+ 0x66, 0x6F, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x2C,
0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65,
- 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22,
- 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65,
- 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x75, 0x6D, 0x42, 0x79, 0x74, 0x65, 0x53, 0x6C, 0x69, 0x63, 0x65,
+ 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A,
+ 0x20, 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20,
+ 0x22, 0x50, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x50, 0x4B,
0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x75, 0x6D, 0x55, 0x69, 0x6E,
- 0x74, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6D, 0x61, 0x78, 0x69, 0x6D, 0x75, 0x6D,
- 0x20, 0x6E, 0x75, 0x6D, 0x62, 0x65, 0x72, 0x20, 0x6F, 0x66, 0x20, 0x54, 0x45, 0x41, 0x4C, 0x20,
- 0x75, 0x69, 0x6E, 0x74, 0x73, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x6D, 0x61, 0x79, 0x20, 0x62,
- 0x65, 0x20, 0x73, 0x74, 0x6F, 0x72, 0x65, 0x64, 0x20, 0x69, 0x6E, 0x5C, 0x6E, 0x74, 0x68, 0x65,
- 0x20, 0x6B, 0x65, 0x79, 0x2F, 0x76, 0x61, 0x6C, 0x75, 0x65, 0x20, 0x73, 0x74, 0x6F, 0x72, 0x65,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79,
- 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61,
- 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61,
- 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x75, 0x6D, 0x55, 0x69, 0x6E, 0x74, 0x22, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61,
- 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E,
- 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D,
- 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F,
- 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76,
- 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x53,
- 0x75, 0x70, 0x70, 0x6C, 0x79, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
- 0x53, 0x75, 0x70, 0x70, 0x6C, 0x79, 0x20, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6E, 0x74,
- 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x73, 0x75,
- 0x70, 0x70, 0x6C, 0x79, 0x20, 0x6F, 0x66, 0x20, 0x4D, 0x69, 0x63, 0x72, 0x6F, 0x41, 0x6C, 0x67,
- 0x6F, 0x73, 0x20, 0x69, 0x6E, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6D,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
- 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x6F, 0x74, 0x61, 0x6C, 0x4D, 0x6F,
- 0x6E, 0x65, 0x79, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6F,
- 0x6E, 0x6C, 0x69, 0x6E, 0x65, 0x4D, 0x6F, 0x6E, 0x65, 0x79, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70,
- 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x6F, 0x6E, 0x6C, 0x69, 0x6E, 0x65, 0x4D, 0x6F, 0x6E, 0x65, 0x79, 0x22,
- 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4F, 0x6E,
- 0x6C, 0x69, 0x6E, 0x65, 0x4D, 0x6F, 0x6E, 0x65, 0x79, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69,
- 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69,
- 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4F,
- 0x6E, 0x6C, 0x69, 0x6E, 0x65, 0x4D, 0x6F, 0x6E, 0x65, 0x79, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x76, 0x6F, 0x74, 0x65, 0x66, 0x73, 0x74, 0x22, 0x3A, 0x20,
+ 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65,
+ 0x46, 0x69, 0x72, 0x73, 0x74, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x69, 0x72,
+ 0x73, 0x74, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x77, 0x68, 0x69,
+ 0x63, 0x68, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70,
+ 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x69, 0x73, 0x20, 0x76, 0x61, 0x6C, 0x69, 0x64, 0x2E, 0x22,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70,
+ 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74,
+ 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D,
+ 0x65, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x46, 0x69, 0x72, 0x73, 0x74, 0x22, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x76, 0x6F, 0x74, 0x65, 0x6B, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x4B, 0x65, 0x79,
+ 0x44, 0x69, 0x6C, 0x75, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20,
+ 0x6E, 0x75, 0x6D, 0x62, 0x65, 0x72, 0x20, 0x6F, 0x66, 0x20, 0x73, 0x75, 0x62, 0x6B, 0x65, 0x79,
+ 0x73, 0x20, 0x69, 0x6E, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x65, 0x61, 0x63, 0x68, 0x20, 0x62, 0x61,
+ 0x74, 0x63, 0x68, 0x20, 0x6F, 0x66, 0x20, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61,
+ 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x6B, 0x65, 0x79, 0x73, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75,
+ 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x56, 0x6F, 0x74, 0x65, 0x4B, 0x65, 0x79, 0x44, 0x69, 0x6C, 0x75, 0x74, 0x69, 0x6F, 0x6E, 0x22,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x76, 0x6F, 0x74, 0x65, 0x6C, 0x73, 0x74, 0x22, 0x3A, 0x20, 0x7B,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x4C,
+ 0x61, 0x73, 0x74, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6C, 0x61, 0x73, 0x74, 0x20,
+ 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20,
+ 0x74, 0x68, 0x69, 0x73, 0x20, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69,
+ 0x6F, 0x6E, 0x20, 0x69, 0x73, 0x20, 0x76, 0x61, 0x6C, 0x69, 0x64, 0x2E, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
+ 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20,
+ 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A,
+ 0x20, 0x22, 0x56, 0x6F, 0x74, 0x65, 0x4C, 0x61, 0x73, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E,
- 0x22, 0x3A, 0x20, 0x22, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69,
- 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69,
- 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x52,
- 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x6F, 0x74, 0x61, 0x6C, 0x4D,
- 0x6F, 0x6E, 0x65, 0x79, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22,
- 0x3A, 0x20, 0x22, 0x54, 0x6F, 0x74, 0x61, 0x6C, 0x4D, 0x6F, 0x6E, 0x65, 0x79, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22,
- 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A,
- 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22,
- 0x3A, 0x20, 0x22, 0x54, 0x6F, 0x74, 0x61, 0x6C, 0x4D, 0x6F, 0x6E, 0x65, 0x79, 0x22, 0x0A, 0x20,
+ 0x76, 0x72, 0x66, 0x70, 0x6B, 0x62, 0x36, 0x34, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x52, 0x46, 0x50, 0x4B, 0x20, 0x69, 0x73, 0x20,
+ 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x6C, 0x65, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x70, 0x75,
+ 0x62, 0x6C, 0x69, 0x63, 0x20, 0x6B, 0x65, 0x79, 0x20, 0x28, 0x69, 0x66, 0x20, 0x61, 0x6E, 0x79,
+ 0x29, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x6C, 0x79, 0x20, 0x72, 0x65, 0x67, 0x69,
+ 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20,
+ 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E,
+ 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66,
+ 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D,
+ 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x52, 0x46, 0x50, 0x4B, 0x22, 0x0A, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D,
0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61,
0x63, 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E,
0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D,
0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F,
0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76,
- 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x54,
- 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F,
- 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E,
- 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x61, 0x6C, 0x6C, 0x20, 0x66, 0x69,
- 0x65, 0x6C, 0x64, 0x73, 0x20, 0x63, 0x6F, 0x6D, 0x6D, 0x6F, 0x6E, 0x20, 0x74, 0x6F, 0x20, 0x61,
- 0x6C, 0x6C, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x20,
- 0x61, 0x6E, 0x64, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x73, 0x20, 0x61, 0x73, 0x20, 0x61, 0x6E,
- 0x20, 0x65, 0x6E, 0x76, 0x65, 0x6C, 0x6F, 0x70, 0x65, 0x20, 0x74, 0x6F, 0x20, 0x61, 0x6C, 0x6C,
- 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x5C, 0x6E, 0x74,
- 0x79, 0x70, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70,
- 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20,
- 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x78, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x72, 0x6F, 0x6D, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x65, 0x65, 0x22, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x69, 0x72, 0x73, 0x74, 0x2D, 0x72, 0x6F,
- 0x75, 0x6E, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6C,
- 0x61, 0x73, 0x74, 0x2D, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x61, 0x70, 0x70, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x63, 0x65, 0x72, 0x74, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x67, 0x65, 0x6E, 0x65, 0x73,
- 0x69, 0x73, 0x49, 0x44, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x67, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x68, 0x61, 0x73, 0x68, 0x62, 0x36, 0x34, 0x22, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x61, 0x70, 0x70, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22,
- 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73,
- 0x2F, 0x41, 0x70, 0x70, 0x6C, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x43, 0x61, 0x6C, 0x6C,
- 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65, 0x22,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x63, 0x65, 0x72, 0x74,
- 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69,
- 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x43, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x43, 0x65, 0x72,
- 0x74, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65,
- 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x75, 0x72, 0x63, 0x66, 0x67, 0x22, 0x3A, 0x20, 0x7B,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66,
- 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E,
- 0x73, 0x2F, 0x41, 0x73, 0x73, 0x65, 0x74, 0x43, 0x6F, 0x6E, 0x66, 0x69, 0x67, 0x54, 0x72, 0x61,
- 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65, 0x22, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x63, 0x75, 0x72, 0x66, 0x72, 0x7A, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22,
- 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x41, 0x73,
- 0x73, 0x65, 0x74, 0x46, 0x72, 0x65, 0x65, 0x7A, 0x65, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63,
- 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x75,
- 0x72, 0x78, 0x66, 0x65, 0x72, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64,
- 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x41, 0x73, 0x73, 0x65, 0x74,
- 0x54, 0x72, 0x61, 0x6E, 0x73, 0x66, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74,
- 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x65, 0x65,
- 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x46,
- 0x65, 0x65, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61,
- 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x66, 0x65, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69,
- 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69,
- 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x46,
- 0x65, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x69, 0x72, 0x73, 0x74, 0x2D, 0x72, 0x6F,
- 0x75, 0x6E, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A,
- 0x20, 0x22, 0x46, 0x69, 0x72, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x69, 0x6E, 0x64,
- 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x69, 0x72, 0x73, 0x74,
- 0x20, 0x76, 0x61, 0x6C, 0x69, 0x64, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x66, 0x6F, 0x72,
- 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F,
- 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74,
- 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D,
- 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E,
- 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x46, 0x69, 0x72, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E,
- 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x72, 0x6F, 0x6D, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
+ 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x50,
+ 0x61, 0x79, 0x6D, 0x65, 0x6E, 0x74, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F,
+ 0x6E, 0x54, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
+ 0x50, 0x61, 0x79, 0x6D, 0x65, 0x6E, 0x74, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
+ 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20,
+ 0x74, 0x68, 0x65, 0x20, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x61, 0x6C, 0x20, 0x66,
+ 0x69, 0x65, 0x6C, 0x64, 0x73, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x61, 0x20, 0x70, 0x61, 0x79, 0x6D,
+ 0x65, 0x6E, 0x74, 0x20, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x22,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20,
+ 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x6F, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x61, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F,
+ 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x61, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x46, 0x72, 0x6F, 0x6D, 0x20, 0x69,
- 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x6E, 0x64, 0x65, 0x72, 0x27, 0x73, 0x20, 0x61,
- 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69,
- 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x46, 0x72, 0x6F,
- 0x6D, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x72, 0x6F, 0x6D, 0x72, 0x65, 0x77, 0x61, 0x72,
- 0x64, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20,
- 0x22, 0x46, 0x72, 0x6F, 0x6D, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x20, 0x69, 0x73, 0x20,
- 0x74, 0x68, 0x65, 0x20, 0x61, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x70, 0x65,
- 0x6E, 0x64, 0x69, 0x6E, 0x67, 0x20, 0x72, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x20, 0x61, 0x70,
- 0x70, 0x6C, 0x69, 0x65, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x74, 0x68, 0x65, 0x20, 0x46, 0x72, 0x6F,
- 0x6D, 0x5C, 0x6E, 0x61, 0x63, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x61, 0x73, 0x20, 0x70, 0x61,
- 0x72, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73,
- 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x41, 0x6D, 0x6F, 0x75, 0x6E, 0x74,
+ 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x6F,
+ 0x66, 0x20, 0x4D, 0x69, 0x63, 0x72, 0x6F, 0x41, 0x6C, 0x67, 0x6F, 0x73, 0x20, 0x69, 0x6E, 0x74,
+ 0x65, 0x6E, 0x64, 0x65, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x62, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6E,
+ 0x73, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74,
0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74,
0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x46, 0x72, 0x6F,
- 0x6D, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x67, 0x65,
- 0x6E, 0x65, 0x73, 0x69, 0x73, 0x49, 0x44, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
- 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x47, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x20, 0x49, 0x44,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79,
- 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E,
- 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x47, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x49, 0x44,
- 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x67, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x68, 0x61, 0x73,
- 0x68, 0x62, 0x36, 0x34, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x41, 0x6D, 0x6F,
+ 0x75, 0x6E, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x6C, 0x6F, 0x73, 0x65, 0x22, 0x3A,
+ 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6C, 0x6F,
+ 0x73, 0x65, 0x52, 0x65, 0x6D, 0x61, 0x69, 0x6E, 0x64, 0x65, 0x72, 0x54, 0x6F, 0x20, 0x69, 0x73,
+ 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x20, 0x74, 0x68, 0x65,
+ 0x20, 0x73, 0x65, 0x6E, 0x64, 0x65, 0x72, 0x20, 0x63, 0x6C, 0x6F, 0x73, 0x65, 0x64, 0x20, 0x74,
+ 0x6F, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74,
+ 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D,
+ 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6C, 0x6F, 0x73, 0x65, 0x52, 0x65, 0x6D,
+ 0x61, 0x69, 0x6E, 0x64, 0x65, 0x72, 0x54, 0x6F, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x6C,
+ 0x6F, 0x73, 0x65, 0x61, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6C, 0x6F, 0x73, 0x65, 0x41, 0x6D, 0x6F,
+ 0x75, 0x6E, 0x74, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x6D, 0x6F, 0x75, 0x6E,
+ 0x74, 0x20, 0x73, 0x65, 0x6E, 0x74, 0x20, 0x74, 0x6F, 0x20, 0x43, 0x6C, 0x6F, 0x73, 0x65, 0x52,
+ 0x65, 0x6D, 0x61, 0x69, 0x6E, 0x64, 0x65, 0x72, 0x54, 0x6F, 0x2C, 0x20, 0x66, 0x6F, 0x72, 0x20,
+ 0x63, 0x6F, 0x6D, 0x6D, 0x69, 0x74, 0x74, 0x65, 0x64, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61,
+ 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67,
+ 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34,
+ 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D,
+ 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6C, 0x6F, 0x73, 0x65,
+ 0x41, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x6C, 0x6F, 0x73,
+ 0x65, 0x72, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6C, 0x6F, 0x73, 0x65, 0x52, 0x65, 0x77, 0x61,
+ 0x72, 0x64, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x6D, 0x6F, 0x75, 0x6E,
+ 0x74, 0x20, 0x6F, 0x66, 0x20, 0x70, 0x65, 0x6E, 0x64, 0x69, 0x6E, 0x67, 0x20, 0x72, 0x65, 0x77,
+ 0x61, 0x72, 0x64, 0x73, 0x20, 0x61, 0x70, 0x70, 0x6C, 0x69, 0x65, 0x64, 0x20, 0x74, 0x6F, 0x20,
+ 0x74, 0x68, 0x65, 0x20, 0x43, 0x6C, 0x6F, 0x73, 0x65, 0x52, 0x65, 0x6D, 0x61, 0x69, 0x6E, 0x64,
+ 0x65, 0x72, 0x54, 0x6F, 0x5C, 0x6E, 0x61, 0x63, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x61, 0x73,
+ 0x20, 0x70, 0x61, 0x72, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x72,
+ 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75,
+ 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x43, 0x6C, 0x6F, 0x73, 0x65, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x22, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x74, 0x6F, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22,
- 0x3A, 0x20, 0x22, 0x47, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x20, 0x68, 0x61, 0x73, 0x68, 0x22,
+ 0x3A, 0x20, 0x22, 0x54, 0x6F, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x63,
+ 0x65, 0x69, 0x76, 0x65, 0x72, 0x27, 0x73, 0x20, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22,
0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70,
0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22,
- 0x3A, 0x20, 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A,
- 0x20, 0x22, 0x47, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x48, 0x61, 0x73, 0x68, 0x22, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x67, 0x72, 0x6F, 0x75, 0x70, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x47, 0x72, 0x6F, 0x75, 0x70, 0x22, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
- 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22,
- 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x47,
- 0x72, 0x6F, 0x75, 0x70, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6B, 0x65, 0x79, 0x72, 0x65, 0x67,
- 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69,
- 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x4B, 0x65, 0x79, 0x72, 0x65, 0x67, 0x54, 0x72, 0x61, 0x6E,
- 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x6C, 0x61, 0x73, 0x74, 0x2D, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4C, 0x61, 0x73, 0x74, 0x52, 0x6F,
- 0x75, 0x6E, 0x64, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x68,
- 0x65, 0x20, 0x6C, 0x61, 0x73, 0x74, 0x20, 0x76, 0x61, 0x6C, 0x69, 0x64, 0x20, 0x72, 0x6F, 0x75,
- 0x6E, 0x64, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x72, 0x61, 0x6E,
- 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74,
- 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74,
- 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4C, 0x61, 0x73,
- 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6C, 0x65, 0x61, 0x73,
- 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
- 0x4C, 0x65, 0x61, 0x73, 0x65, 0x20, 0x65, 0x6E, 0x66, 0x6F, 0x72, 0x63, 0x65, 0x73, 0x20, 0x6D,
- 0x75, 0x74, 0x75, 0x61, 0x6C, 0x20, 0x65, 0x78, 0x63, 0x6C, 0x75, 0x73, 0x69, 0x6F, 0x6E, 0x20,
- 0x6F, 0x66, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2E,
- 0x20, 0x20, 0x49, 0x66, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x66, 0x69, 0x65, 0x6C, 0x64, 0x20,
- 0x69, 0x73, 0x5C, 0x6E, 0x6E, 0x6F, 0x6E, 0x7A, 0x65, 0x72, 0x6F, 0x2C, 0x20, 0x74, 0x68, 0x65,
- 0x6E, 0x20, 0x6F, 0x6E, 0x63, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73,
- 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x69, 0x73, 0x20, 0x63, 0x6F, 0x6E, 0x66, 0x69, 0x72,
- 0x6D, 0x65, 0x64, 0x2C, 0x20, 0x69, 0x74, 0x20, 0x61, 0x63, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73,
- 0x20, 0x74, 0x68, 0x65, 0x5C, 0x6E, 0x6C, 0x65, 0x61, 0x73, 0x65, 0x20, 0x69, 0x64, 0x65, 0x6E,
- 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, 0x20, 0x28, 0x53,
- 0x65, 0x6E, 0x64, 0x65, 0x72, 0x2C, 0x20, 0x4C, 0x65, 0x61, 0x73, 0x65, 0x29, 0x20, 0x70, 0x61,
- 0x69, 0x72, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61,
- 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x75, 0x6E, 0x74, 0x69, 0x6C, 0x5C, 0x6E, 0x74, 0x68, 0x65,
- 0x20, 0x4C, 0x61, 0x73, 0x74, 0x56, 0x61, 0x6C, 0x69, 0x64, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64,
- 0x20, 0x70, 0x61, 0x73, 0x73, 0x65, 0x73, 0x2E, 0x20, 0x20, 0x57, 0x68, 0x69, 0x6C, 0x65, 0x20,
- 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E,
- 0x20, 0x70, 0x6F, 0x73, 0x73, 0x65, 0x73, 0x73, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x5C, 0x6E,
- 0x6C, 0x65, 0x61, 0x73, 0x65, 0x2C, 0x20, 0x6E, 0x6F, 0x20, 0x6F, 0x74, 0x68, 0x65, 0x72, 0x20,
- 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x73, 0x70, 0x65, 0x63,
- 0x69, 0x66, 0x79, 0x69, 0x6E, 0x67, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6C, 0x65, 0x61, 0x73,
- 0x65, 0x20, 0x63, 0x61, 0x6E, 0x20, 0x62, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x66, 0x69, 0x72, 0x6D,
- 0x65, 0x64, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72,
- 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61,
- 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4C, 0x65, 0x61, 0x73, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x6E, 0x6F, 0x74, 0x65, 0x62, 0x36, 0x34, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x6F, 0x74, 0x65, 0x20, 0x69, 0x73, 0x20, 0x61,
- 0x20, 0x66, 0x72, 0x65, 0x65, 0x20, 0x66, 0x6F, 0x72, 0x6D, 0x20, 0x64, 0x61, 0x74, 0x61, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70,
- 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22,
- 0x3A, 0x20, 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A,
- 0x20, 0x22, 0x4E, 0x6F, 0x74, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x61, 0x79, 0x6D,
- 0x65, 0x6E, 0x74, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66,
- 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x50, 0x61, 0x79, 0x6D, 0x65, 0x6E, 0x74,
- 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65, 0x22,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x6F, 0x6F, 0x6C, 0x65, 0x72, 0x72, 0x6F, 0x72, 0x22, 0x3A,
- 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x50, 0x6F, 0x6F,
- 0x6C, 0x45, 0x72, 0x72, 0x6F, 0x72, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73,
- 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E,
- 0x20, 0x77, 0x61, 0x73, 0x20, 0x65, 0x76, 0x69, 0x63, 0x74, 0x65, 0x64, 0x20, 0x66, 0x72, 0x6F,
- 0x6D, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6E, 0x6F, 0x64, 0x65, 0x27, 0x73, 0x20, 0x74, 0x72,
- 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x5C, 0x6E, 0x70, 0x6F, 0x6F, 0x6C, 0x20,
- 0x28, 0x69, 0x66, 0x20, 0x6E, 0x6F, 0x6E, 0x2D, 0x65, 0x6D, 0x70, 0x74, 0x79, 0x29, 0x2E, 0x20,
- 0x20, 0x41, 0x20, 0x6E, 0x6F, 0x6E, 0x2D, 0x65, 0x6D, 0x70, 0x74, 0x79, 0x20, 0x50, 0x6F, 0x6F,
- 0x6C, 0x45, 0x72, 0x72, 0x6F, 0x72, 0x20, 0x64, 0x6F, 0x65, 0x73, 0x20, 0x6E, 0x6F, 0x74, 0x20,
- 0x67, 0x75, 0x61, 0x72, 0x61, 0x6E, 0x74, 0x65, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x74,
- 0x68, 0x65, 0x5C, 0x6E, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20,
- 0x77, 0x69, 0x6C, 0x6C, 0x20, 0x6E, 0x65, 0x76, 0x65, 0x72, 0x20, 0x62, 0x65, 0x20, 0x63, 0x6F,
- 0x6D, 0x6D, 0x69, 0x74, 0x74, 0x65, 0x64, 0x3B, 0x20, 0x6F, 0x74, 0x68, 0x65, 0x72, 0x20, 0x6E,
- 0x6F, 0x64, 0x65, 0x73, 0x20, 0x6D, 0x61, 0x79, 0x20, 0x6E, 0x6F, 0x74, 0x20, 0x68, 0x61, 0x76,
- 0x65, 0x20, 0x65, 0x76, 0x69, 0x63, 0x74, 0x65, 0x64, 0x20, 0x74, 0x68, 0x65, 0x5C, 0x6E, 0x74,
- 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x61, 0x6E, 0x64, 0x20, 0x6D,
- 0x61, 0x79, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6D, 0x70, 0x74, 0x20, 0x74, 0x6F, 0x20, 0x63, 0x6F,
- 0x6D, 0x6D, 0x69, 0x74, 0x20, 0x69, 0x74, 0x20, 0x69, 0x6E, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66,
- 0x75, 0x74, 0x75, 0x72, 0x65, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69,
- 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x50, 0x6F, 0x6F,
- 0x6C, 0x45, 0x72, 0x72, 0x6F, 0x72, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x6F, 0x75, 0x6E,
- 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
- 0x43, 0x6F, 0x6E, 0x66, 0x69, 0x72, 0x6D, 0x65, 0x64, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x69,
- 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x62, 0x6C, 0x6F,
- 0x63, 0x6B, 0x20, 0x6E, 0x75, 0x6D, 0x62, 0x65, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74,
- 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x61, 0x70, 0x70, 0x65, 0x61,
- 0x72, 0x65, 0x64, 0x20, 0x69, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65,
- 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36,
- 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78,
- 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6F, 0x6E, 0x66,
- 0x69, 0x72, 0x6D, 0x65, 0x64, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x74, 0x78, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20,
- 0x22, 0x54, 0x78, 0x49, 0x44, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x72, 0x61,
- 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x49, 0x44, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20,
- 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A,
- 0x20, 0x22, 0x54, 0x78, 0x49, 0x44, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x78, 0x72, 0x65,
- 0x73, 0x75, 0x6C, 0x74, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64,
- 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x54, 0x72, 0x61, 0x6E, 0x73,
- 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x52, 0x65, 0x73, 0x75, 0x6C, 0x74, 0x73, 0x22, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x6F, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x6F,
+ 0x72, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
- 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x79, 0x70, 0x65, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68,
- 0x65, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x74, 0x79,
- 0x70, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F,
- 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x79, 0x70, 0x65, 0x22, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61,
- 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E,
- 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D,
- 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F,
- 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76,
- 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x54,
- 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x46, 0x65, 0x65, 0x22, 0x3A, 0x20,
- 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74,
- 0x69, 0x6F, 0x6E, 0x46, 0x65, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20,
- 0x74, 0x68, 0x65, 0x20, 0x73, 0x75, 0x67, 0x67, 0x65, 0x73, 0x74, 0x65, 0x64, 0x20, 0x66, 0x65,
- 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22,
- 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x65, 0x65, 0x22, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72,
- 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x65, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x46, 0x65, 0x65, 0x20, 0x69, 0x73, 0x20, 0x74,
- 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x66, 0x65, 0x65, 0x5C, 0x6E,
- 0x46, 0x65, 0x65, 0x20, 0x69, 0x73, 0x20, 0x69, 0x6E, 0x20, 0x75, 0x6E, 0x69, 0x74, 0x73, 0x20,
- 0x6F, 0x66, 0x20, 0x6D, 0x69, 0x63, 0x72, 0x6F, 0x2D, 0x41, 0x6C, 0x67, 0x6F, 0x73, 0x20, 0x70,
- 0x65, 0x72, 0x20, 0x62, 0x79, 0x74, 0x65, 0x2E, 0x5C, 0x6E, 0x46, 0x65, 0x65, 0x20, 0x6D, 0x61,
- 0x79, 0x20, 0x66, 0x61, 0x6C, 0x6C, 0x20, 0x74, 0x6F, 0x20, 0x7A, 0x65, 0x72, 0x6F, 0x20, 0x62,
- 0x75, 0x74, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x20,
- 0x6D, 0x75, 0x73, 0x74, 0x20, 0x73, 0x74, 0x69, 0x6C, 0x6C, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20,
- 0x61, 0x20, 0x66, 0x65, 0x65, 0x20, 0x6F, 0x66, 0x5C, 0x6E, 0x61, 0x74, 0x20, 0x6C, 0x65, 0x61,
- 0x73, 0x74, 0x20, 0x4D, 0x69, 0x6E, 0x54, 0x78, 0x6E, 0x46, 0x65, 0x65, 0x20, 0x66, 0x6F, 0x72,
- 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x6E, 0x65, 0x74,
- 0x77, 0x6F, 0x72, 0x6B, 0x20, 0x70, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x2E, 0x22, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65,
- 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22,
- 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65,
- 0x22, 0x3A, 0x20, 0x22, 0x46, 0x65, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x6F, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x20,
+ 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x6F, 0x66,
+ 0x20, 0x70, 0x65, 0x6E, 0x64, 0x69, 0x6E, 0x67, 0x20, 0x72, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73,
+ 0x20, 0x61, 0x70, 0x70, 0x6C, 0x69, 0x65, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x74, 0x68, 0x65, 0x20,
+ 0x54, 0x6F, 0x20, 0x61, 0x63, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x5C, 0x6E, 0x61, 0x73, 0x20, 0x70,
+ 0x61, 0x72, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x72, 0x61, 0x6E,
+ 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E,
+ 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E,
+ 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x6F,
+ 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22,
0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C,
0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61,
0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F,
0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63,
- 0x74, 0x69, 0x6F, 0x6E, 0x4C, 0x69, 0x73, 0x74, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22,
- 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x4C, 0x69,
- 0x73, 0x74, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x61, 0x20, 0x6C, 0x69,
+ 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x50, 0x65, 0x6E, 0x64, 0x69, 0x6E, 0x67,
+ 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x3A, 0x20, 0x7B,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x50, 0x65, 0x6E, 0x64, 0x69, 0x6E, 0x67, 0x54, 0x72,
+ 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x20, 0x72, 0x65, 0x70, 0x72, 0x65,
+ 0x73, 0x65, 0x6E, 0x74, 0x73, 0x20, 0x61, 0x20, 0x70, 0x6F, 0x74, 0x65, 0x6E, 0x74, 0x69, 0x61,
+ 0x6C, 0x6C, 0x79, 0x20, 0x74, 0x72, 0x75, 0x6E, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x6C, 0x69,
0x73, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F,
- 0x6E, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65,
- 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61,
- 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69,
- 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x3A, 0x20, 0x7B,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73,
- 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x4C, 0x69, 0x73, 0x74, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20,
- 0x6C, 0x69, 0x73, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74,
- 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x61, 0x72, 0x72, 0x61, 0x79, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x69, 0x74, 0x65,
- 0x6D, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x6E, 0x73, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x6C, 0x79, 0x20, 0x69, 0x6E, 0x20,
+ 0x74, 0x68, 0x65, 0x5C, 0x6E, 0x6E, 0x6F, 0x64, 0x65, 0x27, 0x73, 0x20, 0x74, 0x72, 0x61, 0x6E,
+ 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x70, 0x6F, 0x6F, 0x6C, 0x2E, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F,
+ 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72,
+ 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x72, 0x75, 0x6E, 0x63, 0x61, 0x74, 0x65, 0x64, 0x54, 0x78,
+ 0x6E, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x6F,
+ 0x74, 0x61, 0x6C, 0x54, 0x78, 0x6E, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74,
+ 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x74, 0x6F, 0x74, 0x61, 0x6C, 0x54, 0x78, 0x6E, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x6F, 0x74, 0x61, 0x6C, 0x54, 0x78,
+ 0x6E, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72,
+ 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D,
+ 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x6F, 0x74, 0x61, 0x6C, 0x54, 0x78, 0x6E,
+ 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x72, 0x75, 0x6E, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x54, 0x78, 0x6E, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65,
0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61,
- 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78,
- 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E,
- 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65,
- 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61,
- 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72,
- 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64,
- 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61,
- 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x50, 0x61, 0x72, 0x61, 0x6D, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
- 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F,
- 0x6E, 0x50, 0x61, 0x72, 0x61, 0x6D, 0x73, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73,
- 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6D, 0x65, 0x74, 0x65, 0x72, 0x73, 0x20,
- 0x74, 0x68, 0x61, 0x74, 0x20, 0x68, 0x65, 0x6C, 0x70, 0x20, 0x61, 0x20, 0x63, 0x6C, 0x69, 0x65,
- 0x6E, 0x74, 0x20, 0x63, 0x6F, 0x6E, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5C, 0x6E, 0x61, 0x20,
- 0x6E, 0x65, 0x77, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x2E,
+ 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x4C, 0x69, 0x73, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67,
+ 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F,
+ 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F,
+ 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F,
+ 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x50, 0x72, 0x6F, 0x6F, 0x66, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E,
+ 0x54, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x53,
+ 0x74, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6F, 0x6F, 0x66, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63,
+ 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E,
+ 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x61, 0x6C,
+ 0x20, 0x66, 0x69, 0x65, 0x6C, 0x64, 0x73, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x61, 0x20, 0x73, 0x74,
+ 0x61, 0x74, 0x65, 0x20, 0x70, 0x72, 0x6F, 0x6F, 0x66, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61,
+ 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74,
+ 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22,
+ 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x70, 0x22,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x70, 0x6D, 0x73, 0x67,
+ 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x70, 0x22, 0x3A, 0x20, 0x7B,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x50, 0x72, 0x6F, 0x6F, 0x66, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6D, 0x73, 0x67,
+ 0x70, 0x61, 0x63, 0x6B, 0x20, 0x65, 0x6E, 0x63, 0x6F, 0x64, 0x69, 0x6E, 0x67, 0x20, 0x6F, 0x66,
+ 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x74, 0x61, 0x74, 0x65, 0x20, 0x70, 0x72, 0x6F, 0x6F, 0x66,
+ 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74,
+ 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61,
+ 0x74, 0x22, 0x3A, 0x20, 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65,
+ 0x22, 0x3A, 0x20, 0x22, 0x53, 0x74, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6F, 0x6F, 0x66, 0x22, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x73, 0x70, 0x6D, 0x73, 0x67, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x53, 0x74, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6F,
+ 0x6F, 0x66, 0x4D, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65,
+ 0x20, 0x6D, 0x73, 0x67, 0x70, 0x61, 0x63, 0x6B, 0x20, 0x65, 0x6E, 0x63, 0x6F, 0x64, 0x69, 0x6E,
+ 0x67, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x74, 0x61, 0x74, 0x65, 0x20, 0x70,
+ 0x72, 0x6F, 0x6F, 0x66, 0x20, 0x6D, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2E, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22,
+ 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20,
+ 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x53, 0x74, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6F, 0x6F, 0x66, 0x4D, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67,
+ 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64,
+ 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65,
+ 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70,
+ 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x22, 0x3A,
+ 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72,
+ 0x3A, 0x20, 0x6D, 0x6F, 0x64, 0x65, 0x6C, 0x20, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68,
+ 0x65, 0x6D, 0x61, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70,
+ 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x69, 0x74, 0x6C, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x53, 0x74,
+ 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x20, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73,
+ 0x65, 0x6E, 0x74, 0x73, 0x20, 0x61, 0x20, 0x4C, 0x6F, 0x63, 0x61, 0x6C, 0x53, 0x74, 0x61, 0x74,
+ 0x65, 0x53, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x20, 0x6F, 0x72, 0x20, 0x47, 0x6C, 0x6F, 0x62, 0x61,
+ 0x6C, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x2E, 0x20, 0x54, 0x68,
+ 0x65, 0x73, 0x65, 0x5C, 0x6E, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x73, 0x20, 0x64, 0x65, 0x74,
+ 0x65, 0x72, 0x6D, 0x69, 0x6E, 0x65, 0x20, 0x68, 0x6F, 0x77, 0x20, 0x6D, 0x75, 0x63, 0x68, 0x20,
+ 0x73, 0x74, 0x6F, 0x72, 0x61, 0x67, 0x65, 0x20, 0x6D, 0x61, 0x79, 0x20, 0x62, 0x65, 0x20, 0x75,
+ 0x73, 0x65, 0x64, 0x20, 0x69, 0x6E, 0x20, 0x61, 0x20, 0x4C, 0x6F, 0x63, 0x61, 0x6C, 0x53, 0x74,
+ 0x61, 0x74, 0x65, 0x20, 0x6F, 0x72, 0x5C, 0x6E, 0x47, 0x6C, 0x6F, 0x62, 0x61, 0x6C, 0x53, 0x74,
+ 0x61, 0x74, 0x65, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x61, 0x6E, 0x20, 0x61, 0x70, 0x70, 0x6C, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x2E, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6D, 0x6F, 0x72, 0x65,
+ 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x75, 0x73, 0x65, 0x64, 0x2C, 0x20, 0x74, 0x68, 0x65,
+ 0x20, 0x6C, 0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x6D, 0x69, 0x6E, 0x69, 0x6D, 0x75, 0x6D, 0x5C,
+ 0x6E, 0x62, 0x61, 0x6C, 0x61, 0x6E, 0x63, 0x65, 0x20, 0x6D, 0x75, 0x73, 0x74, 0x20, 0x62, 0x65,
+ 0x20, 0x6D, 0x61, 0x69, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x65, 0x64, 0x20, 0x69, 0x6E, 0x20, 0x74,
+ 0x68, 0x65, 0x20, 0x61, 0x63, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x68, 0x6F, 0x6C, 0x64, 0x69,
+ 0x6E, 0x67, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x61, 0x74, 0x61, 0x2E, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A,
+ 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74,
+ 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x62, 0x79, 0x74,
+ 0x65, 0x73, 0x6C, 0x69, 0x63, 0x65, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74,
+ 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x62, 0x79, 0x74, 0x65, 0x73, 0x6C, 0x69, 0x63, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x75, 0x6D, 0x42, 0x79, 0x74,
+ 0x65, 0x53, 0x6C, 0x69, 0x63, 0x65, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6D, 0x61,
+ 0x78, 0x69, 0x6D, 0x75, 0x6D, 0x20, 0x6E, 0x75, 0x6D, 0x62, 0x65, 0x72, 0x20, 0x6F, 0x66, 0x20,
+ 0x54, 0x45, 0x41, 0x4C, 0x20, 0x62, 0x79, 0x74, 0x65, 0x20, 0x73, 0x6C, 0x69, 0x63, 0x65, 0x73,
+ 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x6D, 0x61, 0x79, 0x20, 0x62, 0x65, 0x5C, 0x6E, 0x73, 0x74,
+ 0x6F, 0x72, 0x65, 0x64, 0x20, 0x69, 0x6E, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6B, 0x65, 0x79, 0x2F,
+ 0x76, 0x61, 0x6C, 0x75, 0x65, 0x20, 0x73, 0x74, 0x6F, 0x72, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20,
+ 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22,
+ 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20,
+ 0x22, 0x4E, 0x75, 0x6D, 0x42, 0x79, 0x74, 0x65, 0x53, 0x6C, 0x69, 0x63, 0x65, 0x22, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x75, 0x6D, 0x55, 0x69, 0x6E, 0x74, 0x20, 0x69,
+ 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6D, 0x61, 0x78, 0x69, 0x6D, 0x75, 0x6D, 0x20, 0x6E, 0x75,
+ 0x6D, 0x62, 0x65, 0x72, 0x20, 0x6F, 0x66, 0x20, 0x54, 0x45, 0x41, 0x4C, 0x20, 0x75, 0x69, 0x6E,
+ 0x74, 0x73, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x6D, 0x61, 0x79, 0x20, 0x62, 0x65, 0x20, 0x73,
+ 0x74, 0x6F, 0x72, 0x65, 0x64, 0x20, 0x69, 0x6E, 0x5C, 0x6E, 0x74, 0x68, 0x65, 0x20, 0x6B, 0x65,
+ 0x79, 0x2F, 0x76, 0x61, 0x6C, 0x75, 0x65, 0x20, 0x73, 0x74, 0x6F, 0x72, 0x65, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22,
+ 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A,
+ 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22,
+ 0x3A, 0x20, 0x22, 0x4E, 0x75, 0x6D, 0x55, 0x69, 0x6E, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61,
+ 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D,
+ 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67,
+ 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67,
+ 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x53, 0x75, 0x70, 0x70,
+ 0x6C, 0x79, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x53, 0x75, 0x70,
+ 0x70, 0x6C, 0x79, 0x20, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6E, 0x74, 0x73, 0x20, 0x74,
+ 0x68, 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x73, 0x75, 0x70, 0x70, 0x6C,
+ 0x79, 0x20, 0x6F, 0x66, 0x20, 0x4D, 0x69, 0x63, 0x72, 0x6F, 0x41, 0x6C, 0x67, 0x6F, 0x73, 0x20,
+ 0x69, 0x6E, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6D, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F,
+ 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72,
+ 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x6F, 0x74, 0x61, 0x6C, 0x4D, 0x6F, 0x6E, 0x65, 0x79,
+ 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6F, 0x6E, 0x6C, 0x69,
+ 0x6E, 0x65, 0x4D, 0x6F, 0x6E, 0x65, 0x79, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74,
+ 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x6F, 0x6E, 0x6C, 0x69, 0x6E, 0x65, 0x4D, 0x6F, 0x6E, 0x65, 0x79, 0x22, 0x3A, 0x20, 0x7B,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4F, 0x6E, 0x6C, 0x69, 0x6E,
+ 0x65, 0x4D, 0x6F, 0x6E, 0x65, 0x79, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65,
+ 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36,
+ 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78,
+ 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4F, 0x6E, 0x6C, 0x69,
+ 0x6E, 0x65, 0x4D, 0x6F, 0x6E, 0x65, 0x79, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x6F, 0x75,
+ 0x6E, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20,
+ 0x22, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65,
+ 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36,
+ 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78,
+ 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x52, 0x6F, 0x75, 0x6E,
+ 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x6F, 0x74, 0x61, 0x6C, 0x4D, 0x6F, 0x6E, 0x65,
+ 0x79, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
+ 0x54, 0x6F, 0x74, 0x61, 0x6C, 0x4D, 0x6F, 0x6E, 0x65, 0x79, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75,
+ 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x54, 0x6F, 0x74, 0x61, 0x6C, 0x4D, 0x6F, 0x6E, 0x65, 0x79, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61,
+ 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D,
+ 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67,
+ 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67,
+ 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E,
+ 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A,
+ 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x63, 0x6F,
+ 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x61, 0x6C, 0x6C, 0x20, 0x66, 0x69, 0x65, 0x6C, 0x64,
+ 0x73, 0x20, 0x63, 0x6F, 0x6D, 0x6D, 0x6F, 0x6E, 0x20, 0x74, 0x6F, 0x20, 0x61, 0x6C, 0x6C, 0x20,
+ 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x20, 0x61, 0x6E, 0x64,
+ 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x73, 0x20, 0x61, 0x73, 0x20, 0x61, 0x6E, 0x20, 0x65, 0x6E,
+ 0x76, 0x65, 0x6C, 0x6F, 0x70, 0x65, 0x20, 0x74, 0x6F, 0x20, 0x61, 0x6C, 0x6C, 0x20, 0x74, 0x72,
+ 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x5C, 0x6E, 0x74, 0x79, 0x70, 0x65,
0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x22, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x65, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x67, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x49, 0x44,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x67, 0x65, 0x6E, 0x65,
- 0x73, 0x69, 0x73, 0x68, 0x61, 0x73, 0x68, 0x62, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x6F, 0x6E, 0x73, 0x65,
- 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F,
- 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65,
- 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x78, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x72, 0x6F, 0x6D, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x65, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x69, 0x72, 0x73, 0x74, 0x2D, 0x72, 0x6F, 0x75, 0x6E, 0x64,
+ 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6C, 0x61, 0x73, 0x74,
+ 0x2D, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x61, 0x70, 0x70, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x73, 0x70, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x67,
+ 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x49, 0x44, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x67, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x68, 0x61, 0x73, 0x68, 0x62,
+ 0x36, 0x34, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A,
+ 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x61, 0x70, 0x70, 0x22,
+ 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24,
+ 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74,
+ 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x41, 0x70, 0x70, 0x6C, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6F, 0x6E,
+ 0x43, 0x61, 0x6C, 0x6C, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x54,
+ 0x79, 0x70, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x75, 0x72, 0x63, 0x66, 0x67, 0x22,
+ 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24,
+ 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74,
+ 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x41, 0x73, 0x73, 0x65, 0x74, 0x43, 0x6F, 0x6E, 0x66, 0x69, 0x67,
+ 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65, 0x22,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x75, 0x72, 0x66, 0x72, 0x7A, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22,
+ 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73,
+ 0x2F, 0x41, 0x73, 0x73, 0x65, 0x74, 0x46, 0x72, 0x65, 0x65, 0x7A, 0x65, 0x54, 0x72, 0x61, 0x6E,
+ 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x63, 0x75, 0x72, 0x78, 0x66, 0x65, 0x72, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22,
+ 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x41, 0x73,
+ 0x73, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x66, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6E, 0x73,
+ 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x66, 0x65, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A,
+ 0x20, 0x22, 0x46, 0x65, 0x65, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x72, 0x61,
+ 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x66, 0x65, 0x65, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
+ 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20,
+ 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A,
+ 0x20, 0x22, 0x46, 0x65, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x69, 0x72, 0x73, 0x74,
+ 0x2D, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F,
+ 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x46, 0x69, 0x72, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x20,
+ 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x69,
+ 0x72, 0x73, 0x74, 0x20, 0x76, 0x61, 0x6C, 0x69, 0x64, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20,
+ 0x66, 0x6F, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63,
+ 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65,
+ 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66,
+ 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67,
+ 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x46, 0x69, 0x72, 0x73, 0x74, 0x52,
+ 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x72, 0x6F, 0x6D, 0x22, 0x3A,
+ 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x46, 0x72, 0x6F,
+ 0x6D, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x6E, 0x64, 0x65, 0x72, 0x27,
+ 0x73, 0x20, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73,
+ 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x46, 0x72, 0x6F, 0x6D, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x72, 0x6F, 0x6D, 0x72, 0x65,
+ 0x77, 0x61, 0x72, 0x64, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E,
- 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65, 0x72,
- 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74,
- 0x68, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x20, 0x70, 0x72, 0x6F,
- 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x5C, 0x6E, 0x61,
- 0x73, 0x20, 0x6F, 0x66, 0x20, 0x4C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x2E, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70,
- 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61,
- 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56,
- 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x65, 0x65, 0x22,
- 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x46, 0x65,
- 0x65, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x75, 0x67, 0x67, 0x65, 0x73, 0x74,
- 0x65, 0x64, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x66,
- 0x65, 0x65, 0x5C, 0x6E, 0x46, 0x65, 0x65, 0x20, 0x69, 0x73, 0x20, 0x69, 0x6E, 0x20, 0x75, 0x6E,
- 0x69, 0x74, 0x73, 0x20, 0x6F, 0x66, 0x20, 0x6D, 0x69, 0x63, 0x72, 0x6F, 0x2D, 0x41, 0x6C, 0x67,
- 0x6F, 0x73, 0x20, 0x70, 0x65, 0x72, 0x20, 0x62, 0x79, 0x74, 0x65, 0x2E, 0x5C, 0x6E, 0x46, 0x65,
- 0x65, 0x20, 0x6D, 0x61, 0x79, 0x20, 0x66, 0x61, 0x6C, 0x6C, 0x20, 0x74, 0x6F, 0x20, 0x7A, 0x65,
- 0x72, 0x6F, 0x20, 0x62, 0x75, 0x74, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6F, 0x6E, 0x73, 0x20, 0x6D, 0x75, 0x73, 0x74, 0x20, 0x73, 0x74, 0x69, 0x6C, 0x6C, 0x20, 0x68,
- 0x61, 0x76, 0x65, 0x20, 0x61, 0x20, 0x66, 0x65, 0x65, 0x20, 0x6F, 0x66, 0x5C, 0x6E, 0x61, 0x74,
- 0x20, 0x6C, 0x65, 0x61, 0x73, 0x74, 0x20, 0x4D, 0x69, 0x6E, 0x54, 0x78, 0x6E, 0x46, 0x65, 0x65,
- 0x20, 0x66, 0x6F, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74,
- 0x20, 0x6E, 0x65, 0x74, 0x77, 0x6F, 0x72, 0x6B, 0x20, 0x70, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F,
- 0x6C, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72,
- 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D,
- 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x46, 0x65, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20,
+ 0x22, 0x3A, 0x20, 0x22, 0x46, 0x72, 0x6F, 0x6D, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x20,
+ 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x6D, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x6F, 0x66,
+ 0x20, 0x70, 0x65, 0x6E, 0x64, 0x69, 0x6E, 0x67, 0x20, 0x72, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73,
+ 0x20, 0x61, 0x70, 0x70, 0x6C, 0x69, 0x65, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x74, 0x68, 0x65, 0x20,
+ 0x46, 0x72, 0x6F, 0x6D, 0x5C, 0x6E, 0x61, 0x63, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x61, 0x73,
+ 0x20, 0x70, 0x61, 0x72, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x72,
+ 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75,
+ 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x46, 0x72, 0x6F, 0x6D, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x22, 0x67, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x49, 0x44, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
@@ -3850,118 +3498,295 @@ func init() {
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D,
0x65, 0x22, 0x3A, 0x20, 0x22, 0x47, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x48, 0x61, 0x73, 0x68,
0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22,
- 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4C, 0x61,
- 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65,
- 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6C, 0x61, 0x73, 0x74, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64,
- 0x20, 0x73, 0x65, 0x65, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67,
- 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D,
- 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4C, 0x61, 0x73, 0x74, 0x52,
- 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6D, 0x69, 0x6E, 0x46, 0x65, 0x65,
- 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54,
- 0x68, 0x65, 0x20, 0x6D, 0x69, 0x6E, 0x69, 0x6D, 0x75, 0x6D, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73,
- 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x66, 0x65, 0x65, 0x20, 0x28, 0x6E, 0x6F, 0x74, 0x20,
- 0x70, 0x65, 0x72, 0x20, 0x62, 0x79, 0x74, 0x65, 0x29, 0x20, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72,
- 0x65, 0x64, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x74, 0x68, 0x65, 0x5C, 0x6E, 0x74, 0x78, 0x6E, 0x20,
- 0x74, 0x6F, 0x20, 0x76, 0x61, 0x6C, 0x69, 0x64, 0x61, 0x74, 0x65, 0x20, 0x66, 0x6F, 0x72, 0x20,
- 0x74, 0x68, 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x6E, 0x65, 0x74, 0x77,
- 0x6F, 0x72, 0x6B, 0x20, 0x70, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x2E, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22,
- 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A,
- 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22,
- 0x3A, 0x20, 0x22, 0x4D, 0x69, 0x6E, 0x54, 0x78, 0x6E, 0x46, 0x65, 0x65, 0x22, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63,
- 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63,
- 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61,
- 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61,
- 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31,
- 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x54, 0x72,
- 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x52, 0x65, 0x73, 0x75, 0x6C, 0x74, 0x73,
- 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73,
- 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x52, 0x65, 0x73, 0x75, 0x6C, 0x74, 0x73, 0x20, 0x63, 0x6F,
- 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69,
- 0x6F, 0x6E, 0x20, 0x61, 0x62, 0x6F, 0x75, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x69, 0x64,
- 0x65, 0x20, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x73, 0x20, 0x6F, 0x66, 0x20, 0x61, 0x20, 0x74,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x67, 0x72, 0x6F, 0x75, 0x70, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x47, 0x72, 0x6F, 0x75, 0x70, 0x22,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70,
+ 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22,
+ 0x3A, 0x20, 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A,
+ 0x20, 0x22, 0x47, 0x72, 0x6F, 0x75, 0x70, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6B, 0x65, 0x79,
+ 0x72, 0x65, 0x67, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66,
+ 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x4B, 0x65, 0x79, 0x72, 0x65, 0x67, 0x54,
+ 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79, 0x70, 0x65, 0x22, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x6C, 0x61, 0x73, 0x74, 0x2D, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x3A,
+ 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4C, 0x61, 0x73,
+ 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73,
+ 0x20, 0x74, 0x68, 0x65, 0x20, 0x6C, 0x61, 0x73, 0x74, 0x20, 0x76, 0x61, 0x6C, 0x69, 0x64, 0x20,
+ 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74,
0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75,
+ 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x4C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6C,
+ 0x65, 0x61, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22,
+ 0x3A, 0x20, 0x22, 0x4C, 0x65, 0x61, 0x73, 0x65, 0x20, 0x65, 0x6E, 0x66, 0x6F, 0x72, 0x63, 0x65,
+ 0x73, 0x20, 0x6D, 0x75, 0x74, 0x75, 0x61, 0x6C, 0x20, 0x65, 0x78, 0x63, 0x6C, 0x75, 0x73, 0x69,
+ 0x6F, 0x6E, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F,
+ 0x6E, 0x73, 0x2E, 0x20, 0x20, 0x49, 0x66, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x66, 0x69, 0x65,
+ 0x6C, 0x64, 0x20, 0x69, 0x73, 0x5C, 0x6E, 0x6E, 0x6F, 0x6E, 0x7A, 0x65, 0x72, 0x6F, 0x2C, 0x20,
+ 0x74, 0x68, 0x65, 0x6E, 0x20, 0x6F, 0x6E, 0x63, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x72,
+ 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x69, 0x73, 0x20, 0x63, 0x6F, 0x6E,
+ 0x66, 0x69, 0x72, 0x6D, 0x65, 0x64, 0x2C, 0x20, 0x69, 0x74, 0x20, 0x61, 0x63, 0x71, 0x75, 0x69,
+ 0x72, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x5C, 0x6E, 0x6C, 0x65, 0x61, 0x73, 0x65, 0x20, 0x69,
+ 0x64, 0x65, 0x6E, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65,
+ 0x20, 0x28, 0x53, 0x65, 0x6E, 0x64, 0x65, 0x72, 0x2C, 0x20, 0x4C, 0x65, 0x61, 0x73, 0x65, 0x29,
+ 0x20, 0x70, 0x61, 0x69, 0x72, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x72, 0x61,
+ 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x75, 0x6E, 0x74, 0x69, 0x6C, 0x5C, 0x6E,
+ 0x74, 0x68, 0x65, 0x20, 0x4C, 0x61, 0x73, 0x74, 0x56, 0x61, 0x6C, 0x69, 0x64, 0x20, 0x72, 0x6F,
+ 0x75, 0x6E, 0x64, 0x20, 0x70, 0x61, 0x73, 0x73, 0x65, 0x73, 0x2E, 0x20, 0x20, 0x57, 0x68, 0x69,
+ 0x6C, 0x65, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74,
+ 0x69, 0x6F, 0x6E, 0x20, 0x70, 0x6F, 0x73, 0x73, 0x65, 0x73, 0x73, 0x65, 0x73, 0x20, 0x74, 0x68,
+ 0x65, 0x5C, 0x6E, 0x6C, 0x65, 0x61, 0x73, 0x65, 0x2C, 0x20, 0x6E, 0x6F, 0x20, 0x6F, 0x74, 0x68,
+ 0x65, 0x72, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x73,
+ 0x70, 0x65, 0x63, 0x69, 0x66, 0x79, 0x69, 0x6E, 0x67, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6C,
+ 0x65, 0x61, 0x73, 0x65, 0x20, 0x63, 0x61, 0x6E, 0x20, 0x62, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x66,
+ 0x69, 0x72, 0x6D, 0x65, 0x64, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69,
+ 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F,
+ 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4C, 0x65, 0x61, 0x73, 0x65, 0x22, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x6E, 0x6F, 0x74, 0x65, 0x62, 0x36, 0x34, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x6F, 0x74, 0x65, 0x20, 0x69,
+ 0x73, 0x20, 0x61, 0x20, 0x66, 0x72, 0x65, 0x65, 0x20, 0x66, 0x6F, 0x72, 0x6D, 0x20, 0x64, 0x61,
+ 0x74, 0x61, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D,
+ 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D,
+ 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x6F, 0x74, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70,
+ 0x61, 0x79, 0x6D, 0x65, 0x6E, 0x74, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F,
+ 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x50, 0x61, 0x79, 0x6D,
+ 0x65, 0x6E, 0x74, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x54, 0x79,
+ 0x70, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x6F, 0x6F, 0x6C, 0x65, 0x72, 0x72, 0x6F,
+ 0x72, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
+ 0x50, 0x6F, 0x6F, 0x6C, 0x45, 0x72, 0x72, 0x6F, 0x72, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74,
+ 0x69, 0x6F, 0x6E, 0x20, 0x77, 0x61, 0x73, 0x20, 0x65, 0x76, 0x69, 0x63, 0x74, 0x65, 0x64, 0x20,
+ 0x66, 0x72, 0x6F, 0x6D, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6E, 0x6F, 0x64, 0x65, 0x27, 0x73,
+ 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x5C, 0x6E, 0x70, 0x6F,
+ 0x6F, 0x6C, 0x20, 0x28, 0x69, 0x66, 0x20, 0x6E, 0x6F, 0x6E, 0x2D, 0x65, 0x6D, 0x70, 0x74, 0x79,
+ 0x29, 0x2E, 0x20, 0x20, 0x41, 0x20, 0x6E, 0x6F, 0x6E, 0x2D, 0x65, 0x6D, 0x70, 0x74, 0x79, 0x20,
+ 0x50, 0x6F, 0x6F, 0x6C, 0x45, 0x72, 0x72, 0x6F, 0x72, 0x20, 0x64, 0x6F, 0x65, 0x73, 0x20, 0x6E,
+ 0x6F, 0x74, 0x20, 0x67, 0x75, 0x61, 0x72, 0x61, 0x6E, 0x74, 0x65, 0x65, 0x20, 0x74, 0x68, 0x61,
+ 0x74, 0x20, 0x74, 0x68, 0x65, 0x5C, 0x6E, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
+ 0x6F, 0x6E, 0x20, 0x77, 0x69, 0x6C, 0x6C, 0x20, 0x6E, 0x65, 0x76, 0x65, 0x72, 0x20, 0x62, 0x65,
+ 0x20, 0x63, 0x6F, 0x6D, 0x6D, 0x69, 0x74, 0x74, 0x65, 0x64, 0x3B, 0x20, 0x6F, 0x74, 0x68, 0x65,
+ 0x72, 0x20, 0x6E, 0x6F, 0x64, 0x65, 0x73, 0x20, 0x6D, 0x61, 0x79, 0x20, 0x6E, 0x6F, 0x74, 0x20,
+ 0x68, 0x61, 0x76, 0x65, 0x20, 0x65, 0x76, 0x69, 0x63, 0x74, 0x65, 0x64, 0x20, 0x74, 0x68, 0x65,
+ 0x5C, 0x6E, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x61, 0x6E,
+ 0x64, 0x20, 0x6D, 0x61, 0x79, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6D, 0x70, 0x74, 0x20, 0x74, 0x6F,
+ 0x20, 0x63, 0x6F, 0x6D, 0x6D, 0x69, 0x74, 0x20, 0x69, 0x74, 0x20, 0x69, 0x6E, 0x20, 0x74, 0x68,
+ 0x65, 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73,
+ 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x50, 0x6F, 0x6F, 0x6C, 0x45, 0x72, 0x72, 0x6F, 0x72, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72,
+ 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22,
+ 0x3A, 0x20, 0x22, 0x43, 0x6F, 0x6E, 0x66, 0x69, 0x72, 0x6D, 0x65, 0x64, 0x52, 0x6F, 0x75, 0x6E,
+ 0x64, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20,
+ 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x20, 0x6E, 0x75, 0x6D, 0x62, 0x65, 0x72, 0x20, 0x74, 0x68, 0x69,
+ 0x73, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x61, 0x70,
+ 0x70, 0x65, 0x61, 0x72, 0x65, 0x64, 0x20, 0x69, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69,
+ 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69,
+ 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43,
+ 0x6F, 0x6E, 0x66, 0x69, 0x72, 0x6D, 0x65, 0x64, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x73, 0x70, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64,
+ 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x50, 0x72, 0x6F, 0x6F, 0x66, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E,
+ 0x54, 0x79, 0x70, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x78, 0x22, 0x3A, 0x20, 0x7B,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x78, 0x49, 0x44, 0x20,
+ 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
+ 0x6F, 0x6E, 0x20, 0x49, 0x44, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E,
+ 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78,
+ 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x78, 0x49, 0x44,
+ 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x78, 0x72, 0x65, 0x73, 0x75, 0x6C, 0x74, 0x73, 0x22,
+ 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24,
+ 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74,
+ 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E,
+ 0x52, 0x65, 0x73, 0x75, 0x6C, 0x74, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70,
+ 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
+ 0x54, 0x79, 0x70, 0x65, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6E,
+ 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x74, 0x79, 0x70, 0x65, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
+ 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22,
+ 0x3A, 0x20, 0x22, 0x54, 0x79, 0x70, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22,
+ 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C,
+ 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61,
+ 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F,
+ 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63,
+ 0x74, 0x69, 0x6F, 0x6E, 0x46, 0x65, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A,
+ 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x46, 0x65, 0x65,
+ 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x75,
+ 0x67, 0x67, 0x65, 0x73, 0x74, 0x65, 0x64, 0x20, 0x66, 0x65, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A,
+ 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x71,
+ 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x66, 0x65, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69,
+ 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x66, 0x65, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A,
+ 0x20, 0x22, 0x46, 0x65, 0x65, 0x20, 0x69, 0x73, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63,
+ 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x66, 0x65, 0x65, 0x5C, 0x6E, 0x46, 0x65, 0x65, 0x20, 0x69, 0x73,
+ 0x20, 0x69, 0x6E, 0x20, 0x75, 0x6E, 0x69, 0x74, 0x73, 0x20, 0x6F, 0x66, 0x20, 0x6D, 0x69, 0x63,
+ 0x72, 0x6F, 0x2D, 0x41, 0x6C, 0x67, 0x6F, 0x73, 0x20, 0x70, 0x65, 0x72, 0x20, 0x62, 0x79, 0x74,
+ 0x65, 0x2E, 0x5C, 0x6E, 0x46, 0x65, 0x65, 0x20, 0x6D, 0x61, 0x79, 0x20, 0x66, 0x61, 0x6C, 0x6C,
+ 0x20, 0x74, 0x6F, 0x20, 0x7A, 0x65, 0x72, 0x6F, 0x20, 0x62, 0x75, 0x74, 0x20, 0x74, 0x72, 0x61,
+ 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x20, 0x6D, 0x75, 0x73, 0x74, 0x20, 0x73,
+ 0x74, 0x69, 0x6C, 0x6C, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, 0x61, 0x20, 0x66, 0x65, 0x65, 0x20,
+ 0x6F, 0x66, 0x5C, 0x6E, 0x61, 0x74, 0x20, 0x6C, 0x65, 0x61, 0x73, 0x74, 0x20, 0x4D, 0x69, 0x6E,
+ 0x54, 0x78, 0x6E, 0x46, 0x65, 0x65, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63,
+ 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x6E, 0x65, 0x74, 0x77, 0x6F, 0x72, 0x6B, 0x20, 0x70,
+ 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E,
+ 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E,
+ 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x46, 0x65,
+ 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67,
+ 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64,
+ 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65,
+ 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70,
+ 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x4C, 0x69,
+ 0x73, 0x74, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61,
+ 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x4C, 0x69, 0x73, 0x74, 0x20, 0x63, 0x6F, 0x6E,
+ 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x61, 0x20, 0x6C, 0x69, 0x73, 0x74, 0x20, 0x6F, 0x66, 0x20,
+ 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62,
+ 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65,
+ 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73,
+ 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61,
+ 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F,
+ 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E,
+ 0x4C, 0x69, 0x73, 0x74, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x6C, 0x69, 0x73, 0x74, 0x20, 0x6F,
+ 0x66, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65,
+ 0x22, 0x3A, 0x20, 0x22, 0x61, 0x72, 0x72, 0x61, 0x79, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x69, 0x74, 0x65, 0x6D, 0x73, 0x22, 0x3A, 0x20, 0x7B,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72,
+ 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69,
+ 0x6F, 0x6E, 0x73, 0x2F, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x22,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61,
+ 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F,
+ 0x6E, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D,
+ 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E,
+ 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61,
+ 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73,
+ 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x50,
+ 0x61, 0x72, 0x61, 0x6D, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
+ 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x50, 0x61, 0x72, 0x61, 0x6D,
+ 0x73, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70,
+ 0x61, 0x72, 0x61, 0x6D, 0x65, 0x74, 0x65, 0x72, 0x73, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x68,
+ 0x65, 0x6C, 0x70, 0x20, 0x61, 0x20, 0x63, 0x6C, 0x69, 0x65, 0x6E, 0x74, 0x20, 0x63, 0x6F, 0x6E,
+ 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5C, 0x6E, 0x61, 0x20, 0x6E, 0x65, 0x77, 0x20, 0x74, 0x72,
+ 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65,
- 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70,
- 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x61, 0x70, 0x70, 0x22, 0x3A,
- 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x64, 0x41, 0x70, 0x70, 0x49, 0x6E, 0x64, 0x65, 0x78, 0x20, 0x69, 0x6E, 0x64,
- 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x70, 0x70, 0x20, 0x69,
- 0x6E, 0x64, 0x65, 0x78, 0x20, 0x6F, 0x66, 0x20, 0x61, 0x6E, 0x20, 0x61, 0x70, 0x70, 0x20, 0x63,
- 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74,
- 0x78, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72,
- 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D,
- 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41,
- 0x70, 0x70, 0x49, 0x6E, 0x64, 0x65, 0x78, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x64, 0x61, 0x73, 0x73, 0x65, 0x74, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20,
+ 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x71, 0x75,
+ 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x66, 0x65, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x67, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x49, 0x44, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x67, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x68, 0x61, 0x73,
+ 0x68, 0x62, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x6C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65,
+ 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63,
+ 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22,
+ 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x6F,
+ 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x69,
+ 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6F, 0x6E,
+ 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x20, 0x70, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x20,
+ 0x76, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x5C, 0x6E, 0x61, 0x73, 0x20, 0x6F, 0x66, 0x20, 0x4C,
+ 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73,
+ 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x43, 0x6F, 0x6E, 0x73, 0x65, 0x6E, 0x73, 0x75, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E,
+ 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x65, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41,
- 0x73, 0x73, 0x65, 0x74, 0x49, 0x6E, 0x64, 0x65, 0x78, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61,
- 0x74, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x73, 0x73, 0x65, 0x74, 0x20, 0x69, 0x6E,
- 0x64, 0x65, 0x78, 0x20, 0x6F, 0x66, 0x20, 0x61, 0x6E, 0x20, 0x61, 0x73, 0x73, 0x65, 0x74, 0x20,
- 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20,
- 0x74, 0x78, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F,
- 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F,
- 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
- 0x41, 0x73, 0x73, 0x65, 0x74, 0x49, 0x6E, 0x64, 0x65, 0x78, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61,
- 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D,
- 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67,
- 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67,
- 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x6F,
- 0x74, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x77, 0x65, 0x20, 0x61, 0x6E, 0x6E, 0x6F, 0x74,
- 0x61, 0x74, 0x65, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x61, 0x73, 0x20, 0x61, 0x20, 0x6D, 0x6F,
- 0x64, 0x65, 0x6C, 0x20, 0x73, 0x6F, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x6C, 0x65, 0x67, 0x61,
- 0x63, 0x79, 0x20, 0x63, 0x6C, 0x69, 0x65, 0x6E, 0x74, 0x73, 0x5C, 0x6E, 0x63, 0x61, 0x6E, 0x20,
- 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6C, 0x79, 0x20, 0x69, 0x6D, 0x70, 0x6F, 0x72, 0x74, 0x20,
- 0x61, 0x20, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x20, 0x67, 0x65, 0x6E, 0x65, 0x72, 0x61,
- 0x74, 0x65, 0x64, 0x20, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x6D, 0x6F, 0x64, 0x65,
- 0x6C, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65,
- 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x74, 0x69, 0x74, 0x6C, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x65, 0x72,
- 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x74, 0x68,
- 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x20,
- 0x76, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x73,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x67, 0x65, 0x6E, 0x65,
- 0x73, 0x69, 0x73, 0x5F, 0x69, 0x64, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x67, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x5F, 0x68, 0x61, 0x73, 0x68, 0x5F, 0x62,
- 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x62, 0x75,
- 0x69, 0x6C, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22,
- 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x62, 0x75, 0x69,
- 0x6C, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69,
- 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x42, 0x75, 0x69, 0x6C, 0x64, 0x56, 0x65, 0x72,
- 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x67, 0x65, 0x6E, 0x65, 0x73, 0x69,
- 0x73, 0x5F, 0x68, 0x61, 0x73, 0x68, 0x5F, 0x62, 0x36, 0x34, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
+ 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x46, 0x65, 0x65, 0x20, 0x69, 0x73, 0x20, 0x74,
+ 0x68, 0x65, 0x20, 0x73, 0x75, 0x67, 0x67, 0x65, 0x73, 0x74, 0x65, 0x64, 0x20, 0x74, 0x72, 0x61,
+ 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x66, 0x65, 0x65, 0x5C, 0x6E, 0x46, 0x65,
+ 0x65, 0x20, 0x69, 0x73, 0x20, 0x69, 0x6E, 0x20, 0x75, 0x6E, 0x69, 0x74, 0x73, 0x20, 0x6F, 0x66,
+ 0x20, 0x6D, 0x69, 0x63, 0x72, 0x6F, 0x2D, 0x41, 0x6C, 0x67, 0x6F, 0x73, 0x20, 0x70, 0x65, 0x72,
+ 0x20, 0x62, 0x79, 0x74, 0x65, 0x2E, 0x5C, 0x6E, 0x46, 0x65, 0x65, 0x20, 0x6D, 0x61, 0x79, 0x20,
+ 0x66, 0x61, 0x6C, 0x6C, 0x20, 0x74, 0x6F, 0x20, 0x7A, 0x65, 0x72, 0x6F, 0x20, 0x62, 0x75, 0x74,
+ 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x20, 0x6D, 0x75,
+ 0x73, 0x74, 0x20, 0x73, 0x74, 0x69, 0x6C, 0x6C, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, 0x61, 0x20,
+ 0x66, 0x65, 0x65, 0x20, 0x6F, 0x66, 0x5C, 0x6E, 0x61, 0x74, 0x20, 0x6C, 0x65, 0x61, 0x73, 0x74,
+ 0x20, 0x4D, 0x69, 0x6E, 0x54, 0x78, 0x6E, 0x46, 0x65, 0x65, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x74,
+ 0x68, 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x6E, 0x65, 0x74, 0x77, 0x6F,
+ 0x72, 0x6B, 0x20, 0x70, 0x72, 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x2E, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
+ 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20,
+ 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A,
+ 0x20, 0x22, 0x46, 0x65, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x67, 0x65, 0x6E, 0x65, 0x73,
+ 0x69, 0x73, 0x49, 0x44, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22,
+ 0x3A, 0x20, 0x22, 0x47, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x20, 0x49, 0x44, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22,
+ 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65,
+ 0x22, 0x3A, 0x20, 0x22, 0x47, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x49, 0x44, 0x22, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x67, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x68, 0x61, 0x73, 0x68, 0x62, 0x36,
+ 0x34, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
+ 0x47, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x20, 0x68, 0x61, 0x73, 0x68, 0x22, 0x2C, 0x0A, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22,
@@ -3969,243 +3794,362 @@ func init() {
0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x47,
0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x48, 0x61, 0x73, 0x68, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x67, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x5F, 0x69, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
- 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22,
- 0x3A, 0x20, 0x22, 0x47, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x49, 0x44, 0x22, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
- 0x20, 0x22, 0x61, 0x72, 0x72, 0x61, 0x79, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x69, 0x74, 0x65, 0x6D, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65,
- 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20,
- 0x22, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67,
- 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F,
- 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F,
- 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F,
- 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x63, 0x6F, 0x6D, 0x6D, 0x6F,
- 0x6E, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74,
- 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x49, 0x44, 0x22, 0x3A, 0x20, 0x7B,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6F, 0x6E, 0x49, 0x44, 0x20, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E,
- 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
- 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x78, 0x49, 0x64, 0x22, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72,
- 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x78, 0x49, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
+ 0x6C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E,
+ 0x64, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20,
+ 0x6C, 0x61, 0x73, 0x74, 0x20, 0x72, 0x6F, 0x75, 0x6E, 0x64, 0x20, 0x73, 0x65, 0x65, 0x6E, 0x22,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70,
+ 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74,
+ 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D,
+ 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4C, 0x61, 0x73, 0x74, 0x52, 0x6F, 0x75, 0x6E, 0x64, 0x22, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x6D, 0x69, 0x6E, 0x46, 0x65, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x78, 0x49, 0x64, 0x20, 0x69, 0x73,
- 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x20, 0x65, 0x6E, 0x63, 0x6F,
- 0x64, 0x69, 0x6E, 0x67, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6E,
- 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x68, 0x61, 0x73, 0x68, 0x22, 0x2C, 0x0A, 0x20,
+ 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x68, 0x65, 0x20, 0x6D, 0x69, 0x6E,
+ 0x69, 0x6D, 0x75, 0x6D, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E,
+ 0x20, 0x66, 0x65, 0x65, 0x20, 0x28, 0x6E, 0x6F, 0x74, 0x20, 0x70, 0x65, 0x72, 0x20, 0x62, 0x79,
+ 0x74, 0x65, 0x29, 0x20, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x66, 0x6F, 0x72,
+ 0x20, 0x74, 0x68, 0x65, 0x5C, 0x6E, 0x74, 0x78, 0x6E, 0x20, 0x74, 0x6F, 0x20, 0x76, 0x61, 0x6C,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x20, 0x66, 0x6F, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x75,
+ 0x72, 0x72, 0x65, 0x6E, 0x74, 0x20, 0x6E, 0x65, 0x74, 0x77, 0x6F, 0x72, 0x6B, 0x20, 0x70, 0x72,
+ 0x6F, 0x74, 0x6F, 0x63, 0x6F, 0x6C, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74,
+ 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74,
+ 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x4D, 0x69, 0x6E,
+ 0x54, 0x78, 0x6E, 0x46, 0x65, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A,
+ 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67,
+ 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E,
+ 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61,
+ 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74,
+ 0x69, 0x6F, 0x6E, 0x52, 0x65, 0x73, 0x75, 0x6C, 0x74, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F,
+ 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E,
+ 0x52, 0x65, 0x73, 0x75, 0x6C, 0x74, 0x73, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73,
+ 0x20, 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x61, 0x62, 0x6F,
+ 0x75, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x69, 0x64, 0x65, 0x20, 0x65, 0x66, 0x66, 0x65,
+ 0x63, 0x74, 0x73, 0x20, 0x6F, 0x66, 0x20, 0x61, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63,
+ 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79,
+ 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73,
+ 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x64, 0x61, 0x70, 0x70, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x70,
+ 0x70, 0x49, 0x6E, 0x64, 0x65, 0x78, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73,
+ 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x70, 0x70, 0x20, 0x69, 0x6E, 0x64, 0x65, 0x78, 0x20, 0x6F,
+ 0x66, 0x20, 0x61, 0x6E, 0x20, 0x61, 0x70, 0x70, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
+ 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x78, 0x6E, 0x22, 0x2C, 0x0A, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A,
- 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20,
+ 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A,
+ 0x20, 0x22, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x70, 0x70, 0x49, 0x6E, 0x64, 0x65,
+ 0x78, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x61, 0x73,
+ 0x73, 0x65, 0x74, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A,
+ 0x20, 0x22, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x73, 0x73, 0x65, 0x74, 0x49, 0x6E,
+ 0x64, 0x65, 0x78, 0x20, 0x69, 0x6E, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x68,
+ 0x65, 0x20, 0x61, 0x73, 0x73, 0x65, 0x74, 0x20, 0x69, 0x6E, 0x64, 0x65, 0x78, 0x20, 0x6F, 0x66,
+ 0x20, 0x61, 0x6E, 0x20, 0x61, 0x73, 0x73, 0x65, 0x74, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65,
+ 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x78, 0x6E, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22,
+ 0x3A, 0x20, 0x22, 0x69, 0x6E, 0x74, 0x65, 0x67, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A,
+ 0x20, 0x22, 0x75, 0x69, 0x6E, 0x74, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22,
- 0x3A, 0x20, 0x22, 0x54, 0x78, 0x49, 0x44, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22,
- 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x49, 0x44, 0x22, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B,
- 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F,
- 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C,
- 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C,
- 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x22, 0x72,
- 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x41, 0x63, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x49, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61,
- 0x74, 0x69, 0x6F, 0x6E, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x41, 0x63, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x49, 0x6E,
- 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73,
- 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x61, 0x6E, 0x20, 0x61, 0x63,
- 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x20, 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F,
- 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68, 0x65, 0x6D,
- 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24,
- 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74,
- 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x41, 0x63, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x22, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x41, 0x73, 0x73, 0x65, 0x74, 0x49, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69,
- 0x6F, 0x6E, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F,
- 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x41, 0x73, 0x73, 0x65, 0x74, 0x49, 0x6E, 0x66, 0x6F, 0x72, 0x6D,
- 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63, 0x6F,
- 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x61, 0x73, 0x73, 0x65, 0x74, 0x20, 0x69, 0x6E, 0x66,
- 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x3A, 0x20, 0x22, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x73, 0x73, 0x65, 0x74, 0x49,
+ 0x6E, 0x64, 0x65, 0x78, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72,
+ 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F,
+ 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69,
+ 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20,
+ 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x4E, 0x6F, 0x74, 0x65, 0x20, 0x74, 0x68, 0x61,
+ 0x74, 0x20, 0x77, 0x65, 0x20, 0x61, 0x6E, 0x6E, 0x6F, 0x74, 0x61, 0x74, 0x65, 0x20, 0x74, 0x68,
+ 0x69, 0x73, 0x20, 0x61, 0x73, 0x20, 0x61, 0x20, 0x6D, 0x6F, 0x64, 0x65, 0x6C, 0x20, 0x73, 0x6F,
+ 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x6C, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x63, 0x6C, 0x69,
+ 0x65, 0x6E, 0x74, 0x73, 0x5C, 0x6E, 0x63, 0x61, 0x6E, 0x20, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x6C, 0x79, 0x20, 0x69, 0x6D, 0x70, 0x6F, 0x72, 0x74, 0x20, 0x61, 0x20, 0x73, 0x77, 0x61, 0x67,
+ 0x67, 0x65, 0x72, 0x20, 0x67, 0x65, 0x6E, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x20, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x6D, 0x6F, 0x64, 0x65, 0x6C, 0x2E, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62,
+ 0x6A, 0x65, 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x69,
+ 0x74, 0x6C, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x63,
+ 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x75, 0x72, 0x72,
+ 0x65, 0x6E, 0x74, 0x20, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6F,
+ 0x6E, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x71, 0x75,
+ 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x67, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x5F, 0x69, 0x64,
+ 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x67, 0x65, 0x6E, 0x65,
+ 0x73, 0x69, 0x73, 0x5F, 0x68, 0x61, 0x73, 0x68, 0x5F, 0x62, 0x36, 0x34, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x62, 0x75, 0x69, 0x6C, 0x64, 0x22, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70,
+ 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x62, 0x75, 0x69, 0x6C, 0x64, 0x22, 0x3A, 0x20, 0x7B,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66,
+ 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E,
+ 0x73, 0x2F, 0x42, 0x75, 0x69, 0x6C, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x67, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73, 0x5F, 0x68, 0x61, 0x73, 0x68,
+ 0x5F, 0x62, 0x36, 0x34, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69,
+ 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x22, 0x3A, 0x20, 0x22, 0x62, 0x79, 0x74, 0x65, 0x22, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67, 0x6F,
+ 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x47, 0x65, 0x6E, 0x65, 0x73, 0x69, 0x73,
+ 0x48, 0x61, 0x73, 0x68, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x67, 0x65, 0x6E, 0x65, 0x73, 0x69,
+ 0x73, 0x5F, 0x69, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69,
+ 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x47, 0x65, 0x6E,
+ 0x65, 0x73, 0x69, 0x73, 0x49, 0x44, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x61, 0x72, 0x72, 0x61,
+ 0x79, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x69,
+ 0x74, 0x65, 0x6D, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74,
+ 0x72, 0x69, 0x6E, 0x67, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D,
+ 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6F, 0x6E, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78,
+ 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x67,
+ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61,
+ 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x64,
+ 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70, 0x69, 0x2F,
+ 0x73, 0x70, 0x65, 0x63, 0x2F, 0x63, 0x6F, 0x6D, 0x6D, 0x6F, 0x6E, 0x22, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63,
+ 0x74, 0x69, 0x6F, 0x6E, 0x49, 0x44, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20,
+ 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x49, 0x44, 0x20, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x6F, 0x62, 0x6A, 0x65,
+ 0x63, 0x74, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x72, 0x65, 0x71, 0x75,
+ 0x69, 0x72, 0x65, 0x64, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x74, 0x78, 0x49, 0x64, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5D, 0x2C,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x70, 0x72, 0x6F, 0x70, 0x65, 0x72, 0x74, 0x69,
+ 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x74, 0x78, 0x49, 0x64, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22,
+ 0x3A, 0x20, 0x22, 0x54, 0x78, 0x49, 0x64, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73,
+ 0x74, 0x72, 0x69, 0x6E, 0x67, 0x20, 0x65, 0x6E, 0x63, 0x6F, 0x64, 0x69, 0x6E, 0x67, 0x20, 0x6F,
+ 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F,
+ 0x6E, 0x20, 0x68, 0x61, 0x73, 0x68, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69,
+ 0x6E, 0x67, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x78, 0x49,
+ 0x44, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x67,
+ 0x6F, 0x2D, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61,
+ 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x49, 0x44, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x78, 0x2D, 0x67, 0x6F, 0x2D, 0x70, 0x61, 0x63, 0x6B, 0x61, 0x67, 0x65, 0x22, 0x3A, 0x20,
+ 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x61, 0x6C, 0x67, 0x6F,
+ 0x72, 0x61, 0x6E, 0x64, 0x2F, 0x67, 0x6F, 0x2D, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x61, 0x6E, 0x64,
+ 0x2F, 0x64, 0x61, 0x65, 0x6D, 0x6F, 0x6E, 0x2F, 0x61, 0x6C, 0x67, 0x6F, 0x64, 0x2F, 0x61, 0x70,
+ 0x69, 0x2F, 0x73, 0x70, 0x65, 0x63, 0x2F, 0x76, 0x31, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D,
+ 0x0A, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x22, 0x72, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73,
+ 0x65, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x41, 0x63, 0x63, 0x6F,
+ 0x75, 0x6E, 0x74, 0x49, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x52, 0x65,
+ 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20,
+ 0x22, 0x41, 0x63, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x49, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74,
+ 0x69, 0x6F, 0x6E, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74,
+ 0x61, 0x69, 0x6E, 0x73, 0x20, 0x61, 0x6E, 0x20, 0x61, 0x63, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x20,
+ 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20,
+ 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x41,
+ 0x63, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x41, 0x73, 0x73, 0x65,
+ 0x74, 0x49, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x52, 0x65, 0x73, 0x70,
+ 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x41,
+ 0x73, 0x73, 0x65, 0x74, 0x49, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x52,
+ 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73,
+ 0x20, 0x61, 0x73, 0x73, 0x65, 0x74, 0x20, 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69,
+ 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68, 0x65,
+ 0x6D, 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
+ 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69,
+ 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x41, 0x73, 0x73, 0x65, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6D,
+ 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D,
+ 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
+ 0x41, 0x73, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63,
+ 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x61, 0x20, 0x6C, 0x69, 0x73, 0x74, 0x20, 0x6F,
+ 0x66, 0x20, 0x61, 0x73, 0x73, 0x65, 0x74, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x22, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F,
0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x41, 0x73, 0x73, 0x65,
- 0x74, 0x50, 0x61, 0x72, 0x61, 0x6D, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x41, 0x73, 0x73,
- 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
+ 0x74, 0x4C, 0x69, 0x73, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x42, 0x6C, 0x6F, 0x63, 0x6B,
+ 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22,
+ 0x3A, 0x20, 0x22, 0x42, 0x6C, 0x6F, 0x63, 0x6B, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65,
+ 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x20,
+ 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20,
+ 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x42,
+ 0x6C, 0x6F, 0x63, 0x6B, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x50, 0x65, 0x6E, 0x64, 0x69, 0x6E,
+ 0x67, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22,
+ 0x50, 0x65, 0x6E, 0x64, 0x69, 0x6E, 0x67, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
+ 0x6F, 0x6E, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74,
+ 0x61, 0x69, 0x6E, 0x73, 0x20, 0x61, 0x20, 0x28, 0x70, 0x6F, 0x74, 0x65, 0x6E, 0x74, 0x69, 0x61,
+ 0x6C, 0x6C, 0x79, 0x20, 0x74, 0x72, 0x75, 0x6E, 0x63, 0x61, 0x74, 0x65, 0x64, 0x29, 0x20, 0x6C,
+ 0x69, 0x73, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
+ 0x6F, 0x6E, 0x73, 0x20, 0x61, 0x6E, 0x64, 0x5C, 0x6E, 0x74, 0x68, 0x65, 0x20, 0x74, 0x6F, 0x74,
+ 0x61, 0x6C, 0x20, 0x6E, 0x75, 0x6D, 0x62, 0x65, 0x72, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x72, 0x61,
+ 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E,
+ 0x74, 0x6C, 0x79, 0x20, 0x69, 0x6E, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x6F, 0x6F, 0x6C, 0x2E,
+ 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61,
+ 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72,
+ 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69,
+ 0x6F, 0x6E, 0x73, 0x2F, 0x50, 0x65, 0x6E, 0x64, 0x69, 0x6E, 0x67, 0x54, 0x72, 0x61, 0x6E, 0x73,
+ 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
- 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x74, 0x68,
+ 0x65, 0x20, 0x6E, 0x6F, 0x64, 0x65, 0x27, 0x73, 0x20, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x20,
+ 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20,
+ 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x4E,
+ 0x6F, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x53,
+ 0x75, 0x70, 0x70, 0x6C, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20,
+ 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x53, 0x75, 0x70, 0x70, 0x6C, 0x79, 0x52, 0x65,
+ 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20,
+ 0x74, 0x68, 0x65, 0x20, 0x6C, 0x65, 0x64, 0x67, 0x65, 0x72, 0x20, 0x73, 0x75, 0x70, 0x70, 0x6C,
+ 0x79, 0x20, 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20,
+ 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22,
+ 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73,
+ 0x2F, 0x53, 0x75, 0x70, 0x70, 0x6C, 0x79, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x54, 0x72, 0x61,
+ 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x46, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6F,
+ 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72,
+ 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x46, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70,
0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x61, 0x20,
- 0x6C, 0x69, 0x73, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x61, 0x73, 0x73, 0x65, 0x74, 0x73, 0x22, 0x2C,
+ 0x73, 0x75, 0x67, 0x67, 0x65, 0x73, 0x74, 0x65, 0x64, 0x20, 0x66, 0x65, 0x65, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20,
+ 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22,
+ 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73,
+ 0x2F, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x46, 0x65, 0x65, 0x22,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E,
+ 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F,
+ 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E,
+ 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61,
+ 0x69, 0x6E, 0x73, 0x20, 0x61, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F,
+ 0x6E, 0x20, 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20,
+ 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22,
+ 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73,
+ 0x2F, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x49, 0x44, 0x22, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x50,
+ 0x61, 0x72, 0x61, 0x6D, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20,
+ 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74,
+ 0x69, 0x6F, 0x6E, 0x50, 0x61, 0x72, 0x61, 0x6D, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73,
+ 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70,
+ 0x61, 0x72, 0x61, 0x6D, 0x65, 0x74, 0x65, 0x72, 0x73, 0x20, 0x66, 0x6F, 0x72, 0x5C, 0x6E, 0x63,
+ 0x6F, 0x6E, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x69, 0x6E, 0x67, 0x20, 0x61, 0x20, 0x6E, 0x65,
+ 0x77, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x2E, 0x22, 0x2C,
0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x22, 0x3A,
0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66,
0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E,
- 0x73, 0x2F, 0x41, 0x73, 0x73, 0x65, 0x74, 0x4C, 0x69, 0x73, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x42, 0x6C, 0x6F, 0x63, 0x6B, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A,
- 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x42, 0x6C, 0x6F, 0x63, 0x6B, 0x52, 0x65,
- 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20,
- 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x20, 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F,
- 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68, 0x65, 0x6D,
- 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24,
- 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74,
- 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x42, 0x6C, 0x6F, 0x63, 0x6B, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x50, 0x65, 0x6E, 0x64, 0x69, 0x6E, 0x67, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6F, 0x6E, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
- 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x50, 0x65, 0x6E, 0x64, 0x69, 0x6E, 0x67, 0x54, 0x72, 0x61,
- 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73,
- 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x61, 0x20, 0x28, 0x70, 0x6F,
- 0x74, 0x65, 0x6E, 0x74, 0x69, 0x61, 0x6C, 0x6C, 0x79, 0x20, 0x74, 0x72, 0x75, 0x6E, 0x63, 0x61,
- 0x74, 0x65, 0x64, 0x29, 0x20, 0x6C, 0x69, 0x73, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x72, 0x61,
- 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x20, 0x61, 0x6E, 0x64, 0x5C, 0x6E, 0x74,
- 0x68, 0x65, 0x20, 0x74, 0x6F, 0x74, 0x61, 0x6C, 0x20, 0x6E, 0x75, 0x6D, 0x62, 0x65, 0x72, 0x20,
- 0x6F, 0x66, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x20,
- 0x63, 0x75, 0x72, 0x72, 0x65, 0x6E, 0x74, 0x6C, 0x79, 0x20, 0x69, 0x6E, 0x20, 0x74, 0x68, 0x65,
- 0x20, 0x70, 0x6F, 0x6F, 0x6C, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65,
- 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x50, 0x65, 0x6E, 0x64, 0x69, 0x6E,
- 0x67, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73,
- 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x53, 0x74, 0x61, 0x74,
- 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61,
- 0x69, 0x6E, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6E, 0x6F, 0x64, 0x65, 0x27, 0x73, 0x20, 0x73,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x20, 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F,
- 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68, 0x65, 0x6D,
- 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24,
- 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74,
- 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x4E, 0x6F, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22,
- 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x53, 0x75, 0x70, 0x70, 0x6C, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6F,
- 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x53, 0x75,
- 0x70, 0x70, 0x6C, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63, 0x6F, 0x6E,
- 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6C, 0x65, 0x64, 0x67, 0x65, 0x72,
- 0x20, 0x73, 0x75, 0x70, 0x70, 0x6C, 0x79, 0x20, 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74,
- 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68,
- 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E,
- 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x53, 0x75, 0x70, 0x70, 0x6C, 0x79, 0x22, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x46, 0x65,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E,
- 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x46,
- 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61,
- 0x69, 0x6E, 0x73, 0x20, 0x61, 0x20, 0x73, 0x75, 0x67, 0x67, 0x65, 0x73, 0x74, 0x65, 0x64, 0x20,
- 0x66, 0x65, 0x65, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68,
- 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E,
- 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6F, 0x6E, 0x46, 0x65, 0x65, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73,
- 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65,
- 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73,
- 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65,
- 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x61, 0x20, 0x74, 0x72, 0x61, 0x6E,
- 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74,
- 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68,
- 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E,
- 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6F, 0x6E, 0x49, 0x44, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61,
- 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x50, 0x61, 0x72, 0x61, 0x6D, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6F,
- 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72,
- 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x50, 0x61, 0x72, 0x61, 0x6D, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73,
- 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6D, 0x65, 0x74, 0x65, 0x72, 0x73, 0x20,
- 0x66, 0x6F, 0x72, 0x5C, 0x6E, 0x63, 0x6F, 0x6E, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x69, 0x6E,
- 0x67, 0x20, 0x61, 0x20, 0x6E, 0x65, 0x77, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74,
- 0x69, 0x6F, 0x6E, 0x2E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63,
- 0x68, 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69,
- 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74,
- 0x69, 0x6F, 0x6E, 0x50, 0x61, 0x72, 0x61, 0x6D, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x54,
- 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E,
- 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61,
- 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65,
- 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x61, 0x20, 0x74, 0x72, 0x61, 0x6E,
- 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74,
- 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68,
- 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E,
- 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6F, 0x6E, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74,
- 0x69, 0x6F, 0x6E, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B,
+ 0x73, 0x2F, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x50, 0x61, 0x72,
+ 0x61, 0x6D, 0x73, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63,
+ 0x74, 0x69, 0x6F, 0x6E, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B,
0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6F, 0x6E, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74,
- 0x61, 0x69, 0x6E, 0x73, 0x20, 0x61, 0x20, 0x6C, 0x69, 0x73, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x74,
- 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20,
- 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x54,
- 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x4C, 0x69, 0x73, 0x74, 0x22, 0x0A,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x22, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x56,
- 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20,
- 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20,
- 0x74, 0x6F, 0x20, 0x27, 0x47, 0x45, 0x54, 0x20, 0x2F, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E,
- 0x73, 0x27, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68, 0x65,
- 0x6D, 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69,
- 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x0A, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x7D,
- 0x2C, 0x0A, 0x20, 0x20, 0x22, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66,
- 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x22, 0x61, 0x70, 0x69, 0x5F, 0x6B, 0x65, 0x79, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E,
- 0x22, 0x3A, 0x20, 0x22, 0x47, 0x65, 0x6E, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x20, 0x68, 0x65,
- 0x61, 0x64, 0x65, 0x72, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6D, 0x65, 0x74, 0x65, 0x72, 0x2E, 0x20,
- 0x54, 0x68, 0x69, 0x73, 0x20, 0x74, 0x6F, 0x6B, 0x65, 0x6E, 0x20, 0x63, 0x61, 0x6E, 0x20, 0x62,
- 0x65, 0x20, 0x67, 0x65, 0x6E, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x20, 0x75, 0x73, 0x69, 0x6E,
- 0x67, 0x20, 0x74, 0x68, 0x65, 0x20, 0x47, 0x6F, 0x61, 0x6C, 0x20, 0x63, 0x6F, 0x6D, 0x6D, 0x61,
- 0x6E, 0x64, 0x20, 0x6C, 0x69, 0x6E, 0x65, 0x20, 0x74, 0x6F, 0x6F, 0x6C, 0x2E, 0x20, 0x45, 0x78,
- 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x20, 0x76, 0x61, 0x6C, 0x75, 0x65, 0x20, 0x3D, 0x27, 0x62, 0x37,
- 0x65, 0x33, 0x38, 0x34, 0x64, 0x30, 0x33, 0x31, 0x37, 0x62, 0x38, 0x30, 0x35, 0x30, 0x63, 0x65,
- 0x34, 0x35, 0x39, 0x30, 0x30, 0x61, 0x39, 0x34, 0x61, 0x31, 0x39, 0x33, 0x31, 0x65, 0x32, 0x38,
- 0x35, 0x34, 0x30, 0x65, 0x31, 0x66, 0x36, 0x39, 0x62, 0x32, 0x64, 0x32, 0x34, 0x32, 0x62, 0x34,
- 0x32, 0x34, 0x36, 0x35, 0x39, 0x63, 0x33, 0x34, 0x31, 0x62, 0x34, 0x36, 0x39, 0x37, 0x27, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20,
- 0x22, 0x61, 0x70, 0x69, 0x4B, 0x65, 0x79, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x22, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x58, 0x2D, 0x41, 0x6C, 0x67, 0x6F, 0x2D,
- 0x41, 0x50, 0x49, 0x2D, 0x54, 0x6F, 0x6B, 0x65, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x22, 0x69, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22,
- 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x78, 0x2D, 0x65, 0x78, 0x61, 0x6D, 0x70,
- 0x6C, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x62, 0x37, 0x65, 0x33, 0x38, 0x34, 0x64, 0x30, 0x33, 0x31,
- 0x37, 0x62, 0x38, 0x30, 0x35, 0x30, 0x63, 0x65, 0x34, 0x35, 0x39, 0x30, 0x30, 0x61, 0x39, 0x34,
- 0x61, 0x31, 0x39, 0x33, 0x31, 0x65, 0x32, 0x38, 0x35, 0x34, 0x30, 0x65, 0x31, 0x66, 0x36, 0x39,
- 0x62, 0x32, 0x64, 0x32, 0x34, 0x32, 0x62, 0x34, 0x32, 0x34, 0x36, 0x35, 0x39, 0x63, 0x33, 0x34,
- 0x31, 0x62, 0x34, 0x36, 0x39, 0x37, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20,
- 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x22, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x22, 0x3A,
- 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22,
- 0x61, 0x70, 0x69, 0x5F, 0x6B, 0x65, 0x79, 0x22, 0x3A, 0x20, 0x5B, 0x5D, 0x0A, 0x20, 0x20, 0x20,
- 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x5D, 0x0A, 0x7D,
+ 0x6F, 0x6E, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61,
+ 0x69, 0x6E, 0x73, 0x20, 0x61, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F,
+ 0x6E, 0x20, 0x69, 0x6E, 0x66, 0x6F, 0x72, 0x6D, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x2C, 0x0A,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20,
+ 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22,
+ 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73,
+ 0x2F, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x0A, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20,
+ 0x22, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x63, 0x6F, 0x6E, 0x74, 0x61, 0x69, 0x6E, 0x73, 0x20, 0x61,
+ 0x20, 0x6C, 0x69, 0x73, 0x74, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63,
+ 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73,
+ 0x63, 0x68, 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A, 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66,
+ 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x61, 0x63,
+ 0x74, 0x69, 0x6F, 0x6E, 0x4C, 0x69, 0x73, 0x74, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x7D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x22, 0x3A,
+ 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65,
+ 0x20, 0x72, 0x65, 0x73, 0x70, 0x6F, 0x6E, 0x73, 0x65, 0x20, 0x74, 0x6F, 0x20, 0x27, 0x47, 0x45,
+ 0x54, 0x20, 0x2F, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x73, 0x27, 0x22, 0x2C, 0x0A, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x22, 0x3A, 0x20, 0x7B,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x24, 0x72, 0x65, 0x66, 0x22, 0x3A,
+ 0x20, 0x22, 0x23, 0x2F, 0x64, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x2F,
+ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7D,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x22, 0x73,
+ 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6E, 0x69, 0x74, 0x69, 0x6F,
+ 0x6E, 0x73, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x22, 0x61, 0x70, 0x69, 0x5F,
+ 0x6B, 0x65, 0x79, 0x22, 0x3A, 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6F, 0x6E, 0x22, 0x3A, 0x20, 0x22, 0x47, 0x65,
+ 0x6E, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x20, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x20, 0x70,
+ 0x61, 0x72, 0x61, 0x6D, 0x65, 0x74, 0x65, 0x72, 0x2E, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x74,
+ 0x6F, 0x6B, 0x65, 0x6E, 0x20, 0x63, 0x61, 0x6E, 0x20, 0x62, 0x65, 0x20, 0x67, 0x65, 0x6E, 0x65,
+ 0x72, 0x61, 0x74, 0x65, 0x64, 0x20, 0x75, 0x73, 0x69, 0x6E, 0x67, 0x20, 0x74, 0x68, 0x65, 0x20,
+ 0x47, 0x6F, 0x61, 0x6C, 0x20, 0x63, 0x6F, 0x6D, 0x6D, 0x61, 0x6E, 0x64, 0x20, 0x6C, 0x69, 0x6E,
+ 0x65, 0x20, 0x74, 0x6F, 0x6F, 0x6C, 0x2E, 0x20, 0x45, 0x78, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x20,
+ 0x76, 0x61, 0x6C, 0x75, 0x65, 0x20, 0x3D, 0x27, 0x62, 0x37, 0x65, 0x33, 0x38, 0x34, 0x64, 0x30,
+ 0x33, 0x31, 0x37, 0x62, 0x38, 0x30, 0x35, 0x30, 0x63, 0x65, 0x34, 0x35, 0x39, 0x30, 0x30, 0x61,
+ 0x39, 0x34, 0x61, 0x31, 0x39, 0x33, 0x31, 0x65, 0x32, 0x38, 0x35, 0x34, 0x30, 0x65, 0x31, 0x66,
+ 0x36, 0x39, 0x62, 0x32, 0x64, 0x32, 0x34, 0x32, 0x62, 0x34, 0x32, 0x34, 0x36, 0x35, 0x39, 0x63,
+ 0x33, 0x34, 0x31, 0x62, 0x34, 0x36, 0x39, 0x37, 0x27, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x20, 0x22, 0x61, 0x70, 0x69, 0x4B, 0x65,
+ 0x79, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x6E, 0x61, 0x6D, 0x65, 0x22,
+ 0x3A, 0x20, 0x22, 0x58, 0x2D, 0x41, 0x6C, 0x67, 0x6F, 0x2D, 0x41, 0x50, 0x49, 0x2D, 0x54, 0x6F,
+ 0x6B, 0x65, 0x6E, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x69, 0x6E, 0x22,
+ 0x3A, 0x20, 0x22, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0x2C, 0x0A, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x22, 0x78, 0x2D, 0x65, 0x78, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x22, 0x3A, 0x20, 0x22,
+ 0x62, 0x37, 0x65, 0x33, 0x38, 0x34, 0x64, 0x30, 0x33, 0x31, 0x37, 0x62, 0x38, 0x30, 0x35, 0x30,
+ 0x63, 0x65, 0x34, 0x35, 0x39, 0x30, 0x30, 0x61, 0x39, 0x34, 0x61, 0x31, 0x39, 0x33, 0x31, 0x65,
+ 0x32, 0x38, 0x35, 0x34, 0x30, 0x65, 0x31, 0x66, 0x36, 0x39, 0x62, 0x32, 0x64, 0x32, 0x34, 0x32,
+ 0x62, 0x34, 0x32, 0x34, 0x36, 0x35, 0x39, 0x63, 0x33, 0x34, 0x31, 0x62, 0x34, 0x36, 0x39, 0x37,
+ 0x22, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x7D, 0x2C, 0x0A, 0x20, 0x20, 0x22,
+ 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x22, 0x3A, 0x20, 0x5B, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x7B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x61, 0x70, 0x69, 0x5F, 0x6B, 0x65,
+ 0x79, 0x22, 0x3A, 0x20, 0x5B, 0x5D, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x7D, 0x0A, 0x20, 0x20, 0x5D,
+ 0x0A, 0x7D,
})
}
diff --git a/daemon/algod/api/server/v1/handlers/handlers.go b/daemon/algod/api/server/v1/handlers/handlers.go
index c1f08ffd2..d01034fec 100644
--- a/daemon/algod/api/server/v1/handlers/handlers.go
+++ b/daemon/algod/api/server/v1/handlers/handlers.go
@@ -93,8 +93,8 @@ func txEncode(tx transactions.Transaction, ad transactions.ApplyData) (v1.Transa
res = assetFreezeTxEncode(tx, ad)
case protocol.ApplicationCallTx:
res = applicationCallTxEncode(tx, ad)
- case protocol.CompactCertTx:
- res = compactCertTxEncode(tx, ad)
+ case protocol.StateProofTx:
+ res = stateProofTxEncode(tx)
default:
return res, errors.New(errUnknownTransactionType)
}
@@ -351,14 +351,14 @@ func assetFreezeTxEncode(tx transactions.Transaction, ad transactions.ApplyData)
}
}
-func compactCertTxEncode(tx transactions.Transaction, ad transactions.ApplyData) v1.Transaction {
- cc := v1.CompactCertTransactionType{
- CertRound: uint64(tx.CompactCertTxnFields.CertRound),
- Cert: protocol.Encode(&tx.CompactCertTxnFields.Cert),
+func stateProofTxEncode(tx transactions.Transaction) v1.Transaction {
+ sp := v1.StateProofTransactionType{
+ StateProof: protocol.Encode(&tx.StateProofTxnFields.StateProof),
+ StateProofMessage: protocol.Encode(&tx.Message),
}
return v1.Transaction{
- CompactCert: &cc,
+ StateProof: &sp,
}
}
@@ -504,13 +504,6 @@ func blockEncode(b bookkeeping.Block, c agreement.Certificate) (v1.Block, error)
UpgradePropose: string(b.UpgradePropose),
UpgradeApprove: b.UpgradeApprove,
},
- CompactCertVotersTotal: b.CompactCert[protocol.CompactCertBasic].CompactCertVotersTotal.ToUint64(),
- CompactCertNextRound: uint64(b.CompactCert[protocol.CompactCertBasic].CompactCertNextRound),
- }
-
- if !b.CompactCert[protocol.CompactCertBasic].CompactCertVoters.IsEmpty() {
- voters := b.CompactCert[protocol.CompactCertBasic].CompactCertVoters
- block.CompactCertVoters = voters[:]
}
// Transactions
diff --git a/daemon/algod/api/server/v2/dryrun.go b/daemon/algod/api/server/v2/dryrun.go
index d3b85de39..3079a71aa 100644
--- a/daemon/algod/api/server/v2/dryrun.go
+++ b/daemon/algod/api/server/v2/dryrun.go
@@ -243,6 +243,10 @@ func (dl *dryrunLedger) BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
return bookkeeping.BlockHeader{}, nil
}
+func (dl *dryrunLedger) BlockHdrCached(basics.Round) (bookkeeping.BlockHeader, error) {
+ return bookkeeping.BlockHeader{}, nil
+}
+
func (dl *dryrunLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
return nil
}
@@ -414,6 +418,7 @@ func doDryrunRequest(dr *DryrunRequest, response *generated.DryrunResponse) {
if len(stxn.Lsig.Logic) > 0 {
var debug dryrunDebugReceiver
ep.Debugger = &debug
+ ep.SigLedger = &dl
pass, err := logic.EvalSignature(ti, ep)
var messages []string
result.Disassembly = debug.lines // Keep backwards compat
diff --git a/daemon/algod/api/server/v2/errors.go b/daemon/algod/api/server/v2/errors.go
index 9966728dc..f20df6f2f 100644
--- a/daemon/algod/api/server/v2/errors.go
+++ b/daemon/algod/api/server/v2/errors.go
@@ -34,7 +34,7 @@ var (
errFailedToParseSourcemap = "failed to parse sourcemap"
errFailedToEncodeResponse = "failed to encode response"
errInternalFailure = "internal failure"
- errNoTxnSpecified = "no transaction ID was specified"
+ errNoValidTxnSpecified = "no valid transaction ID was specified"
errInvalidHashType = "invalid hash type"
errTransactionNotFound = "could not find the transaction in the transaction pool or in the last 1000 confirmed rounds"
errServiceShuttingDown = "operation aborted as server is shutting down"
@@ -44,4 +44,5 @@ var (
errFailedToStartCatchup = "failed to start catchup : %v"
errOperationNotAvailableDuringCatchup = "operation not available during catchup"
errRESTPayloadZeroLength = "payload was of zero length"
+ errRoundGreaterThanTheLatest = "given round is greater than the latest round"
)
diff --git a/daemon/algod/api/server/v2/generated/private/routes.go b/daemon/algod/api/server/v2/generated/private/routes.go
index 72eb4a7b1..2ea4c4033 100644
--- a/daemon/algod/api/server/v2/generated/private/routes.go
+++ b/daemon/algod/api/server/v2/generated/private/routes.go
@@ -311,155 +311,162 @@ func RegisterHandlers(router interface {
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9/XPcNrLgv4KafVX+uOGM/JVdqyr1TrGcrC6O47KUfXfP9iUYsmcGKxJgAFCaiU//",
- "+xUaAAmS4Az1scpzPf9kawg0Go1Go7vR3fg8SUVRCg5cq8nh50lJJS1Ag8S/aJqKiuuEZeavDFQqWamZ",
- "4JND/40oLRlfTaYTZn4tqV5PphNOC2jamP7TiYTfKyYhmxxqWcF0otI1FNQA1tvStK4hbZKVSByIIwvi",
- "5HhyteMDzTIJSvWx/JnnW8J4mlcZEC0pVzQ1nxS5ZHpN9Jop4joTxongQMSS6HWrMVkyyDM185P8vQK5",
- "DWbpBh+e0lWDYiJFDn08X4liwTh4rKBGql4QogXJYImN1lQTM4LB1TfUgiigMl2TpZB7ULVIhPgCr4rJ",
- "4YeJAp6BxNVKgV3gf5cS4A9INJUr0JNP09jklhpkolkRmdqJo74EVeVaEWyLc1yxC+DE9JqRnyqlyQII",
- "5eT996/Is2fPXpqJFFRryByTDc6qGT2ck+0+OZxkVIP/3Oc1mq+EpDxL6vbvv3+F45+6CY5tRZWC+GY5",
- "Ml/IyfHQBHzHCAsxrmGF69DiftMjsimanxewFBJGroltfKeLEo7/p65KSnW6LgXjOrIuBL8S+zkqw4Lu",
- "u2RYjUCrfWkoJQ3QDwfJy0+fn0yfHFz95cNR8p/uzxfPrkZO/1UNdw8Fog3TSkrg6TZZSaC4W9aU9+nx",
- "3vGDWosqz8iaXuDi0wJFvetLTF8rOi9oXhk+YakUR/lKKEIdG2WwpFWuiR+YVDw3YspAc9xOmCKlFBcs",
- "g2xqpO/lmqVrklJlQWA7csny3PBgpSAb4rX47HZspquQJAavG9EDJ/RflxjNvPZQAjYoDZI0FwoSLfYc",
- "T/7EoTwj4YHSnFXqeocVOVsDwcHNB3vYIu244ek83xKN65oRqggl/miaErYkW1GRS1ycnJ1jfzcbQ7WC",
- "GKLh4rTOUbN5h8jXI0aEeAshcqAcief3XZ9kfMlWlQRFLteg1+7Mk6BKwRUQsfgnpNos+/86/fktEZL8",
- "BErRFbyj6TkBnopseI3doLET/J9KmAUv1Kqk6Xn8uM5ZwSIo/0Q3rKgKwqtiAdKslz8ftCASdCX5EEIW",
- "4h4+K+imP+iZrHiKi9sM21LUDCsxVeZ0OyMnS1LQzbcHU4eOIjTPSQk8Y3xF9IYPKmlm7P3oJVJUPBuh",
- "w2izYMGpqUpI2ZJBRmooOzBxw+zDh/Hr4dNoVgE6HsggOvUoe9DhsInwjNm65gsp6QoClpmRX5zkwq9a",
- "nAOvBRxZbPFTKeGCiUrVnQZwxKF3q9dcaEhKCUsW4bFTRw4jPWwbJ14Lp+CkgmvKOGRG8iLSQoOVRIM4",
- "BQPuNmb6R/SCKvjm+dAB3nwdufpL0V31nSs+arWxUWK3ZORcNF/dho2rTa3+I4y/cGzFVon9ubeQbHVm",
- "jpIly/GY+adZP0+GSqEQaBHCHzyKrTjVlYTDj/yx+Ysk5FRTnlGZmV8K+9NPVa7ZKVuZn3L70xuxYukp",
- "Ww0Qs8Y1ak1ht8L+Y+DFxbHeRI2GN0KcV2U4obRllS625OR4aJEtzOsy5lFtyoZWxdnGWxrX7aE39UIO",
- "IDlIu5KahuewlWCwpekS/9kskZ/oUv5h/inLPEZTw8DuoEWngHMWHJVlzlJqqPfefTZfze4Hax7QpsUc",
- "T9LDzwFupRQlSM0sUFqWSS5SmidKU42Q/k3CcnI4+cu88arMbXc1DwZ/Y3qdYiejiFrlJqFleQ0Y74xC",
- "o3ZICSOZ8RPKByvvUBVi3K6e4SFmZG8OF5TrWWOItARBvXM/uJEaelsdxtK7Y1gNEpzYhgtQVq+1DR8o",
- "EpCeIFkJkhXVzFUuFvUPD4/KsqEgfj8qS0sP1AmBoboFG6a0eoTTp80WCsc5OZ6RH0LYqGALnm/NqWB1",
- "DHMoLN1x5Y6v2mPk5tBAfKAILqeQM7M0ngxGeb8LjkNjYS1yo+7s5RXT+O+ubchm5vdRnb8MFgtpO8xc",
- "aD45ylnLBX8JTJaHHc7pM45z4szIUbfvzdjGQIkzzI14Zed6Wrg76FiT8FLS0iLovthDlHE0vWwji+st",
- "pelIQRfFOdjDAa8hVjfea3v3QxQTZIUODt/lIj2/g/2+MHD62w7BkzXQDCTJqKbBvnL7JX5YY8e/Yz+U",
- "CCAjGv3P+B+aE/PZML6RixassdQZ8q8I/OqZMXCt2mxHMg3Q8BaksDYtMbbotbB81QzekxGWLGNkxGtr",
- "RhPs4Sdhpt44yY4WQt6MXzqMwEnj+iPUQA22y7Szsti0KhNHn4j7wDboAGpuW/paZEihLvgYrVpUONX0",
- "X0AFZaDeBRXagO6aCqIoWQ53sF/XVK37kzD23LOn5PTvRy+ePP316YtvjEFSSrGStCCLrQZFHjo1mii9",
- "zeFRf2aoz1a5jkP/5rl3GLXhxuAoUckUClr2QVlHlD20bDNi2vWp1iYzzrpGcMy2PAMjXizZifWxGtSO",
- "mTJnYrG4k8UYIljWjJIRh0kGe5nputNrhtmGU5RbWd2F8QFSChlxheAW0yIVeXIBUjER8Wq/cy2Ia+EV",
- "krL7u8WWXFJFzNjopat4BnIW4yy94Yga01CofQeqBX224Q1tHEAqJd32yG/nG5mdG3fMurSJ750+ipQg",
- "E73hJINFtWrprkspCkJJhh3x4HgrMjB2R6XuQFo2wBpkzEKEKNCFqDShhIsM0EipVFyODlxxoW8drwR0",
- "KJr12p7TCzAKcUqr1VqTqiTo8O4tbdMxoaldlATPVDXgEaxdubaVHc5en+QSaGYUZeBELJzbzTkEcZIU",
- "vfXaSyInxSOmQwuvUooUlDIGjlVb96Lm29lV1jvohIgjwvUoRAmypPKGyGqhab4HUWwTQ7dWu5yvso/1",
- "uOF3LWB38HAZqTQ2juUCo+OZ3Z2DhiESjqTJBUj02f1L188PctPlq8qBG3WnqZyxAk0lTrlQkAqeqSiw",
- "nCqd7Nu2plFLnTIzCHZKbKci4AFz/Q1V2npuGc9QtbbiBsexdrwZYhjhwRPFQP6HP0z6sFMjJ7mqVH2y",
- "qKoshdSQxebAYbNjrLewqccSywB2fXxpQSoF+yAPUSmA74hlZ2IJRHXt53BXG/3JoTfAnAPbKClbSDSE",
- "2IXIqW8VUDe8VRxAxNhhdU9kHKY6nFNfZU4nSouyNPtPJxWv+w2R6dS2PtK/NG37zEV1I9czAWZ07XFy",
- "mF9aytr75DU1OjBCJgU9N2cTarTWxdzH2WzGRDGeQrKL8822PDWtwi2wZ5MOGBMuYiUYrbM5OvwbZbpB",
- "JtizCkMTHrBs3lGpWcpK1CR+hO2du0W6A0Q9JCQDTZnRtoMPKMBR9tb9ib0z6MK8maI1Sgnto9/TQiPT",
- "yZnCA6ON/Dls0VX6zl5GnwVX2HegKUagmt1NOUFE/RWXOZDDJrChqc635pjTa9iSS5BAVLUomNY2uqCt",
- "SGpRJiGAqIG/Y0TnYrEXuX4Fxvh8ThFUML3+UkwnVm3Zjd9ZR3FpkcMpTKUQ+QhXdI8YUQxGuapJKcyq",
- "MxfM4iMePCe1kHRKDPrXauH5QLXIjDMg/0dUJKUcFbBKQ30iCIliFo9fM4I5wOoxnVO6oRDkUIDVK/HL",
- "48fdiT9+7NacKbKESx8BZhp2yfH4MVpJ74TSrc11Bxav2W4nEdmOng9zUDgdritTZntNewd5zEq+6wCv",
- "3SVmTynlGNdM/9YCoLMzN2PmHvLImqr1/rkj3FFOjQB0bN523aUQyztypMUjANA4cZf6phVZVtwiVSln",
- "juA9l3doiOW0jvKw0d2HBEMA1tR749yfT198M5k2V/f1d3Mm26+fIholyzaxAI0MNrE1cVsMrakHxvTY",
- "KojeiqFgFstIjBbI89zNrCM6SAFmT6s1Kw3IJp5kq6EVi/p/H/774Yej5D9p8sdB8vJ/zD99fn716HHv",
- "x6dX3377/9o/Pbv69tG//1vUrajZIu7+/LtZJbEkTsRv+Am3FxhLIa09tnVqnljeP95aAmRQ6nUs+LOU",
- "oFA02iDOUq+bRQXo+FBKKS6ATwmbwawrYrMVKO9MyoEuMQgRbQox5lK03g6W3zxzBFQPJzJKjsX4B6/4",
- "kDdxMxujI9/egfJiARHZpqc31pX9KpZh5KzbKGqrNBR9f5ft+uuAtv/e68q9TSV4zjgkheCwjSaLMA4/",
- "4cdYb3vcDXRGxWOob9eWaOHfQas9zpjFvC19cbUD+f6uvti+g8Xvwu24OsOYYXTVQF4SStKcoSNHcKVl",
- "leqPnKKpGLBr5DrJG8DDzoNXvkncWxFxJjhQHzlVhoa1ARl1gS8hcmR9D+B9CKparUDpjtK8BPjIXSvG",
- "ScWZxrEKs16JXbASJN7pzGzLgm7Jkubo6/gDpCCLSrfVSDz0lGZ57vyuZhgilh851UYGKU1+Yvxsg+B8",
- "BKHnGQ76UsjzmgrxI2oFHBRTSVzu/2C/ovh301+7owDzTOxnL2/uW+573GOBdw7zk2NnYp0cox7deFx7",
- "uN+bG65gPIkymdGLCsYxfrvDW+ShsQY8Az1qfLdu1T9yveGGkS5ozjKjO92EHboirrcX7e7ocE1rITpe",
- "FT/XT7GwgZVISpqe463xZMX0ulrMUlHMvWk5X4nazJxnFArB8Vs2pyWbqxLS+cWTPXruLeQViYirq+nE",
- "SR11544YBzg2oe6YtT/T/60FefDD6zMydyulHtgoXAs6CJ+MeANchFDrwspM3maR2TDkj/wjP4Yl48x8",
- "P/zIM6rpfEEVS9W8UiC/oznlKcxWghz6oKNjqulH3hPxg4meQbgXKatFzlJyHh7Fzda0yTt9CB8/fjAM",
- "8vHjp97tR//gdENF96gdILlkei0qnbjshETCJZVZBHVVR6cjZJtbtGvUKXGwLUe67AcHPy6qaVmqbrBq",
- "f/plmZvpB2yoXCimWTKitJBeCBrJaLHB9X0rnMkl6aVPbakUKPJbQcsPjOtPJPlYHRw8A9KK3vzNyRrD",
- "k9sSWn6jGwXTdn1GOHGrUMFGS5qUdAUqOn0NtMTVx4O6QA9lnhPs1ooa9TEWCKqZgKfH8AJYPK4dAYeT",
- "O7W9fJppfAr4CZcQ2xjp1Dj+b7peQRzpjZerE4vaW6VKrxOzt6OzUobF/crU2WcrI5P9bYxiK242gUvU",
- "WwBJ15CeQ4Y5Q1CUejttdfcXfu6E86KDKZtbZwPdMAEEXWwLIFWZUacDUL7tRuIr0NqnH7yHc9ieiSZ/",
- "5Dqh9+2AcDW0UZFTg8PIMGu4bR2M7uK7y2MMgi1LH1eNMYSeLQ5rvvB9hjeyPSHvYBPHmKIVsDxECCoj",
- "hLDMP0CCG0zUwLsV68emZ9SbhT35Im4eL/uJa9Jobe4COJwNxmHb7wVgoq64VGRBFWREuBxTG/QcSLFK",
- "0RUM+J5CL+fI0OKWZxSB7Dv3oiedWHYPtN55E0XZNk7MnKOcAuaLYRV0E3au/f1I1pGOM5gRLB3hCLbI",
- "UU2qIw6s0KGy5W22ufBDqMUZGCRvFA6PRpsioWazpsqnv2KWsN/Lo3SAf2EQ/66crZPgxjpIBa4zsrzM",
- "7e7Tnt/WZW75dC2foxU6bUfkW00nLogqthyCowKUQQ4rO3Hb2DNKk1DQLJDB4+flMmccSBK7/KZKiZTZ",
- "/OXmmHFjgNGPHxNifU9kNIQYGwdo4wURAiZvRbg3+eo6SHKXEEE9bLxaCv6GeCSgDW8yKo8ojQhnfCAw",
- "zUsA6iIm6vOrE7eDYAjjU2LE3AXNjZhzTtQGSC+DCNXWTr6Qu6J8NKTO7nD92YPlWnOyR9FNZhPqTB7p",
- "uEK3A+PdqkRsCRTSy5m+Na2GztIxQw8c30O0ehjkHt0IgY4noinP4yy/vRZa+2zun2SNSJ82ybQ+MjPG",
- "+0P8E12lAfr1HcF1ttC77nEdNdLbV5ftRKlAf4qJYrNH+q7RvgNWQQ6oESctDSI5jznMjWIPKG5PfbfA",
- "csd0LMq3j4L7cAkrpjQ0ritzKnlf7H1fd1FM/xZiOTw7Xcqlmd97IWoZbdMM7fVdOM17n8GF0JAsmVQ6",
- "Qb9fdAqm0fcKLcrvTdO4otC+cbeVUFgWlw047Dlsk4zlVZxf3bg/Hpth39ZOGFUtzmGL6iDQdE0WWLkn",
- "GoezY2gbqrVzwm/shN/QO5vvuN1gmpqBpWGX9hhfyL7oSN5d4iDCgDHm6K/aIEl3CEg8+I8h17GMpUBp",
- "sJszMw1nu1yPvc2Uedi7DKUAi+EzykKKziWwlnfOgmH0gTH3mA4K3/TTBgb2AC1Llm06jkALddBcpNey",
- "9n1icYcKuLoO2B4KBE6/WGSqBNXOIW+0W1vCiIdzm42izFk70zsUCOFQTPkCfH1CGdbGKlH7aHUGNP8R",
- "tv8wbXE6k6vp5HZ+wxitHcQ9tH5XL2+UznghZv1IrWuAa5KclqUUFzRPnHd1iDWluHCsic29M/aeRV3c",
- "h3f2+ujNO4f+1XSS5kBlUqsKg7PCduUXMyubrj6wQXyBL2PweJ3dqpLB4tdpxKFH9nINrphSoI32ij80",
- "3vZgKzoP7TJ+L7/X3+ouBuwUd1wQQFnfDzS+K3s90L4SoBeU5d5p5LEduEPHyY2rIBKVCiGAW18tBDdE",
- "yZ2Km97uju+Ohrv2yKRwrB3lngpb0UwRwbshWUaFRF8UsmpBsXSDdQn0hROvisRsv0TlLI07GPlCGebg",
- "9uLINCbYeEAZNRArNnAPySsWwDLN1AhDt4NkMEaUmL4MyBDtFsKVoq04+70CwjLg2nySuCs7GxVrZThX",
- "c/84NbpDfywH2LqnG/C30THCsiXdEw+R2K1ghNdUPXSPa5PZT7R2x5gfAn/8NW67wxF7R+KOm2rHH46b",
- "bcjQun3dFFaO7cs/wxi2ytj+srXeeHX1UwbGiJahZSpZSvEHxO08NI8jYeu+UAvDqMk/gM8i2T9dEVN7",
- "d5pqus3og8s9pN2EXqj2Df0A1+PKB3dSWBTDu2cpt0ttq0K24kLiDBPGcs0t/IZhHM69+LecXi5orGKI",
- "UTIMTkfN7WfLkawF8Z097Z3Pm7naOTMSXKTWbZlN6CpBNhkl/eThGyoMdtjRqkKjGSDXhjrB1F5+5UpE",
- "wFT8knJbXNT0s1vJ9VZgnV+m16WQmI6p4j7vDFJW0DyuOWRI/Xb6asZWzJbWrBQEtRsdIFuT2HKRq39p",
- "75cb0pwsycE0qA7rViNjF0yxRQ7Y4oltsaAKJXntiKq7mOkB12uFzZ+OaL6ueCYh02tlCasEqZU6NG/q",
- "m5sF6EsATg6w3ZOX5CHeWSl2AY8MFd35PDl88hKdrvaPg9gB4Gro7pImGYqT/3DiJM7HeGlnYRjB7aDO",
- "osmFtvD5sODasZts1zF7CVs6Wbd/LxWU0xXEwySKPTjZvria6Ejr0IVntmqv0lJsCdPx8UFTI58GYj6N",
- "+LNokFQUBdOFu9lQojD81BRmtIN6cLYEsKse5PHyH/GCsPT3Ix0j8n6dpvZ8i80ar3Hf0gLaZJ0SanNw",
- "c9Zc3fuCX+TEZ/JjOaW6ipKljRnLTB3VHLzJX5JSMq7RsKj0MvkbSddU0tSIv9kQusnim+eRElLtqjH8",
- "eojfO90lKJAXcdLLAbb3OoTrSx5ywZPCSJTsURNjHezKwZvMeLSYl+jdYMHdoMcqZQZKMshuVYvdaCCp",
- "b8V4fAfAW7JiPZ9r8eO1Z3bvnFnJOHvQyqzQL+/fOC2jEDJW16XZ7k7jkKAlgwsMXIsvkoF5y7WQ+ahV",
- "uA32f+7Ng1c5A7XM7+WYIfBdxfLsH03OSKcKn6Q8XUf9/gvT8demSnI9ZbuPo2VE1pRzyKPg7Jn5qz9b",
- "I6f/P8XYcQrGR7btVtez0+1MrkG8jaZHyg9oyMt0bgYIqdoOoq+jLvOVyAiO09SsaLisXzAwqKD1ewVK",
- "x5L28ION/ED/jrELbAEnAjxDrXpGfrCvnKyBtFLqUZtlRZXb9GzIViCd47Eqc0GzKTFwzl4fvSF2VNvH",
- "lvy0BaRWqMy1Z9Gx64MCN+NiCH31znh883g4uwMuzayVxgoXStOijKWumBZnvgHmx4S+TlTzQurMyLHV",
- "sJXX3+wghh+WTBZGM62hWRmPPGH+ozVN16i6tqTJMMuPr3zmuVIFheHrOq91jRrcdwZvV/zM1j6bEmHs",
- "i0um7OMWcAHtbJk6dcyZTj57pj09WXFuOSUqo3elNt6E7B45e6Ht3aFRzDqEv6biYgsHXrcQ3Cn2ihZ9",
- "6FaV61WEt1nFdYlS/2hRSrngLMWSC8FzGjXK7qGMMXcFI6pTdJ1Rfou7HRrZXNFadnU4kaPiYHU7Lwgd",
- "4frOyuCrWVTLHfZPjS8yrKkmK9DKSTbIpr4ko/OXMK7A1RzCN1MCOSlk6/4FJWT0Si+pXb/XZCOMnR9Q",
- "gL8339468wiDSs8ZR0XIkc3Fr1qPBtbx10Z7YpqsBCg3n3Zqvvpg+swwPT2DzaeZr/uPMOz1hZm2vavr",
- "gzryN3fupsy0fWXaEht1WP/cClO0gx6VpRt0uGBnVB/QGz5I4MgNTOJd4AFxa/ghtB3stvPKHc9Tw2hw",
- "gRd2UOI53GOMunhlp1rvBc0ry1HYgthQl2h+JeMRNN4wDs2rFJEDIo0eCbgwuF8H+qlUUm1VwFEy7Qxo",
- "jrd0MYGmtHPR3hZUZ4GRJDhHP8bwMjZ1NwcER92gUdwo39aPYRjuDpSJV/gKjyNkv4omalVOicow7LhT",
- "VzMmOIzg9pV72wdAfxv0dSLbXUtqd851TqKhTLJFla1AJzTLYsXavsOvBL+SrELNATaQVnWxq7IkKWZs",
- "t1PY+9zmBkoFV1WxYyzf4JbDpSKmR7/FAZSPq26AzwiKXyN6j1+/e//61dHZ62N7Xhiz3KaSGZ1bQmEE",
- "orFjlQajOlcKyG8hGX/Dfr91JhxHM6inG2HasKavZ0QMqF9s8d9YQaphBnJ36teO6vIX6Njx2up9G1JP",
- "OTdbL1FslYynBB59tydHM/TN9mPT/043ZC5WbUTuuXLMLmEcrlFMDL8251uYBd6rsmZPwDpJG2OohC/N",
- "j9ZtnV7YFp544vbKrqHvvq6yvtt7MlwvfYpn9EAkZVAvh1o1wF4GDcVTpoPhv1S7LBxNyU5JiUXOYxBs",
- "MIYtrm7fZYw6woYCMGz8hfnc6z1Oge2ZAwh7J0F9ZE8foR992CApKXM3nY2w6FPWBRj3Q77HhB42C9yd",
- "hAvbRSCxmfSqKe7mkF7YdpB6YIvezcan/x/V18h4uYUly1fAXc3ydkDm6LCw5RJSzS72hMn/hzEtmhDs",
- "qTc+7IMYQdQ8q8OM/POd17SJGoR2RbHvxCeoMXJrdIaCZM9h+0CRFjdEq/BNPaPeJLsUKYD1VxLDIkLF",
- "rmmst8R5zpmqOQOp4K9FbXdoSl8Nlj8Okj5uOJZnSULDRJAdQ16ImLk1aizT9VrpURgxMxRJ3y9AOnx6",
- "HWO9V1WXrq/f5wxUUWNVd6vjXbrsVkxqqB2EPs8VlP/NZzDZUey7r02BZnTHXlKZ+RZR+8KbLslAbFo3",
- "2tsG1bM40st6ZNYEsfQDniNVITBUKc2FYnyVDMV7teNGwqej8HYMPTlY2RXxWoJ0hdm1f1Y30cIHvezC",
- "Yxcp3DNHNyGCGqxxaJEbzI9+3ySAYyksah9Vdjd/4QSNsUENdjJI0x4ecxexX9nvPsLXl0IaYUY5fk32",
- "5ln78CWmekQMuX5J3Gm5P3L4JqYK49y+e6FiOdvckDJ0+ZVSZFVqD+hwYzSG4diKCDtESVTLT/uz7Cls",
- "OdYHeRPkYZzDdm6VpnRNeVOopb2tbelGO4cg77Gz2ndqxcUV1nxlJ7C6Ezz/TEtoOimFyJMBH99JP/W8",
- "uwfOWXoOGTFnh7/4HyiBTB6ia6m+xLlcb32qdVkCh+zRjBBjSxWl3vr7nHbRtc7g/IHeNf4GR80qWw3C",
- "GWmzjzwes2KfKb+lfPNgdks1BUb43XIoC2RPbvdmIO1d0stIQfCxb75Fbli6RZobprJYxLSUGyb6jdrf",
- "fUMtwvphisYe++e8ZdXZskKdWxUh4Y6tu8CdfE3rrp98MnZ6OA+UapWC/jxHL0CLtgO0H0P4xjXRJ+6w",
- "R0EvxngU4iVQTHd0aViCYP0ggqiS3578RiQssZ6gII8f4wCPH09d09+etj8b6+vx4+jOvDdnRutpOTdu",
- "jGP+MXQLb2+aBwI+OutRsTzbxxit8J2mticGqPzqAp3+lOqiv1oTub9VXaHF67hRu4uAhInMtTV4MFQQ",
- "mDMiJsd1m0Uf/1OQVpLpLeZfeYuK/RrNa/+hdsK490rriH0XMK7FOdQZfI3LpnnM/QdhHwsszFmPTmyN",
- "rx+83tCizMFtlG8fLP4Kz/72PDt49uSvi78dvDhI4fmLlwcH9OVz+uTlsyfw9G8vnh/Ak+U3LxdPs6fP",
- "ny6eP33+zYuX6bPnTxbPv3n51wf+JXWLaPNK+f/GErzJ0buT5Mwg29CElqx+9MSwsS/nSVPcicYmySeH",
- "/qf/6XfYLBVFA97/OnHBhJO11qU6nM8vLy9nYZf5Cm20RIsqXc/9OP3HJt6d1IFONkEFV9TGsBhWwEV1",
- "rHCE396/Pj0jR+9OZg3DTA4nB7OD2ROsml0CpyWbHE6e4U+4e9a47nPHbJPDz1fTyXwNNMdS6uaPArRk",
- "qf+kLulqBXLm6pqany6ezn2cxPyzs0+vdn2bhyWC5p9bZny2pydWUZl/9slBu1u3sm+c+yLoMBKL4SHt",
- "c2rzz2gPDv7eRuOz3rDsau7dT66He5Zo/rl5J+zK7sIcYq4jG/hGg2fFpsZex2dmlf3VbDwfb89U+1m5",
- "motOMsM9pter+s20oNTA4Yee+mUBEQ8Jt5rho2YntEZqhJ2WFYTZ77Uob7VvBPqHg+Tlp89Ppk8Orv5i",
- "BLb788Wzq5E+4OZZXHJaS+ORDT9hsDpas7hBnh4c/Dd7VPj5NWe8U+duXZNFiht/RzPiY0Fx7Cf3N/YJ",
- "Rw+8EZzEHgxX08mL+5z9CTcsT3OCLYMsqf7S/8LPubjkvqU5xauioHLrt7FqCQX/EiKeFXSl0AKT7IJq",
- "mHxCEz8WNDAgXPD15msLF3yS+qtwuS/h8mW81f30mhv8y5/xV3H6pYnTUyvuxotTp8rZdIO5fa+l0fB6",
- "xXhXEM17wAwEuut1wq6E/QF077HFyS1FzJ/27uJ/733y/OD5/WHQriT5I2zJW6HJ93jt9YXu2XHbZ5cm",
- "1LGMsqzH5Fb8g9LfiWy7g0KFWpUuRDiilywYNyj3T5f+Sya9xxDPYUvsVbB3+bvHgNv60NUtZcAX+27j",
- "VxnyVYZIO/yz+xv+FOQFS4GcQVEKSSXLt+QXXid43dysy7JomF176/dkmrFGUpHBCnjiBFayENnWF/dp",
- "ATwH65ruKSrzz+0Kndb9NeiWOsbf64eD+kgvtuTkuKfB2G5dSfvdFpt2LMaITdhFcadl2JVFA8bYLjY3",
- "E1kJTSwVMjepr4Lnq+C5lfIyevPE9JeoNeEdOd0zeeoznWO1AKjuDz3G5vhTt+t/2Wfwv4qEryLh5iLh",
- "B4hsRty1TkhEmO4mnt6+gMDIq6xb5x7DF3zzKqeSKBjrpjhCiM45cR9S4r6NtCitrI1GOYENU/huS2TB",
- "7tZu+yrivoq4L+jWar+gaSsi17Z0zmFb0LK2b9S60pm4tBWColIRi+fS3FXaw9p3dSSGFsQDaBKcyM8u",
- "oy/f4vvxLDNqnGYFGJWqlnWmsw9bbeJmDYTmwcMV4zgAigocxZaUpEHqgIJUcPs8WOeuzWH21tqEMSH7",
- "ewUo0RxtHI6TaeuyxS1jpIDjrfWv/t3I1Q5fev3GV+vv+SVlOlkK6TKHkEL9KAwNNJ+7WhidX5u8zt4X",
- "TFYNfgxiN+K/zuuaxtGP3aiT2FcXFOIbNWFlYZgWrmEdoPXhk1kKLInnlreJOjqczzHcfi2Unk+upp87",
- "EUnhx0819T/XJ69bhatPV/8/AAD//75BkK9YsgAA",
+ "H4sIAAAAAAAC/+x9+3PcNtLgv4Ka/aoc+4Yzkl+7VlXqO8VyEl0cR2Up2bvP9iUYsmcGKxJgAFDSxKf/",
+ "/QoNgARJcIZ6rHKp80+2hng0Go1Gv/F5koqiFBy4VpODz5OSSlqABol/0TQVFdcJy8xfGahUslIzwScH",
+ "/htRWjK+mkwnzPxaUr2eTCecFtC0Mf2nEwm/V0xCNjnQsoLpRKVrKKgZWG9K07oe6SpZicQNcWiHOD6a",
+ "XG/5QLNMglJ9KH/i+YYwnuZVBkRLyhVNzSdFLpleE71mirjOhHEiOBCxJHrdakyWDPJMzfwif69AboJV",
+ "usmHl3TdgJhIkUMfzteiWDAOHiqogao3hGhBMlhiozXVxMxgYPUNtSAKqEzXZCnkDlAtECG8wKticvBh",
+ "ooBnIHG3UmAX+N+lBPgDEk3lCvTk0zS2uKUGmWhWRJZ27LAvQVW5VgTb4hpX7AI4Mb1m5MdKabIAQjl5",
+ "/+1r8uzZs1dmIQXVGjJHZIOramYP12S7Tw4mGdXgP/dpjeYrISnPkrr9+29f4/ynboFjW1GlIH5YDs0X",
+ "cnw0tADfMUJCjGtY4T60qN/0iByK5ucFLIWEkXtiG9/rpoTz/6m7klKdrkvBuI7sC8GvxH6O8rCg+zYe",
+ "VgPQal8aTEkz6Ie95NWnz/vT/b3rv304TP7L/fni2fXI5b+ux92BgWjDtJISeLpJVhIonpY15X18vHf0",
+ "oNaiyjOyphe4+bRAVu/6EtPXss4LmleGTlgqxWG+EopQR0YZLGmVa+InJhXPDZsyozlqJ0yRUooLlkE2",
+ "Ndz3cs3SNUmpskNgO3LJ8tzQYKUgG6K1+Oq2HKbrECUGrlvhAxf0/y4ymnXtwARcITdI0lwoSLTYcT35",
+ "G4fyjIQXSnNXqZtdVuRsDQQnNx/sZYu444am83xDNO5rRqgilPiraUrYkmxERS5xc3J2jv3dagzWCmKQ",
+ "hpvTukfN4R1CXw8ZEeQthMiBckSeP3d9lPElW1USFLlcg167O0+CKgVXQMTiX5Bqs+3/4/Snd0RI8iMo",
+ "RVdwQtNzAjwV2fAeu0ljN/i/lDAbXqhVSdPz+HWds4JFQP6RXrGiKgivigVIs1/+ftCCSNCV5EMA2RF3",
+ "0FlBr/qTnsmKp7i5zbQtQc2QElNlTjczcrwkBb36em/qwFGE5jkpgWeMr4i+4oNCmpl7N3iJFBXPRsgw",
+ "2mxYcGuqElK2ZJCRepQtkLhpdsHD+M3gaSSrABw/yCA49Sw7wOFwFaEZc3TNF1LSFQQkMyM/O86FX7U4",
+ "B14zOLLY4KdSwgUTlao7DcCIU28Xr7nQkJQSlixCY6cOHYZ72DaOvRZOwEkF15RxyAznRaCFBsuJBmEK",
+ "JtyuzPSv6AVV8PL50AXefB25+0vR3fWtOz5qt7FRYo9k5F40X92BjYtNrf4jlL9wbsVWif25t5FsdWau",
+ "kiXL8Zr5l9k/j4ZKIRNoIcJfPIqtONWVhIOP/In5iyTkVFOeUZmZXwr7049VrtkpW5mfcvvTW7Fi6Slb",
+ "DSCzhjWqTWG3wv5jxouzY30VVRreCnFeleGC0pZWutiQ46OhTbZj3pQwD2tVNtQqzq68pnHTHvqq3sgB",
+ "IAdxV1LT8Bw2Egy0NF3iP1dLpCe6lH+Yf8oyN711uYyh1tCxu2/RNuBsBodlmbOUGiS+d5/NV8MEwGoJ",
+ "tGkxxwv14HMAYilFCVIzOygtyyQXKc0TpanGkf5DwnJyMPnbvDGuzG13NQ8mf2t6nWInI49aGSehZXmD",
+ "MU6MXKO2MAvDoPETsgnL9lAiYtxuoiElZlhwDheU61mjj7T4QX2AP7iZGnxbUcbiu6NfDSKc2IYLUFa8",
+ "tQ0fKRKgniBaCaIVpc1VLhb1D18dlmWDQfx+WJYWHygaAkOpC66Y0uoxLp82Jymc5/hoRr4Lx0Y5W/B8",
+ "Yy4HK2qYu2Hpbi13i9WGI7eGZsRHiuB2CjkzW+PRYGT4+6A41BnWIjdSz05aMY2/d21DMjO/j+r81yCx",
+ "ELfDxIValMOcVWDwl0Bz+apDOX3CcbacGTns9r0d2ZhR4gRzK1rZup923C14rFF4KWlpAXRf7F3KOGpg",
+ "tpGF9Y7cdCSji8IcnOGA1hCqW5+1nechCgmSQgeGb3KRnt/DeV+YcfrHDocna6AZSJJRTYNz5c5L/M7G",
+ "jt9jP+QIICOC/U/4H5oT89kQvuGLdlijsDOkXxGY1zOj51rp2c5kGqD+LUhhVVtiVNIbQfm6mbzHIyxa",
+ "xvCIN1abJtjDL8IsvbGVHS6EvB29dAiBk8YCSKgZNTgu087OYtOqTBx+IlYE26AzUON06QuTIYa6w8dw",
+ "1cLCqab/BiwoM+p9YKE90H1jQRQly+EezuuaqnV/EUate/aUnH5/+GL/6a9PX7w0ekkpxUrSgiw2GhT5",
+ "yknTROlNDo/7K0N5tsp1fPSXz73dqD1ubBwlKplCQcv+UNYeZS8t24yYdn2stdGMq64BHHMsz8CwF4t2",
+ "Yk2tBrQjpsydWCzuZTOGEJY1s2TEQZLBTmK66fKaaTbhEuVGVvehfICUQkYsInjEtEhFnlyAVExEjNsn",
+ "rgVxLbxAUnZ/t9CSS6qImRuNdRXPQM5ilKWvOILGNBRq14Vqhz674g1u3IBUSrrpod+uN7I6N++YfWkj",
+ "39t+FClBJvqKkwwW1aoluy6lKAglGXbEi+MtW611cI+eSCGW9y5uRGeJLQk/oIGd5KaPu+msbIAAvxMZ",
+ "GEWpUvfA3pvBGuwZyglxRhei0oQSLjJArapSccY/4JpDnwC6MnR4l+i1FSwWYCT4lFZmtVVJ0FDfo8Wm",
+ "Y0JTS0UJokYNWDJrE7RtZaezbp9cAs2MZA+ciIUzFzpDJi6SopdBe9bprp2IrtOCq5QiBaWMRmbl7J2g",
+ "+XaWLPUWPCHgCHA9C1GCLKm8JbBaaJrvABTbxMCt5URnY+1DPW76bRvYnTzcRiqNUmapwAil5sDloGEI",
+ "hSNxcgESbY3/1v3zk9x2+6pyIBLAiVZnrEDdjlMuFKSCZyo6WE6VTnYdW9OoJf+ZFQQnJXZSceAB+8Jb",
+ "qrS1ODOeoS5g2Q3OYw0PZophgAevQDPyL/7264+dGj7JVaXqq1BVZSmkhiy2Bg5XW+Z6B1f1XGIZjF3f",
+ "t1qQSsGukYewFIzvkGVXYhFEdW2YcS6Z/uLQfGHugU0UlS0gGkRsA+TUtwqwG3pDBwAximPdEwmHqQ7l",
+ "1C7Y6URpUZbm/Omk4nW/ITSd2taH+uembZ+4qG74eibAzK49TA7yS4tZ6wdfUyO048ikoOfmbkIR3JrG",
+ "+zCbw5goxlNItlG+OZanplV4BHYc0gHtx0XaBLN1DkeHfqNEN0gEO3ZhaMEDqtgJlZqlrERJ4gfY3Ltg",
+ "1Z0gatIhGWjKjHoQfLBCVhn2J9bX0R3zdoLWKKm5D35PbI4sJ2cKL4w28OewQdvuiXWinwWu93uQFCOj",
+ "mtNNOUFAvWvOXMhhE7iiqc435prTa9iQS5BAVLUomNY2KqItSGpRJuEAUYvElhmdTcg6oP0OjDFSneJQ",
+ "wfL6WzGdWLFlO3xnHcGlhQ4nMJVC5CNs5z1kRCEYZVsnpTC7zlwQjo/U8JTUAtIJMWgQrJnnI9VCM66A",
+ "/C9RkZRyFMAqDfWNICSyWbx+zQzmAqvndFb0BkOQQwFWrsQvT550F/7kidtzpsgSLn3kmmnYRceTJ6gl",
+ "nQilW4frHlR0c9yOI7wdTTXmonAyXJenzHbaItzIY3bypDN4bd8xZ0opR7hm+XdmAJ2TeTVm7SGNrKla",
+ "7147jjvKChMMHVs37ju6EP89OnwzdAy6/sSB46X5OOR7MfJVvrkHPm0HIhJKCQpPVaiXKPtVLMPgRnfs",
+ "1EZpKPqqve3664Bg896LBT0pU/CccUgKwWETjednHH7Ej7He9mQPdEYeO9S3Kza14O+A1Z5nDBXeFb+4",
+ "2wEpn9ROx3vY/O64HatOGNaJWinkJaEkzRnqrIIrLatUf+QUpeLgLEdM/V7WH9aTXvsmccUsoje5oT5y",
+ "qgwOa1k5ap5cQkQL/hbAq0uqWq1A6Y58sAT4yF0rxknFmca5CrNfid2wEiTa22e2ZUE3ZElzVOv+ACnI",
+ "otLtGxOjz5Q2Wpc1MZlpiFh+5FSTHIwG+iPjZ1c4nA/y8jTDQV8KeV5jYRY9DyvgoJhK4i6J7+zX76la",
+ "++Wbhp5Jus7WiGLGb0LUNhpa4e3/+6v/PPhwmPwXTf7YS179t/mnz8+vHz/p/fj0+uuv/0/7p2fXXz/+",
+ "z/+I7ZSHPRYb5SA/PnLS5PERigyNcakH+4NZHArGkyiRna2BFIxjiG2HtshXRvDxBPS4MVO5Xf/I9RU3",
+ "hHRBc5ZRfTty6LK43lm0p6NDNa2N6CiQfq2fYi7dlUhKmp6jR2+yYnpdLWapKOZeip6vRC1RzzMKheD4",
+ "LZvTks1VCen8Yn/HlX4HfkUi7KrDZG8tEPT9gfF4RjRZuhBFPHnLiluiqJQzUmK4jvfLiOW0jlm1uWoH",
+ "BAMa19Q7Fd2fT1+8nEybQMT6u9HU7ddPkTPBsqtYuGkGVzFJzR01PGKPFCnpRoGO8yGEPeqCsn6LcNgC",
+ "jIiv1qx8eJ6jNFvEeeX3jjE6je+KH3MbgGFOIppnN87qI5YPD7eWABmUeh3LYWnJHNiq2U2AjkullOIC",
+ "+JSwGcy6Gle2AuWdYTnQJeZSoIlRjAnqqs+BJTRPFQHWw4WMUmti9INisuP719OJEyPUvUv2buAYXN05",
+ "a1us/1sL8ui7N2dk7livemQjn+3QQaxqxJLhwrFazjbDzWzmng39/sg/8iNYMs7M94OPPKOazhdUsVTN",
+ "KwXyG5pTnsJsJciBj/A6opp+5D2ZbTC5NoitI2W1yFlKzkPZuiFPmzDVH+Hjxw+G43/8+KnnuelLwm6q",
+ "KH+xEySXTK9FpROXEZJIuKQyi4Cu6owAHNnmc22bdUrc2JYVu4wTN36c59GyVN3I4P7yyzI3yw/IULm4",
+ "V7NlRGkhvVRjRB0LDe7vO+EuBkkvfTpRpUCR3wpafmBcfyLJx2pv7xmQVqjsb054MDS5KaFl87pV5HLX",
+ "3oULtxoSXGlJk5KuQEWXr4GWuPsoeRdoXc1zgt1aIbo+oAWHahbg8TG8ARaOG4cb4uJObS+f2htfAn7C",
+ "LcQ2RtxonBa33a8gaPfW29UJ/O3tUqXXiTnb0VUpQ+J+Z+qMv5URsrwnSbEVN4fAJUcugKRrSM8hwzwt",
+ "KEq9mba6e2elE1k962DK5jPaqEJMukHz4AJIVWbUCfWUb7rZDwq09ikf7+EcNmeiydm5SbpDO/peDR1U",
+ "pNRAujTEGh5bN0Z3853jGyOOy9IHsWPApieLg5oufJ/hg2xF3ns4xDGiaEWHDyGCyggiLPEPoOAWCzXj",
+ "3Yn0Y8sz+srC3nyR9EfP+4lr0qhhznkdrgaD3u33AjA5WlwqsqBGbhcur9dGmAdcrFJ0BQMScmihHRnH",
+ "3bLq4iC77r3oTSeW3Qutd99EQbaNE7PmKKWA+WJIBZWZTsiCn8k6AXAFM4LlOhzCFjmKSXW0hGU6VLYs",
+ "5bb+wBBocQIGyRuBw4PRxkgo2ayp8inHmJntz/IoGeDfmDGxLU/uOPC2B+nXdRac57ndc9rTLl22nE+R",
+ "83lxoWo5IsfNSPgYABbbDsFRAMogh5VduG3sCaXJ3mg2yMDx03KZMw4kiTnuqVIiZTZnvLlm3Bxg5OMn",
+ "hFhjMhk9QoyMA7DRuYUDk3ciPJt8dRMgucs+oX5sdIsFf0M87NKGZhmRR5SGhTM+EFTnOQB10R71/dWJ",
+ "OcJhCONTYtjcBc0Nm3MaXzNIL10LxdZOcpZzrz4eEme32PLtxXKjNdmr6DarCWUmD3RcoNsC8XZRIrYF",
+ "CvHlbFk1robu0jFTD1zfQ7j6Kkj0uhUAHU2/KYnkNL+dGlr7bu7fZA1LnzYJzD6qNEb7Q/QT3aUB/PVN",
+ "EHVq1kn3uo4q6W23azsrLZCfYqzYnJG+r6PvUVGQA0rESUuCSM5jHjAj2AOy21PfLdDcMfeN8s3jwJcv",
+ "YcWUhsYWbW4l71x5aNscxZR7IZbDq9OlXJr1vRei5tE2pxM7tpb54Cu4EBqSJZNKJ2jIjy7BNPpWoUb5",
+ "rWkaFxTa0QK2+gzL4rwBpz2HTZKxvIrTq5v3hyMz7bvaCKOqxTlsUBwEmq7JAqslRWOItkxtw8y2Lvit",
+ "XfBbem/rHXcaTFMzsTTk0p7jL3IuOpx3GzuIEGCMOPq7NojSLQwSL/4jyHUsPSwQGuzhzEzD2TbTY+8w",
+ "ZX7sndEXForhO8qOFF1LoC1vXQVDH4lR95gOig31Ux4GzgAtS5ZddQyBdtRBdZHeSNv3WdwdLODuusF2",
+ "YCAw+sWiaiWodsJ+I93aslE8XNtsFGbO2mn1IUMIp2LKFz3sI8qQNlbm2oWrM6D5D7D5xbTF5Uyup5O7",
+ "2Q1juHYj7sD1Sb29UTyjh9vakVpugBuinJalFBc0T5x1dYg0pbhwpInNvTH2gVld3IZ39ubw7YkD/3o6",
+ "SXOgMqlFhcFVYbvyL7MqWxtg4ID4ompG4fEyuxUlg82vc7ZDi+zlGlwBq0Aa7VXaaKztwVF0FtplPNBm",
+ "p73VOQbsErc4CKCs/QON7cq6B9ouAXpBWe6NRh7agaAYXNy4ci1RrhAOcGfXQuAhSu6V3fROd/x0NNS1",
+ "gyeFc20psVXYKnKKCN71HxsREm1RSKoFxToZ1iTQZ068KhJz/BKVszRuYOQLZYiDW8eRaUyw8YAwakas",
+ "2IAfklcsGMs0UyMU3Q6QwRxRZPqaK0O4WwhX/rfi7PcKCMuAa/NJ4qnsHFQsTOJMzf3r1MgO/bncwNY8",
+ "3Qx/FxkjrBHTvfEQiO0CRuim6oF7VKvMfqG1Ocb8ENjjb+DtDmfsXYlbPNWOPhw12xjAddvdFFbr7fM/",
+ "Qxi2stvuUsFeeXXFagbmiJb+ZSpZSvEHxPU8VI8jIfe+Kg7DEI8/gM8imUtdFlNbd5oKxs3sg9s9JN2E",
+ "Vqi2h36A6nHnA58UViDx5lnK7VbbSpytQK84wYTBmXM7fkMwDuZeQGtOLxc0Vp7FCBkGpsPG+9kyJGtB",
+ "fGePe2fzZq5Q0YwEjtS6LbPJaCXIJhumn/h8S4HBTjtaVGgkA6TaUCaYWudXrkRkmIpfUm4Lupp+9ii5",
+ "3gqs8cv0uhQSU0lV3OadQcoKmsclhwyx3069zdiK2XKmlYKgXqYbyNaBtlTkao5a/3KDmuMl2ZsGFXnd",
+ "bmTsgim2yAFb7NsWC6qQk9eGqLqLWR5wvVbY/OmI5uuKZxIyvVYWsUqQWqhD9ab23CxAXwJwsoft9l+R",
+ "r9BnpdgFPDZYdPfz5GD/FRpd7R97sQvA1S3exk0yZCf/dOwkTsfotLNjGMbtRp1FEyNtsflhxrXlNNmu",
+ "Y84StnS8bvdZKiinK4iHSRQ7YLJ9cTfRkNbBC89spWSlpdgQpuPzg6aGPw0EcRv2Z8EgqSgKpgvn2VCi",
+ "MPTUFMO0k/rhbNllV6rJw+U/ooOw9P6RjhL5sEZTe7/FVo1u3He0gDZap4Ta/OGcNa57X12NHPsqBFi7",
+ "qi5ZZXFj5jJLRzEHPflLUkrGNSoWlV4m/yDpmkqaGvY3GwI3Wbx8HqnX1S7Rw28G+IPjXYICeRFHvRwg",
+ "ey9DuL7kKy54UhiOkj1ukiaCUznoyYxHi3mO3g0W3D70WKHMjJIMklvVIjcacOo7ER7fMuAdSbFez43o",
+ "8cYre3DKrGScPGhldujn92+dlFEIGatJ0xx3J3FI0JLBBQauxTfJjHnHvZD5qF24C/R/rufBi5yBWObP",
+ "ckwR+KZiefZLkwTWKXkoKU/XUbv/wnT8talMXS/ZnuNoCZQ15Rzy6HD2zvzV362R2/9fYuw8BeMj23ZL",
+ "GdrldhbXAN4G0wPlJzToZTo3E4RYbWfF1FGX+UpkBOdp6m00VNavzhiUK/u9AqVjGQb4wUZ+oH3H6AW2",
+ "WhYBnqFUPSPf2Zdl1kBa5QBQmmVFldvUcshWIJ3hsSpzQbMpMeOcvTl8S+ysto+tr2qrda1QmGuvoqPX",
+ "B8V5xsUQ+lKp8fjm8eNsD7g0q1Yaq3MoTYsylotmWpz5BpjwFto6UcwLsTMjR1bCVl5+s5MYelgyWRjJ",
+ "tB7N8nikCfMfrWm6RtG1xU2GSX58mTlPlSooxl8X1a3r6+C5M3C7SnO20NyUCKNfXDJlHxSBC2inv9W5",
+ "oE518ulw7eXJinNLKVEevS1X+TZo98BZh7Y3h0Yh6yD+hoKLrdJ406p7p9grWrCiW8KvV4XfpkDV9WD9",
+ "Q1Ep5YKzFMtFBE+Y1CC7x0nG+ApGVNboGqP8EXcnNHK4ooUD63Aih8XBUoKeETrE9Y2VwVezqZY67J8a",
+ "X8FYU01WoJXjbJBNff1LZy9hXIGrl4Tv1AR8UsiW/wU5ZNSll9Sm3xuSEcbODwjA35pv75x6hEGl54yj",
+ "IOTQ5uJXrUUD307QRnpimqwEKLeedgKh+mD6zDCXLoOrTzP/1gKOYd0XZtnWV9cf6tB77pynzLR9bdra",
+ "ygnNz60wRTvpYVm6SYero0blAX3FBxEc8cAk3gQeILcePxxtC7ltdbnjfWoIDS7QYQcl3sM9wqgrhXZK",
+ "I1/QvLIUhS2IDXWJJkwzHgHjLePQvAQSuSDS6JWAG4PndaCfSiXVVgQcxdPOgObopYsxNKWdifauQ3U2",
+ "GFGCa/RzDG9jU+R0gHHUDRrBjfJN/QCJoe5AmHiNLx85RPZLlqJU5YSoDMOOO0VMY4zDMG5fJrl9AfSP",
+ "QV8mst21pPbk3OQmGsokW1TZCnRCsyxWaO4b/ErwK8kqlBzgCtKqLtRVliTFEgztmhR9anMTpYKrqtgy",
+ "l29wx+lSEZOj3+EEysdVN4PPCLJfw3qP3py8f/P68OzNkb0vjFpuU8mMzC2hMAzR6LFKgxGdKwXktxCN",
+ "v2G/3zoLjoMZFC+OEG1YQNkTIgbULzb4b6yY1jABOZ/6jaO6vAMdO95YvG+P1BPOzdFLFFsl4zGBV9/d",
+ "0dFMfbvz2PS/1wOZi1UbkAdOc9/GjMM9irHhN+Z+C7PAexXi7A1YJ2ljDJXw7yCgdlunF7aZJ964vZJx",
+ "aLuvS9pvt54MF6ef4h09EEkZJPdTKwZYZ9BQPGU6GP5LtcvC0ZRs5ZRYUT42gg3GsJXs7VuYUUPYUACG",
+ "jb8wn3u9xwmwPXUAx96KUB/Z0wfoBx82SErKnKezYRZ9zLoA437I95jQw2aDu4twYbs4SGwl8Qrhw3U2",
+ "mtoaeA2UQrGmqmWsdPjIsJIzrP4d1Anpj+V9uheQaiPUB74qCXCTqiFmsuChgy/1NgbUjzr6xpXZ2FZb",
+ "o1+/dAez6WUABFkstvbjbHwlicM6IgH9pPjUwAq4e2ugHds7OsJwuYRUs4sdGRf/NFpqE80/9Xqsfcgm",
+ "SMBgdcSaf333hup1A9C2hIit8AT1p+4MzlC89TlsHinSooZoMcqp53m3SVRGDCB3SAyJCBXz+FnDm3PC",
+ "MFVTBmLBe9htd2hKvgxWAQ/yh245lydJQsOcoi1TXoiY5j5qLtP1Rpl2GHw1lJTRr8M7LAgdYdljVb/g",
+ "UD+vG2g15LhfDurSJUpjfkxta/Yp06D8bz4Zzs5in21u6pSjZf+Sysy3iKqqXgtOttxHvUwKX0O2C/Sy",
+ "npk18VD92PlIgRGMektzoRhfJUOhg+0QpPDJN3S04nWABY4RriVI9z6B9q9iJ1r4+KltcGxDhXue7DZI",
+ "UINFvSxwg6n275taAlgmkdo30Z0TOVyg0VupgU4GGf/Dc25D9mv73QeL+zJ5IzRyR6/JzpR9HwnHVA+J",
+ "IdUvibstdweh30brZZzb92pULP2fG1SG1uNSiqxK7QUdHozGxjC2uMYWVhJVGNP+Knuyf46lZt4GKT3n",
+ "sJlb+TtdU97U/GkfaytC2TUEKbSd3b5Xg0Bc98lXdgGre4Hzz1Sqp5NSiDwZMBcf96sYdM/AOUvPISPm",
+ "7vAxJAOVwMlXaKWs/YGX643P2i9L4JA9nhFi1PKi1BvvGmwX5OxMzh/pbfNf4axZZQuLOH1/9pHHw5+w",
+ "5Ie8I3/zw2znagoM87vjVHaQHWUCrgYqKEh6GamLP/atxoizrlurvCEqC0VMSrllzuio893X+SOkHxTr",
+ "3q79hCnlPuszFdKajlBa8gadrvDy49Ajhx290T+ZWZ+zYEqFEbWGKShbAEv0haTAKKBe17p2fPP7Kjnm",
+ "QwuONSL6qrxC0xjWWQwRYehMXtD84dVxTJQ/RHy411ziCw31uRDJFpXqdt73t3TU3IHudn9T8xM0H/wT",
+ "zB5FbZpuKGcXrAuQ+7ppWA+I5iQXzUMEOCS5xDGtEXT/JVm4CMtSQsoU6wSfX/oScLX6ghVRm0d+tutL",
+ "u9b5i9B3IGMn8IqSvGvKSWmB/K6BsDn7f3Ig3cDJjVJ5jPp6ZBHB3yD3PdltcgueJfCyjiPmPxlxP9Ys",
+ "N1hKbKVhUucORn/esgPbQoSdOAwh4Z7twYED+ob24H666tjl4TpwQysF/XWOvmdbuI1csc3axjoz+sgd",
+ "9kHoxRgfRLxomumOThCLEKw4SBBU8tv+b0TCEkuKC/LkCU7w5MnUNf3tafuzYVxPnkQFsAdzf7Re/nXz",
+ "xijml6G4PRubNhAi2tmPiuXZLsJoBfw25f0xpPVXFxr9pzww8Ku1hPaPqivNfBPHa3cTEDGRtbYmD6YK",
+ "QnlHRPG6brPo28wK0koyvcGMbW84Y79GK+F8V9vana+mzvFzt7wW51Dn/DeW+Up5OeI7Yd9yLow0jG5v",
+ "jW89vbmiRZmDOyhfP1r8HZ7943m292z/74t/7L3YS+H5i1d7e/TVc7r/6tk+PP3Hi+d7sL98+WrxNHv6",
+ "/Oni+dPnL1+8Sp893188f/nq748MHzIgW0AnPj9o8j/xFY7k8OQ4OTPANjihJaufeDNk7AuA0xRPIhSU",
+ "5ZMD/9N/9ydsloqiGd7/OnHpB5O11qU6mM8vLy9nYZf5Ck1xiRZVup77efpPa50c16HRNqUVd9RGvRpS",
+ "wE11pHCI396/OT0jhyfHs4ZgJgeTvdnebB8fzimB05JNDibP8Cc8PWvc97kjtsnB5+vpZL4GmqPnyvxR",
+ "gJYs9Z/UJV2tQM5cJXTz08XTuRea5p+dGfJ627d5WFRw/rllrc129MS6a/PPPp14e+tWvq6zUgcdRkIx",
+ "PKV9PHb+GYX2wd/nqB9Zcpx770K8ZQvgz/qKZdfdHu65xvnn5v3Ua3tec4j5EmxQPQ2eW50SpgldCIkp",
+ "tTpdmyPqc/mYaj+3W9PbcWbozPR6Xb8lG5QxOvjQUy3sQMSPhIfSUFxzZlozNWxRywrCyjo102+1b1j/",
+ "h73k1afP+9P9veu/Gdbu/nzx7HqkU/B18xTtac23Rzb8hIlwaN7Eo/R0b+8OLy0d8vBdXNyk4EGv6PPY",
+ "VZkUQyYDt1WdgUiNjB0JO53hBx7jfH7DFW81wrRCcCIPJ3xDM+LzTHDu/Yeb+5ijS9awWGKvkOvp5MVD",
+ "rv6YG5KnOcGWQQZ2f+t/5udcXHLf0tz3VVFQufHHWLWYgn8hGm8VulJokpPsgmqYfEKbbywgcYC5KE1v",
+ "wVxOTa8vzOWhmAtu0n0wl/ZA98xcnt7wgP/1V/yFnf7V2OmpZXfj2akT5Wwq49w+7thIeL1C/yuI5lRi",
+ "diPd9mpzl8N+B7r3CPXkjizmT3uP+v/vc/J87/nDQdCuUv0DbMg7ocm3aEz+i57ZccdnmyTU0YyyrEfk",
+ "lv2D0t+IbLMFQ4ValS79KCKXLBg3IPdvl/6zh71Hos9hQ2xskPcBc5FBTx66viMP+Mu+Z/2Fh3zhIdJO",
+ "/+zhpj8FecFSIGdQlEJSyfIN+ZnXyeO3V+uyLBp33T76PZ5mtJFUZLACnjiGlSxEtvGFA1sDnoM1YvcE",
+ "lfnndvVvaygbNEsd4e/1o4R9oBcbcnzUk2Bsty6n/WaDTTsaY0Qn7IK4VTPs8qIBZWwbmZuFrIQmFguZ",
+ "W9QXxvOF8dxJeBl9eGLyS1Sb8Iac7p089VVUYnWGqO5PPUbn+FOP671sdF+fiekvNj4dMhJ8sIlUXTR/",
+ "YQlfWMLdWMJ3EDmMeGodk4gQ3W0svX0GgaG4WfcNHQx08M2rnEqiYKyZ4hBHdMaJh+ASD62kRXFldTTK",
+ "CVwxG7UZ2bD71du+sLgvLO4v5LXazWjagsiNNZ1z2BS0rPUbta50Ji5t9cEoV8TC/DR3VXwxorSO2dCC",
+ "+AGajFfyk6sWkG8wjJZlRozTrAAjUtW8znT2eQxNgLcZoXlMecU4ToCsAmex5appkEumIBXcPj3a8bU5",
+ "yN5ZnTDGZH+vADmaw42DcTJtOVvcNkaKQ99Z/ur7Rq632NKRKmzsez8eo35ctPX3/JIynSyFdHmmiL5+",
+ "Zw00n7siXJ1fm4ISvS9YJSP4MQjsiP86rx9TiH7sBq/EvrqIEd+oiU4Lo71wg+s4rw+fzD5hLV63903w",
+ "0sF8jslZa6H0fHI9/dwJbAo/fqq35nN9Lbstuv50/X8DAAD//89W3bJFvAAA",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
diff --git a/daemon/algod/api/server/v2/generated/private/types.go b/daemon/algod/api/server/v2/generated/private/types.go
index 17485641d..8221e7731 100644
--- a/daemon/algod/api/server/v2/generated/private/types.go
+++ b/daemon/algod/api/server/v2/generated/private/types.go
@@ -366,6 +366,19 @@ type EvalDeltaKeyValue struct {
Value EvalDelta `json:"value"`
}
+// LightBlockHeaderProof defines model for LightBlockHeaderProof.
+type LightBlockHeaderProof struct {
+
+ // The index of the light block header in the vector commitment tree
+ Index uint64 `json:"index"`
+
+ // The encoded proof.
+ Proof []byte `json:"proof"`
+
+ // Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.
+ Treedepth uint64 `json:"treedepth"`
+}
+
// ParticipationKey defines model for ParticipationKey.
type ParticipationKey struct {
@@ -443,6 +456,32 @@ type PendingTransactionResponse struct {
// StateDelta defines model for StateDelta.
type StateDelta []EvalDeltaKeyValue
+// StateProof defines model for StateProof.
+type StateProof struct {
+
+ // Represents the message that the state proofs are attesting to.
+ Message struct {
+
+ // The vector commitment root on all light block headers within a state proof interval.
+ BlockHeadersCommitment []byte `json:"BlockHeadersCommitment"`
+
+ // The first round the message attests to.
+ FirstAttestedRound uint64 `json:"FirstAttestedRound"`
+
+ // The last round the message attests to.
+ LastAttestedRound uint64 `json:"LastAttestedRound"`
+
+ // An integer value representing the natural log of the proven weight with 16 bits of precision. This value would be used to verify the next state proof.
+ LnProvenWeight uint64 `json:"LnProvenWeight"`
+
+ // The vector commitment root of the top N accounts to sign the next StateProof.
+ VotersCommitment []byte `json:"VotersCommitment"`
+ } `json:"Message"`
+
+ // The encoded StateProof for the message.
+ StateProof []byte `json:"StateProof"`
+}
+
// TealKeyValue defines model for TealKeyValue.
type TealKeyValue struct {
Key string `json:"key"`
@@ -637,6 +676,9 @@ type DryrunResponse struct {
Txns []DryrunTxnResult `json:"txns"`
}
+// LightBlockHeaderProofResponse defines model for LightBlockHeaderProofResponse.
+type LightBlockHeaderProofResponse LightBlockHeaderProof
+
// NodeStatusResponse defines model for NodeStatusResponse.
type NodeStatusResponse struct {
@@ -716,26 +758,8 @@ type PostTransactionsResponse struct {
TxId string `json:"txId"`
}
-// ProofResponse defines model for ProofResponse.
-type ProofResponse struct {
-
- // The type of hash function used to create the proof, must be one of:
- // * sha512_256
- // * sha256
- Hashtype string `json:"hashtype"`
-
- // Index of the transaction in the block's payset.
- Idx uint64 `json:"idx"`
-
- // Merkle proof of transaction membership.
- Proof []byte `json:"proof"`
-
- // Hash of SignedTxnInBlock for verifying proof.
- Stibhash []byte `json:"stibhash"`
-
- // Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.
- Treedepth uint64 `json:"treedepth"`
-}
+// StateProofResponse defines model for StateProofResponse.
+type StateProofResponse StateProof
// SupplyResponse defines model for SupplyResponse.
type SupplyResponse struct {
@@ -777,6 +801,27 @@ type TransactionParametersResponse struct {
MinFee uint64 `json:"min-fee"`
}
+// TransactionProofResponse defines model for TransactionProofResponse.
+type TransactionProofResponse struct {
+
+ // The type of hash function used to create the proof, must be one of:
+ // * sha512_256
+ // * sha256
+ Hashtype string `json:"hashtype"`
+
+ // Index of the transaction in the block's payset.
+ Idx uint64 `json:"idx"`
+
+ // Proof of transaction membership.
+ Proof []byte `json:"proof"`
+
+ // Hash of SignedTxnInBlock for verifying proof.
+ Stibhash []byte `json:"stibhash"`
+
+ // Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.
+ Treedepth uint64 `json:"treedepth"`
+}
+
// VersionsResponse defines model for VersionsResponse.
type VersionsResponse Version
diff --git a/daemon/algod/api/server/v2/generated/routes.go b/daemon/algod/api/server/v2/generated/routes.go
index fa62e543f..185fbbca1 100644
--- a/daemon/algod/api/server/v2/generated/routes.go
+++ b/daemon/algod/api/server/v2/generated/routes.go
@@ -38,12 +38,18 @@ type ServerInterface interface {
// Get the block for the given round.
// (GET /v2/blocks/{round})
GetBlock(ctx echo.Context, round uint64, params GetBlockParams) error
- // Get a Merkle proof for a transaction in a block.
+ // Gets a proof for a given light block header inside a state proof commitment
+ // (GET /v2/blocks/{round}/lightheader/proof)
+ GetLightBlockHeaderProof(ctx echo.Context, round uint64) error
+ // Get a proof for a transaction in a block.
// (GET /v2/blocks/{round}/transactions/{txid}/proof)
- GetProof(ctx echo.Context, round uint64, txid string, params GetProofParams) error
+ GetTransactionProof(ctx echo.Context, round uint64, txid string, params GetTransactionProofParams) error
// Get the current supply reported by the ledger.
// (GET /v2/ledger/supply)
GetSupply(ctx echo.Context) error
+ // Get a state proof that covers a given round
+ // (GET /v2/stateproofs/{round})
+ GetStateProof(ctx echo.Context, round uint64) error
// Gets the current node status.
// (GET /v2/status)
GetStatus(ctx echo.Context) error
@@ -391,8 +397,38 @@ func (w *ServerInterfaceWrapper) GetBlock(ctx echo.Context) error {
return err
}
-// GetProof converts echo context to params.
-func (w *ServerInterfaceWrapper) GetProof(ctx echo.Context) error {
+// GetLightBlockHeaderProof converts echo context to params.
+func (w *ServerInterfaceWrapper) GetLightBlockHeaderProof(ctx echo.Context) error {
+
+ validQueryParams := map[string]bool{
+ "pretty": true,
+ }
+
+ // Check for unknown query parameters.
+ for name, _ := range ctx.QueryParams() {
+ if _, ok := validQueryParams[name]; !ok {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
+ }
+ }
+
+ var err error
+ // ------------- Path parameter "round" -------------
+ var round uint64
+
+ err = runtime.BindStyledParameter("simple", false, "round", ctx.Param("round"), &round)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
+ }
+
+ ctx.Set("api_key.Scopes", []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetLightBlockHeaderProof(ctx, round)
+ return err
+}
+
+// GetTransactionProof converts echo context to params.
+func (w *ServerInterfaceWrapper) GetTransactionProof(ctx echo.Context) error {
validQueryParams := map[string]bool{
"pretty": true,
@@ -427,7 +463,7 @@ func (w *ServerInterfaceWrapper) GetProof(ctx echo.Context) error {
ctx.Set("api_key.Scopes", []string{""})
// Parameter object where we will unmarshal all parameters from the context
- var params GetProofParams
+ var params GetTransactionProofParams
// ------------- Optional query parameter "hashtype" -------------
if paramValue := ctx.QueryParam("hashtype"); paramValue != "" {
@@ -449,7 +485,7 @@ func (w *ServerInterfaceWrapper) GetProof(ctx echo.Context) error {
}
// Invoke the callback with all the unmarshalled arguments
- err = w.Handler.GetProof(ctx, round, txid, params)
+ err = w.Handler.GetTransactionProof(ctx, round, txid, params)
return err
}
@@ -476,6 +512,36 @@ func (w *ServerInterfaceWrapper) GetSupply(ctx echo.Context) error {
return err
}
+// GetStateProof converts echo context to params.
+func (w *ServerInterfaceWrapper) GetStateProof(ctx echo.Context) error {
+
+ validQueryParams := map[string]bool{
+ "pretty": true,
+ }
+
+ // Check for unknown query parameters.
+ for name, _ := range ctx.QueryParams() {
+ if _, ok := validQueryParams[name]; !ok {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
+ }
+ }
+
+ var err error
+ // ------------- Path parameter "round" -------------
+ var round uint64
+
+ err = runtime.BindStyledParameter("simple", false, "round", ctx.Param("round"), &round)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
+ }
+
+ ctx.Set("api_key.Scopes", []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetStateProof(ctx, round)
+ return err
+}
+
// GetStatus converts echo context to params.
func (w *ServerInterfaceWrapper) GetStatus(ctx echo.Context) error {
@@ -771,8 +837,10 @@ func RegisterHandlers(router interface {
router.GET("/v2/applications/:application-id", wrapper.GetApplicationByID, m...)
router.GET("/v2/assets/:asset-id", wrapper.GetAssetByID, m...)
router.GET("/v2/blocks/:round", wrapper.GetBlock, m...)
- router.GET("/v2/blocks/:round/transactions/:txid/proof", wrapper.GetProof, m...)
+ router.GET("/v2/blocks/:round/lightheader/proof", wrapper.GetLightBlockHeaderProof, m...)
+ router.GET("/v2/blocks/:round/transactions/:txid/proof", wrapper.GetTransactionProof, m...)
router.GET("/v2/ledger/supply", wrapper.GetSupply, m...)
+ router.GET("/v2/stateproofs/:round", wrapper.GetStateProof, m...)
router.GET("/v2/status", wrapper.GetStatus, m...)
router.GET("/v2/status/wait-for-block-after/:round", wrapper.WaitForBlock, m...)
router.POST("/v2/teal/compile", wrapper.TealCompile, m...)
@@ -788,206 +856,215 @@ func RegisterHandlers(router interface {
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9a3PbuLLgX8Hq3qokvqLkvOaeuGrqrhNnZrwnyaRiz7mPcXYCkS0JxyTAA4C2NNn8",
- "9y00ABIkQUl+JJnM8afEIh6NRqPR6OfHUSqKUnDgWo0OPo5KKmkBGiT+RdNUVFwnLDN/ZaBSyUrNBB8d",
- "+G9Eacn4YjQeMfNrSfVyNB5xWkDTxvQfjyT8o2ISstGBlhWMRypdQkHNwHpdmtb1SKtkIRI3xKEd4vho",
- "9GnDB5plEpTqQ/kzz9eE8TSvMiBaUq5oaj4pcsn0kuglU8R1JowTwYGIOdHLVmMyZ5BnauIX+Y8K5DpY",
- "pZt8eEmfGhATKXLow/lCFDPGwUMFNVD1hhAtSAZzbLSkmpgZDKy+oRZEAZXpksyF3AKqBSKEF3hVjA5+",
- "HSngGUjcrRTYBf53LgF+h0RTuQA9ej+OLW6uQSaaFZGlHTvsS1BVrhXBtrjGBbsATkyvCXldKU1mQCgn",
- "7354QR4/fvzMLKSgWkPmiGxwVc3s4Zps99HBKKMa/Oc+rdF8ISTlWVK3f/fDC5z/xC1w11ZUKYgflkPz",
- "hRwfDS3Ad4yQEOMaFrgPLeo3PSKHovl5BnMhYcc9sY1vdVPC+b/qrqRUp8tSMK4j+0LwK7Gfozws6L6J",
- "h9UAtNqXBlPSDPrrfvLs/ceH44f7n/7l18Pkf9yfTx9/2nH5L+pxt2Ag2jCtpASerpOFBIqnZUl5Hx/v",
- "HD2opajyjCzpBW4+LZDVu77E9LWs84LmlaETlkpxmC+EItSRUQZzWuWa+IlJxXPDpsxojtoJU6SU4oJl",
- "kI0N971csnRJUqrsENiOXLI8NzRYKciGaC2+ug2H6VOIEgPXtfCBC/rjIqNZ1xZMwAq5QZLmQkGixZbr",
- "yd84lGckvFCau0pd7bIip0sgOLn5YC9bxB03NJ3na6JxXzNCFaHEX01jwuZkLSpyiZuTs3Ps71ZjsFYQ",
- "gzTcnNY9ag7vEPp6yIggbyZEDpQj8vy566OMz9mikqDI5RL00t15ElQpuAIiZn+HVJtt/z8nP78hQpLX",
- "oBRdwFuanhPgqciG99hNGrvB/66E2fBCLUqansev65wVLALya7piRVUQXhUzkGa//P2gBZGgK8mHALIj",
- "bqGzgq76k57Kiqe4uc20LUHNkBJTZU7XE3I8JwVdfb8/duAoQvOclMAzxhdEr/igkGbm3g5eIkXFsx1k",
- "GG02LLg1VQkpmzPISD3KBkjcNNvgYfxq8DSSVQCOH2QQnHqWLeBwWEVoxhxd84WUdAEByUzIL45z4Vct",
- "zoHXDI7M1viplHDBRKXqTgMw4tSbxWsuNCSlhDmL0NiJQ4fhHraNY6+FE3BSwTVlHDLDeRFoocFyokGY",
- "ggk3P2b6V/SMKvjuydAF3nzdcffnorvrG3d8p93GRok9kpF70Xx1BzYuNrX67/D4C+dWbJHYn3sbyRan",
- "5iqZsxyvmb+b/fNoqBQygRYi/MWj2IJTXUk4OON75i+SkBNNeUZlZn4p7E+vq1yzE7YwP+X2p1diwdIT",
- "thhAZg1r9DWF3Qr7jxkvzo71KvpoeCXEeVWGC0pbr9LZmhwfDW2yHfOqhHlYP2XDV8Xpyr80rtpDr+qN",
- "HAByEHclNQ3PYS3BQEvTOf6zmiM90bn83fxTlnkMp4aA3UWLSgGnLDgsy5yl1GDvnftsvprTD/Z5QJsW",
- "U7xJDz4GsJVSlCA1s4PSskxykdI8UZpqHOlfJcxHB6N/mTZalantrqbB5K9MrxPsZARRK9wktCyvMMZb",
- "I9CoDVzCcGb8hPzB8jsUhRi3u2doiBnem8MF5XrSPERajKA+ub+6mRp8WxnG4rvzsBpEOLENZ6CsXGsb",
- "3lMkQD1BtBJEK4qZi1zM6h/uH5Zlg0H8fliWFh8oEwJDcQtWTGn1AJdPmyMUznN8NCE/hmOjgC14vja3",
- "gpUxzKUwd9eVu75qjZFbQzPiPUVwO4WcmK3xaDDC+21QHD4WliI34s5WWjGNf3JtQzIzv+/U+dsgsRC3",
- "w8SFzyeHOftywV+CJ8v9DuX0CccpcSbksNv3emRjRokTzLVoZeN+2nE34LFG4aWkpQXQfbGXKOP49LKN",
- "LKw35KY7MroozMEZDmgNobr2Wdt6HqKQICl0YHiei/T8Fs77zIzTP3Y4PFkCzUCSjGoanCt3XuKXNXb8",
- "CfshRwAZkeh/xv/QnJjPhvANX7TDmpc6Q/oVgV49Mw9cKzbbmUwDfHgLUtg3LTFv0StB+aKZvMcjLFp2",
- "4REv7TOaYA+/CLP0Rkl2OBPyevTSIQROGtUfoWbU4LiMOzuLTasycfiJqA9sg85AjbWlL0WGGOoOH8NV",
- "Cwsnmn4GLCgz6m1goT3QbWNBFCXL4RbO65KqZX8R5j33+BE5+enw6cNHvz16+p15kJRSLCQtyGytQZH7",
- "TowmSq9zeNBfGcqzVa7jo3/3xCuM2uPGxlGikikUtOwPZRVR9tKyzYhp18daG8246hrAXY7lKRj2YtFO",
- "rI7VgHbElLkTi9mtbMYQwrJmlow4SDLYSkxXXV4zzTpcolzL6jYeHyClkBFVCB4xLVKRJxcgFRMRrfZb",
- "14K4Fl4gKbu/W2jJJVXEzI1auopnICcxytIrjqAxDYXadqHaoU9XvMGNG5BKSdc99Nv1Rlbn5t1lX9rI",
- "90ofRUqQiV5xksGsWrRk17kUBaEkw454cbwRGZh3R6VugVs2gzXAmI0IQaAzUWlCCRcZ4COlUnE+OmDi",
- "Qt06mgR0yJr10t7TMzACcUqrxVKTqiSo8O5tbdMxoandlATvVDWgEaxVubaVnc6aT3IJNDOCMnAiZk7t",
- "5hSCuEiK2nrtOZHj4pGnQwuuUooUlDIPHCu2bgXNt7O7rDfgCQFHgOtZiBJkTuU1gdVC03wLoNgmBm4t",
- "djldZR/q3abftIHdycNtpNK8cSwVGBnPnO4cNAyhcEecXIBEnd1n3T8/yXW3ryoHLOpOUjllBT6VOOVC",
- "QSp4pqKD5VTpZNuxNY1a4pRZQXBSYicVBx54rr+iSlvNLeMZitaW3eA89h1vphgGePBGMSP/zV8m/bFT",
- "wye5qlR9s6iqLIXUkMXWwGG1Ya43sKrnEvNg7Pr60oJUCraNPISlYHyHLLsSiyCqaz2HM230F4faAHMP",
- "rKOobAHRIGITICe+VYDd0Ko4AIh5h9U9kXCY6lBObcocj5QWZWnOn04qXvcbQtOJbX2of2na9omL6oav",
- "ZwLM7NrD5CC/tJi19uQlNTIwjkwKem7uJpRorYq5D7M5jIliPIVkE+WbY3liWoVHYMshHXhMOI+VYLbO",
- "4ejQb5ToBolgyy4MLXjgZfOWSs1SVqIk8VdY37papDtBVENCMtCUGWk7+IAMHHlv3Z9Ym0F3zOsJWjsJ",
- "oX3we1JoZDk5U3hhtIE/hzWqSt9aY/RpYMK+BUkxMqo53ZQTBNSbuMyFHDaBFU11vjbXnF7CmlyCBKKq",
- "WcG0tt4FbUFSizIJB4g+8DfM6FQs1pDrd2AXnc8JDhUsr78V45EVWzbDd9oRXFrocAJTKUS+gyq6h4wo",
- "BDupqkkpzK4z58ziPR48JbWAdEIM6tdq5nlPtdCMKyD/LSqSUo4CWKWhvhGERDaL16+ZwVxg9ZxOKd1g",
- "CHIowMqV+GVvr7vwvT2350yROVx6DzDTsIuOvT18Jb0VSrcO1y28eM1xO47wdtR8mIvCyXBdnjLZ+rR3",
- "I++yk287g9fqEnOmlHKEa5Z/YwbQOZmrXdYe0siSquX2teO4Oyk1gqFj67b7LoWY35IiLe4BgI8TZ9Q3",
- "rci84haoSrnnCNq5vEJDzMe1l4f17j4g6AKwpF4b5/589PS70bgx3dffzZ1sv76PSJQsW8UcNDJYxfbE",
- "HTF8Td0zT4+1gqhVDBmzmEd8tECe525lHdZBCjBnWi1ZaYZs/EnWGlq+qP/3/n8c/HqY/A9Nft9Pnv3b",
- "9P3HJ58e7PV+fPTp++//X/unx5++f/Af/xpVK2o2i6s/fzK7JObEsfgVP+bWgDEX0r7H1k7ME/MvD7eW",
- "ABmUehlz/iwlKGSN1omz1MtmUwE6OpRSigvgY8ImMOmy2GwByiuTcqBzdELEN4XYxShaHwdLb544AqyH",
- "C9mJj8XoB018SJt4mM2jI1/fgvBiByKyjU//WFf2q5iHnrPuoKi10lD09V22628D0v47Lyv3DpXgOeOQ",
- "FILDOhoswji8xo+x3va6G+iMgsdQ3+5bogV/B6z2PLts5k3xi7sd8Pe3tWH7Fja/O25H1Rn6DKOqBvKS",
- "UJLmDBU5gistq1SfcYpPxYBcI+Yk/wAeVh688E3i2oqIMsENdcapMjisH5BRFfgcIlfWDwBeh6CqxQKU",
- "7gjNc4Az7loxTirONM5VmP1K7IaVINGmM7EtC7omc5qjruN3kILMKt0WI/HSU5rludO7mmmImJ9xqg0P",
- "Upq8Zvx0hcN5D0JPMxz0pZDnNRbiV9QCOCimkjjf/9F+Rfbvlr90VwHGmdjPnt98ab7vYY853jnIj4/c",
- "E+v4COXoRuPag/2LqeEKxpMokRm5qGAc/bc7tEXum9eAJ6AHje7W7foZ1ytuCOmC5iwzstN1yKHL4npn",
- "0Z6ODtW0NqKjVfFrfR9zG1iIpKTpOVqNRwuml9Vskopi6p+W04Won5nTjEIhOH7LprRkU1VCOr14uEXO",
- "vQG/IhF29Wk8clxH3boixg0cW1B3zlqf6f/Wgtz78eUpmbqdUvesF64dOnCfjGgDnIdQy2BlFm+jyKwb",
- "8hk/40cwZ5yZ7wdnPKOaTmdUsVRNKwXyOc0pT2GyEOTAOx0dUU3PeI/FDwZ6Bu5epKxmOUvJeXgVN0fT",
- "Bu/0Rzg7+9UQyNnZ+571o39xuqmiZ9ROkFwyvRSVTlx0QiLhksosArqqvdNxZBtbtGnWMXFjW4p00Q9u",
- "/DirpmWpus6q/eWXZW6WH5Chcq6YZsuI0kJ6Jmg4o4UG9/eNcE8uSS99aEulQJEPBS1/ZVy/J8lZtb//",
- "GEjLe/OD4zWGJtcltPRG13Km7eqMcOFWoIKVljQp6QJUdPkaaIm7jxd1gRrKPCfYreU16n0scKhmAR4f",
- "wxtg4biyBxwu7sT28mGm8SXgJ9xCbGO4U6P4v+5+BX6k196uji9qb5cqvUzM2Y6uShkS9ztTR58tDE/2",
- "1hjFFtwcAheoNwOSLiE9hwxjhqAo9Xrc6u4Nfu6G86yDKRtbZx3dMAAEVWwzIFWZUScDUL7ueuIr0NqH",
- "H7yDc1ifiiZ+5Cqu922HcDV0UJFSg8vIEGt4bN0Y3c13xmN0gi1L71eNPoSeLA5quvB9hg+yvSFv4RDH",
- "iKLlsDyECCojiLDEP4CCayzUjHcj0o8tz4g3M3vzRdQ8nvcT16SR2pwBOFwN+mHb7wVgoK64VGRGFWRE",
- "uBhT6/QccLFK0QUM6J5CLeeOrsUtzSgOsu3ei950Yt690Hr3TRRk2zgxa45SCpgvhlRQTdgx+/uZrCId",
- "VzAhmDrCIWyWo5hUexxYpkNlS9tsY+GHQIsTMEjeCBwejDZGQslmSZUPf8UoYX+Wd5IBPqMT/6aYrePA",
- "Yh2EAtcRWZ7nds9pT2/rIrd8uJaP0QqVtjvEW41Hzokqth2CowCUQQ4Lu3Db2BNKE1DQbJCB4+f5PGcc",
- "SBIzflOlRMps/HJzzbg5wMjHe4RY3RPZeYQYGQdgo4EIByZvRHg2+eIqQHIXEEH92GhaCv6GuCegdW8y",
- "Io8oDQtnfMAxzXMA6jwm6vur47eDwxDGx8SwuQuaGzbnlKjNIL0IIhRbO/FCzkT5YEic3aD6sxfLldZk",
- "r6LrrCaUmTzQcYFuA8SbRYnYFijEl3v61rgaukt3mXrg+h7C1f0g9uhaAHQ0EU16Hvfy2/pCa9/N/Zus",
- "YenjJpjWe2bGaH+IfqK7NIC/viK4jhZ6272uo4/0tumyHSgVyE8xVmzOSF812lfAKsgBJeKkJUEk5zGF",
- "uRHsAdntie8WvNwxHIvy9YPAHi5hwZSGRnVlbiWvi/3S5i6K4d9CzIdXp0s5N+t7J0TNo22YoTXfhcv8",
- "4iu4EBqSOZNKJ6j3iy7BNPpB4YvyB9M0Lii0Le42EwrL4rwBpz2HdZKxvIrTq5v3r0dm2je1EkZVs3NY",
- "ozgINF2SGWbuifrhbJjaumptXPAru+BX9NbWu9tpME3NxNKQS3uOb+RcdDjvJnYQIcAYcfR3bRClGxgk",
- "XvxHkOtYxFIgNNjDmZmGk02qx95hyvzYmx5KARTDd5QdKbqW4LW8cRUMvQ/Mc4/pIPFNP2xg4AzQsmTZ",
- "qqMItKMOPhfplV77PrC4gwXcXTfYFgwESr+YZ6oE1Y4hb6Rbm8KIh2ub7ISZ03akd8gQwqmY8gn4+ogy",
- "pI1Zorbh6hRo/ldY/820xeWMPo1HN9MbxnDtRtyC67f19kbxjAYxq0dqmQGuiHJallJc0Dxx2tUh0pTi",
- "wpEmNvfK2C/M6uI6vNOXh6/eOvA/jUdpDlQmtagwuCpsV34zq7Lh6gMHxCf4Mg8eL7NbUTLY/DqMONTI",
- "Xi7BJVMKpNFe8odG2x4cRaehncft8lv1rc4wYJe4wUAAZW0faHRX1jzQNgnQC8pyrzTy0A7Y0HFxu2UQ",
- "iXKFcIAbmxYCC1Fyq+ymd7rjp6Ohri08KZxrQ7qnwmY0U0TwrkuWESFRF4WkWlBM3WBVAn3mxKsiMccv",
- "UTlL4wpGPlOGOLg1HJnGBBsPCKNmxIoN2CF5xYKxTDO1w0O3A2QwRxSZPg3IEO5mwqWirTj7RwWEZcC1",
- "+STxVHYOKubKcKrm/nVqZIf+XG5gq55uhr+JjBGmLeneeAjEZgEjNFP1wD2qn8x+obU6xvwQ6OOvYO0O",
- "Z+xdiRss1Y4+HDVbl6Fl29wUZo7t8z9DGDbL2Pa0tf7x6vKnDMwRTUPLVDKX4neIv/PweRxxW/eJWhh6",
- "Tf4OfBKJ/umymFq702TTbWYf3O4h6SbUQrUt9ANUjzsf2KQwKYZXz1Jut9pmhWz5hcQJJvTlmtrxG4Jx",
- "MPf833J6OaOxjCFGyDAwHTbWz5YiWQviO3vcO503c7lzJiQwpNZtmQ3oKkE2ESX94OFrCgx22p1FhUYy",
- "QKoNZYKxNX7lSkSGqfgl5Ta5qOlnj5LrrcAqv0yvSyExHFPFdd4ZpKygeVxyyBD77fDVjC2YTa1ZKQhy",
- "N7qBbE5iS0Uu/6W1LzeoOZ6T/XGQHdbtRsYumGKzHLDFQ9tiRhVy8loRVXcxywOulwqbP9qh+bLimYRM",
- "L5VFrBKkFurweVNbbmagLwE42cd2D5+R+2izUuwCHhgsuvt5dPDwGSpd7R/7sQvA5dDdxE0yZCf/6dhJ",
- "nI7RaGfHMIzbjTqJBhfaxOfDjGvDabJddzlL2NLxuu1nqaCcLiDuJlFsgcn2xd1ERVoHLzyzWXuVlmJN",
- "mI7PD5oa/jTg82nYnwWDpKIomC6cZUOJwtBTk5jRTuqHsymAXfYgD5f/iAbC0ttHOo/IL6s0tfdbbNVo",
- "xn1DC2ijdUyojcHNWWO69wm/yLGP5Md0SnUWJYsbM5dZOoo5aMmfk1IyrvFhUel58heSLqmkqWF/kyFw",
- "k9l3TyIppNpZY/jVAP/ieJegQF7EUS8HyN7LEK4vuc8FTwrDUbIHjY91cCoHLZlxbzHP0bvOgpuH3lUo",
- "M6Mkg+RWtciNBpz6RoTHNwx4Q1Ks13Mlerzyyr44ZVYyTh60Mjv0y7tXTsoohIzldWmOu5M4JGjJ4AId",
- "1+KbZMa84V7IfKdduAn0X9fy4EXOQCzzZzn2EHhesTz7WxMz0snCJylPl1G9/8x0/K3Jklwv2Z7jaBqR",
- "JeUc8uhw9s78zd+tkdv/72LXeQrGd2zbza5nl9tZXAN4G0wPlJ/QoJfp3EwQYrXtRF97XeYLkRGcp8lZ",
- "0VBZP2FgkEHrHxUoHQvaww/W8wP1O+ZdYBM4EeAZStUT8qOtcrIE0gqpR2mWFVVuw7MhW4B0iseqzAXN",
- "xsSMc/ry8BWxs9o+NuWnTSC1QGGuvYrOuz5IcLObD6HP3hn3b959nM0Ol2bVSmOGC6VpUcZCV0yLU98A",
- "42NCXSeKeSF2JuTIStjKy292EkMPcyYLI5nWo1kejzRh/qM1TZcoura4yTDJ7575zFOlChLD13le6xw1",
- "eO4M3C75mc19NibCvC8umbLFLeAC2tEydeiYezr56Jn28mTFuaWUKI/eFNp4HbR74KxB26tDo5B1EH9F",
- "wcUmDrxqIrgT7BVN+tDNKtfLCG+jiusUpb5oUUq54CzFlAtBOY0aZFcoYxdbwQ7ZKbrKKH/E3QmNHK5o",
- "LrvanchhcTC7nWeEDnF9ZWXw1WyqpQ77p8aKDEuqyQK0cpwNsrFPyej0JYwrcDmHsGZKwCeFbNlfkENG",
- "TXpJrfq9Ihmh7/yAAPyD+fbGPY/QqfSccRSEHNqc/6rVaGAef22kJ6bJQoBy62mH5qtfTZ8JhqdnsHo/",
- "8Xn/cQxrvjDLtra6/lCH3nLnLGWm7QvTllivw/rnlpuinfSwLN2kwwk7o/KAXvFBBEcsMIlXgQfIrccP",
- "R9tAbhtN7nifGkKDCzTYQYn3cI8w6uSVnWy9FzSvLEVhC2JdXaLxlYxHwHjFODRVKSIXRBq9EnBj8LwO",
- "9FOppNqKgDvxtFOgOVrpYgxNaaeivelQnQ1GlOAa/RzD29jk3RxgHHWDRnCjfF0XwzDUHQgTL7AKj0Nk",
- "P4smSlVOiMrQ7biTVzPGOAzj9pl72xdA/xj0ZSLbXUtqT85VbqKhSLJZlS1AJzTLYsnanuNXgl9JVqHk",
- "ACtIqzrZVVmSFCO22yHsfWpzE6WCq6rYMJdvcMPpUhGTo9/gBMr7VTeDTwiyX8N6j16+fffyxeHpyyN7",
- "X5hnuQ0lMzK3hMIwRPOOVRqM6FwpIB9CNH7Afh86C46DGeTTjRBtmNPXEyI61M/W+G8sIdUwATmb+pW9",
- "urwBHTteWbxvj9QTzs3RSxRbJLtjAq++m6Ojmfp657Hpf6sHMheLNiBfOHPMJmYc7lGMDb8091sYBd7L",
- "smZvwDpIG32ohE/Nj6/bOrywzTzxxu2lXUPdfZ1lfbP2ZDhf+hjv6AFPyiBfDrVigDUGDflTpoPuv1S7",
- "KBxNyUZOiUnOYyNYZwybXN3WZYwqwoYcMKz/hfnc672bANt7DuDYGxHqPXv6AP3Vuw2SkjJn6WyYRR+z",
- "zsG47/K9i+ths8HdRTi3XRwktpJeNsXNFNJz2w5CD2zSu8nu4f+HtRkZjVuYsnwB3OUsbztk7uwWNp9D",
- "qtnFFjf5/zRPi8YFe+wfH7YgRuA1z2o3I1++84pvogagTV7sG+EJcozcGJwhJ9lzWN9TpEUN0Sx8Y0+o",
- "14kuRQxg/pXEkIhQMTON1ZY4zTlTNWUgFrxZ1HaHJvXVYPrjIOjjmnN5kiQ0DATZMOWFiD23dprLdL1S",
- "eBR6zAx50vcTkA7fXkeY71XVqevr+pyBKGpe1d3seJcuuhWDGmoFoY9zBeV/8xFMdhZb97VJ0Izq2Esq",
- "M98i+r7wT5dkwDet6+1tnepZHOh5PTNrnFj6Ds+RrBDoqpTmQjG+SIb8vdp+I2HpKLSOoSYHM7siXHOQ",
- "LjG79mV1Ey2808smODahwpU5ug4S1GCOQwvcYHz0uyYAHFNhUVtU2Vn+wgWaxwY10MkgTHt4zk3IfmG/",
- "ew9fnwpph2eUo9dka5y1d19iqofEkOrnxN2W2z2Hr/NUYZzbuhcqFrPNDSpDlV8pRVal9oIOD0bzMNw1",
- "I8IGVhKV8tP+KnsCW475QV4FcRjnsJ5aoSldUt4kamkfa5u60a4hiHvs7PatvuLiAmu+sAtY3AqcX/Ml",
- "NB6VQuTJgI7vuB963j0D5yw9h4yYu8Mb/gdSIJP7qFqqjTiXy7UPtS5L4JA9mBBi3lJFqdfentNOutaZ",
- "nN/Tm+Zf4axZZbNBuEfa5IzHfVZsmfIb8jc/zGaupsAwvxtOZQfZEtu9Ggh7l/QykhB815pvEQtLN0lz",
- "Q1QWipiUcs1Av53Od/+hFiH9MERjy/vnvPWqs2mFOlYVIeGWX3eBOvmKr7t+8Mmuy8N1IFerFPTXufMG",
- "tHA7gPtdEN+oJvrIHdYo6NkuGoV4ChTTHVUaFiGYP4ggqOTDww9EwhzzCQqyt4cT7O2NXdMPj9qfzetr",
- "by96Mr+YMqNVWs7NG6OYvw1Z4a2lecDho7MfFcuzbYTRct9pcnuig8pvztHpq2QX/c0+kftH1SVavIoa",
- "tbsJiJjIWluTB1MFjjk7+OS4bpNo8T8FaSWZXmP8lX9Rsd+ice0/1koYV6+09th3DuNanEMdwdeobJpi",
- "7j8KWyywMHc9KrE1Vj94uaJFmYM7KN/fm/07PP7Lk2z/8cN/n/1l/+l+Ck+ePtvfp8+e0IfPHj+ER395",
- "+mQfHs6/ezZ7lD168mj25NGT754+Sx8/eTh78t2zf7/nK6lbQJsq5f+FKXiTw7fHyakBtsEJLVld9MSQ",
- "sU/nSVM8ieZNko8O/E//25+wSSqKZnj/68g5E46WWpfqYDq9vLychF2mC3yjJVpU6XLq5+kXm3h7XDs6",
- "2QAV3FHrw2JIATfVkcIhfnv38uSUHL49njQEMzoY7U/2Jw8xa3YJnJZsdDB6jD/h6Vnivk8dsY0OPn4a",
- "j6ZLoDmmUjd/FKAlS/0ndUkXC5ATl9fU/HTxaOr9JKYf3fv0kxl1EYtMsy5bYT3mXrpPp+tCu5d1yWql",
- "z1Ium9O4TqrmxEeeoSeNffIZ1lYj6zhrEqgcB0V+XRiZjas/+DWSZnrOFpXslGmqtfku4yJTxNbclOS1",
- "1bm/pel56K0Sq5rvWFmsaL7zaSnUomwbgBtNf6ygSyxvKs5s9jmg1FpV1HAiLSsIIWn4quGV+8mz9x+f",
- "/uXTaAdAUG/pygZ/oHn+wdbRghUqf9oVvNV4qFz8uFE9dIp0j9GCXX8N83nWbdp+Ux+44PBhaBscYNF9",
- "oHluGgoOsT14jw7tSAl4iB7t73+GQt/j1iieJL5qxfAnt7jQtgXtxsvtDtdb9HOaYY5FUNou5eE3u5Rj",
- "jqYDw/GJvdE+jUdPv+G9OeaG59CcYMsgWqx/i/zCz7m45L6lkWaqoqByjbJKkAg2lEo/Dd5W0zBp3fRj",
- "S7Gc3egu6+XrPD7acr3dU0NMsZ9GoZMTz3yvs76h6tEl/oMVU1o9mJAfw97ImDEqwfr8V5I3laVKKS5Y",
- "Zliss8n54M0GtnsqDNiIXrbBa/3u3v2s9+5hW+vQisOPAdMi8Y0w9SxPN734+l5inZTm10oZHmTfu0YO",
- "o8+aV7Vbg3yo2OMODPYOd0OFMgfEmwDeWtJpZ038/HzXvt+Ca6J1H3xGrvyNC2uvaW7oJFhux2PdJqe4",
- "E+L+aYS42hnBVibBfEybxDpMujr96HOJ3IIo53Kp7CDEhS/doG+Q6+J+h1M8mNjEIGGb67ED51iwVTzD",
- "DC93gtnnFsz6qZFiYDQJb76eMIYwLJvcSVcpB9JKdXylHE/fqPT1T4ysQXHLQLpd0LoGb+wJUY4Tfzae",
- "+acUnhzS7sSmf2qxyfrybRCcWnnLnOPnsOwEQZXyoPJLy/FstvZ0OCZKSOf+VEomJNPrMWGcZGDOHloM",
- "hcQQ7abeuXMyAo7/fX34X+h6+vrwv8j3ZH9ci2AYwRaZ3jr3tGWgH0FH6vE/Xx/W4sBGWegPI2Cc1kga",
- "qJevhU89hkgr6Or7IZStrF0xJp4VdDXaKImMvx1p8aZCUyf2tE9FruSorbfvyuS0XaoUgRVNdb4mFO+f",
- "tfX9xUrsPm9Yp3Z8p55/NN5ow4y+CkcsauyqXl2RAH+sdrEZvtNOjqUWOlx+Pix5s10w6SEjCsH1pLy7",
- "3f1md7cvlpJSmDPNMIFEc5/4u6oFZFOLwYE74LA6If8tKnR2saXGIJb8FGdA514/pxNAg+zFORZ6q7Gz",
- "t9dd+N6e23OmyBwukYNSjg276Njb+xOIrKs65yQlXPCEYyWsCyCBh9yd3PqHlluf7j/+ZldzAvKCpUBO",
- "oSiFpJLla/ILr5P03Ewsr3lOxYO0SRv5T89TvpGiA/H9Rrbrrm2a6UYybAVOBSqEumCheyuPm4oH5i2P",
- "yVV8wLoae9MJOv5Zq4rdj3HPsDKJCemBBef5+vhoF7n8GzGE7pzkK3Kvxffmc98AUX+ad1/Gn2Y3Zvpk",
- "/8mXgyDchTdCkx9QXfaZWfpn1R3EySpgNle2qDQWk5C1uEDEjUzFnNCxS8yKmULXpA4UMvzEMkJbmqHP",
- "NcwMu/KLP7B+foeixBG67KL3ji/c8YUb8YUuQTUcAcPt1fQjmgpCdtA7ks9Nyz+RiTGwt0hReIOLIHPQ",
- "6dKmIeiGxUTYis8VOMxTNmXUv2X7HwIdSVWFa3GhH5jpfceAQOz4k43E+DQepSAjxPezz4djPrM5hnXW",
- "eSB94Qg05zCfS7lOo+ySzTPlfc5d1htidvFKUL5oJu+H6SBabsNmeIfgqyG4x9ReuqzW9ni5RfwZvNJ9",
- "yuOEvEFxCA+4T4P4Z1R7fM4b+XMv6I3gYO3SRmK1tHhngqzFBaw9g0jxWRCs4dGVs42LDm2j40e9Ytmn",
- "aZ2mZ0ioeIsNtggVzU3NmoqfbfUKLUugUl37kt5uDjvtzHh8FPpptLIK1fmEIqAYvFzRkvhvox2lGQz4",
- "EXOypGpJ5hW3gNa1pdBlxTtRiPm4Vtaa0yDmB+SM7xG1pE8fPvrt0dPv/J+Pnn43II+ZeVz8cV8iawYy",
- "n+0wu4hlf16zY1uUqJF38KW38mo7NB6xbBVNIQIrnwkpPBdO94nM4Z4iJV0PZh4aSOL1GuR57suzt408",
- "pABzoaolK79GuXk2i1dc+snskpiTOg/6MX9e888LkGyOZcNqvvCFM8NIgAxKvdyYksFWPSv1stlUcHU5",
- "mXKpb0opLoCPCZvApGsMyxZNSuEc6LxOnSLELq5qAS8x9OaJI8B6uJBdRM23MfrBcEiXYu5LK1Ualy57",
- "mXnkyc698lU1LvqraFzeCJ6gPAZc+7dBCy1fT/uC2W7GgYKzrizBhUbFppAoRoZsS012EsBg0NjU4oHW",
- "dXKQjJ04llKdLqty+hH/g5kHPjUx/raMytQqYjdJZCe2xa262NgxiWxzG5/swimHxZy8ZqkUh5gVyV0j",
- "aq00FP0im7brb5sKdESvHMFzxiEpBI/lyfgZv77Gj9G8S2i2H+iMDhRDfbulkVrwd8Bqz7MLq7spfid/",
- "DCXvjR4sndVKKGs3RfTnQPpvTksr8W1zTFo/Tz+2/nT2EtdSLSudicugr81rsfFs2Ra3erbeiAzsuO1U",
- "MjH/US4ycOk3+keq5hpxidTjt2nXEQ5SWi2W2haJjFagrTsmNLVHweaOVduSbdpWPqncBRCaS6DZmswA",
- "OBEzs+h20mJCVV31F4nD8sZ4zsgGrlKKFJSCLAmrQ20CrU5qgpKP3oAnBBwBrmchSpA5ldcE1jKJzYB2",
- "yyLW4NaaQscH+lDvNv2mDexOHm4jlUA8Q8QXjSjKHNybJoLCHXGCsjb7zPvnJ7nu9lUlFiCKZD21X09Z",
- "gXk7OOVCQSp4poZzE287tpiNOFiLAltz15+UaF0XM/DA1fqKKu3qX7VSOAY5rc0UG5IpDyUkMyP/rU5H",
- "1hs7NfySq0o1pcGs7AVZtOoqrDbM9QZW9VxiHoxdC3e2IvS2kYewFIxfFwsLsiPrQItlhossDoNgqBPF",
- "+qhsAdEgYhMgJ75VgN1QwzIACFMNouuUp23KCao1Ky3K0pw/nVS87jeEphPb+lD/0rTtE5cLHkC+nglQ",
- "oeDtIL+0mLV1AJdUEQcHKei5k9kXzoe/D7M5jIliPHUp3Yfis1gBJ6ZVeAS2HNKu2Bce/9Y56xyODv1G",
- "iW6QCLbswtCCY4LmH0IsvOq7r6u3+4yq8ragHYhXjaBp/55eUqaTuZAuXT5Wmo9Y3TvZuCjTyj3/7KtY",
- "C6fqdrXqLUNx4wRVMFXoAG1B8EE4Zvf7Pjdmqh+E3MnI3+jjtSBmYaTimvlIanPeahnzj2cxv5Oe76Tn",
- "O+n5Tnq+k57vpOc76flOev7c0vPX8dolSeL5tDcNxwKyyOiblPC/oZinLxmk1Aj9tciPjwQjoptzvNGb",
- "RwPNp672NHorRCuS2rCAsI51aqZjnJQ5NdIQrLQPTiczquC7J94no66BadP3G15jGjx+RE5+OvSOCktn",
- "SW+3ve8ryym9zuGB83qs82t790fgFCtuovcj9a+f1DmUWGF+znIgyuDqJbY+ggvIjSRvjZ/EvEX6r6NT",
- "oPkLh5stj6NWBmUz2odx603m0FbQ0os8fq1UEYpOLZ0EyHOaq+EMyHa8gpax8P6aT9tnE7KG5yJbd8jd",
- "7NoUN7BN6I2fAuNURmpA98m7RxpaYB14V9S89+77dOtONX2i7ZPZNgqLl5KJ12reROXDpcTNhvWGsh5N",
- "8w6dRNP/d30nRjWAuxgMDT37PSGuCPVXva0IQuSOWMOZ/zCBJ93aeo5pYFsjUDnW860GiXjER08vnv2x",
- "rz1GmFbEUdwqMY0WwBPHW5KZyNZJizO1L5imJO/WSyZkjXiY6nulrnA/eAV9nRsiKPs82sRuQ3pYJY63",
- "DjBe6yC2G9utsYUjOs4bYPxzc98hDhmCQBzrib2du9nLrsjPgnLPdzztjqcFp7Fz2TPufBO7TGRyPZ6G",
- "FdKH2dlLWzBQkfCQ3lcPDMtCjK50S3OfwaxaLGyVvK4WGrNo1cUevw6Xs8vdlcFdjTjs4HXo6U2jJrrD",
- "9RlH4FR3X0iykKIqH9ichnyNCs6ipHztjRrm5V9UuSt+i5Fet8tD64qNPbnRK9eG9XJvvfot0D65W7T9",
- "u0UL1nm0+wsZqXgGMl5ObdUpkrUd46cr3nDgjSW0fDHB3urcvLtwf7/LLkKgNuSUtrSqPVCtw+T8lO3J",
- "ndyFV/9z3Ahvbe7QAQbb97JtGML2i0EGLAtvhk6yLX81tPnpO3oZpu66LaFx99f6EvBOrF+vkcxkRoyU",
- "gmYpVajU4KAvhTz/zLKkXh1HtMgIJmaY7AeemDfJZKtQiePuJFK2Y738q7yaFUzZqnxfV7hsogkOXcBu",
- "Cxt3it0/i2L3uT98ilAs8Ns5nNaGg2dyBzZFL/WKR7nUtLQZqof8l4MD4XJZ36onRm/4tkNGkB/aGpQh",
- "Lwklac7Q3Cy40rJK9RmnaNDqVD/uOGt4M92wKPXCN4nbVCMmTzfUGTdC1ZzUZq6oSDWHiAH7BwAvsalq",
- "sQClO5x4DnDGXSvGScWZxrmwmHRi/frNdW04+sS2LOiazGmOFtnfQQoyM4+IMGsZmoeUZnnuvEPMNETM",
- "zzjVJAfD9F8zI9CZ4bwFofZ4snRXY2GgSL4tT5nEtbM/2q8YQ+eW760AaKywn320y/jrFJFNWDYI+fGR",
- "yyh6fIRJ4hq/kB7sX8xZoGA8iRKZufGdf1WXtsh9I+N5AnrQeJi4XT/jRpjWgiCjp/p65NA16vbOoj0d",
- "HappbUTH9uvX+j6WzWIhEvNkpAvz+4LpZTXDMq4+y8V0IeqMF9OMQiE4fsumtGRTVUI6vXi4RT64Ab8i",
- "EXZ1d3P/eUyyIR2Y01JvPFZO6O79wL18Cwnc/9hZ27c6nN7lSL/LkX6XRfsuR/rd7t7lSL/LIH6XQfyf",
- "NYP4ZKOE6LJubc3pq3uqTUokpHbmmoGHzVrZf/tWSaYnhJwuDf+n5g6AC5A0JylVVjDi1u+5YIulJqpK",
- "U4Ds4IwnLUhSUbiJ7zf/tc/cs2p//zGQ/QfdPlZvEXDefl8UVfETmprI9+RsdDbqjSShEBfgcoFi86xC",
- "9xfba+uw/6se92fZ27qCrq1yZUnLEsy1pqr5nKXMojwX5jGwEB1vbS7wC0gDnM17RJi2adcRn+jl7nxi",
- "qMsmEhO6+/f7FYpGHnaz03zRtGZ/XgF7E5/qb9jt8cCNY/cY4h3L+BIs46szjT9RBta7ZKt/sAWFhtRW",
- "NvUbSFJ1GdGI3snLSFadbHgzjgBpJZle4w1HS/bbOZj/vzd8XIG88JdfJfPRwWipdXkwnWK9k6VQejoy",
- "V1PzTXU+mvuBLuwI7nIpJbvAXMnvP/3/AAAA//+MsCXg4BkBAA==",
+ "H4sIAAAAAAAC/+x9aXPctrLoX8Gbe6u83OFIXu+xqlL3KbaT6B3bcdlK7hL5xRiyZwZHJMADgNJM/Pzf",
+ "X6EBkCAJckaLt0SfbA2xNBqNRu/4MElFUQoOXKvJwYdJSSUtQIPEv2iaiorrhGXmrwxUKlmpmeCTA/+N",
+ "KC0ZX06mE2Z+LaleTaYTTgto2pj+04mEf1ZMQjY50LKC6USlKyioGVhvStO6HmmdLEXihji0Qxw9m3wc",
+ "+UCzTIJSfSh/5vmGMJ7mVQZES8oVTc0nRc6ZXhG9Yoq4zoRxIjgQsSB61WpMFgzyTM38Iv9ZgdwEq3ST",
+ "Dy/pYwNiIkUOfTifimLOOHiooAaq3hCiBclggY1WVBMzg4HVN9SCKKAyXZGFkFtAtUCE8AKvisnBbxMF",
+ "PAOJu5UCO8P/LiTAH5BoKpegJ++mscUtNMhEsyKytCOHfQmqyrUi2BbXuGRnwInpNSMvK6XJHAjl5M0P",
+ "T8mDBw+emIUUVGvIHJENrqqZPVyT7T45mGRUg//cpzWaL4WkPEvq9m9+eIrzv3UL3LUVVQrih+XQfCFH",
+ "z4YW4DtGSIhxDUvchxb1mx6RQ9H8PIeFkLDjntjG17op4fxfdFdSqtNVKRjXkX0h+JXYz1EeFnQf42E1",
+ "AK32pcGUNIP+tp88effh3vTe/sd/+e0w+R/356MHH3dc/tN63C0YiDZMKymBp5tkKYHiaVlR3sfHG0cP",
+ "aiWqPCMreoabTwtk9a4vMX0t6zyjeWXohKVSHOZLoQh1ZJTBgla5Jn5iUvHcsCkzmqN2whQppThjGWRT",
+ "w33PVyxdkZQqOwS2I+cszw0NVgqyIVqLr27kMH0MUWLguhQ+cEFfLzKadW3BBKyRGyRpLhQkWmy5nvyN",
+ "Q3lGwguluavUxS4rcrwCgpObD/ayRdxxQ9N5viEa9zUjVBFK/NU0JWxBNqIi57g5OTvF/m41BmsFMUjD",
+ "zWndo+bwDqGvh4wI8uZC5EA5Is+fuz7K+IItKwmKnK9Ar9ydJ0GVgisgYv4PSLXZ9v/z9udXREjyEpSi",
+ "S3hN01MCPBXZ8B67SWM3+D+UMBteqGVJ09P4dZ2zgkVAfknXrKgKwqtiDtLsl78ftCASdCX5EEB2xC10",
+ "VtB1f9JjWfEUN7eZtiWoGVJiqszpZkaOFqSg6+/2pw4cRWiekxJ4xviS6DUfFNLM3NvBS6SoeLaDDKPN",
+ "hgW3piohZQsGGalHGYHETbMNHsYvBk8jWQXg+EEGwaln2QIOh3WEZszRNV9ISZcQkMyM/OI4F37V4hR4",
+ "zeDIfIOfSglnTFSq7jQAI049Ll5zoSEpJSxYhMbeOnQY7mHbOPZaOAEnFVxTxiEznBeBFhosJxqEKZhw",
+ "XJnpX9FzquDxw6ELvPm64+4vRHfXR3d8p93GRok9kpF70Xx1BzYuNrX676D8hXMrtkzsz72NZMtjc5Us",
+ "WI7XzD/M/nk0VAqZQAsR/uJRbMmpriQcnPC75i+SkLea8ozKzPxS2J9eVrlmb9nS/JTbn16IJUvfsuUA",
+ "MmtYo9oUdivsP2a8ODvW66jS8EKI06oMF5S2tNL5hhw9G9pkO+ZFCfOwVmVDreJ47TWNi/bQ63ojB4Ac",
+ "xF1JTcNT2Egw0NJ0gf+sF0hPdCH/MP+UZW5663IRQ62hY3ffom3A2QwOyzJnKTVIfOM+m6+GCYDVEmjT",
+ "Yg8v1IMPAYilFCVIzeygtCyTXKQ0T5SmGkf6VwmLycHkX/Ya48qe7a72gslfmF5vsZORR62Mk9CyvMAY",
+ "r41co0aYhWHQ+AnZhGV7KBExbjfRkBIzLDiHM8r1rNFHWvygPsC/uZkafFtRxuK7o18NIpzYhnNQVry1",
+ "DW8pEqCeIFoJohWlzWUu5vUPtw/LssEgfj8sS4sPFA2BodQFa6a0uoPLp81JCuc5ejYjP4Zjo5wteL4x",
+ "l4MVNczdsHC3lrvFasORW0Mz4i1FcDuFnJmt8WgwMvx1UBzqDCuRG6lnK62Yxj+5tiGZmd936vxtkFiI",
+ "22HiQi3KYc4qMPhLoLnc7lBOn3CcLWdGDrt9L0c2ZpQ4wVyKVkb30447gscaheeSlhZA98XepYyjBmYb",
+ "WVivyE13ZHRRmIMzHNAaQnXps7b1PEQhQVLowPB9LtLTazjvczNO/9jh8GQFNANJMqppcK7ceYnf2djx",
+ "J+yHHAFkRLD/Gf9Dc2I+G8I3fNEOaxR2hvQrAvN6ZvRcKz3bmUwD1L8FKaxqS4xKeiEonzaT93iERcsu",
+ "POK51aYJ9vCLMEtvbGWHcyEvRy8dQuCksQASakYNjsu0s7PYtCoTh5+IFcE26AzUOF36wmSIoe7wMVy1",
+ "sPBW00+ABWVGvQ4stAe6biyIomQ5XMN5XVG16i/CqHUP7pO3Px0+unf/9/uPHhu9pJRiKWlB5hsNitx2",
+ "0jRRepPDnf7KUJ6tch0f/fFDbzdqjxsbR4lKplDQsj+UtUfZS8s2I6ZdH2ttNOOqawB3OZbHYNiLRTux",
+ "plYD2jOmzJ1YzK9lM4YQljWzZMRBksFWYrro8pppNuES5UZW16F8gJRCRiwieMS0SEWenIFUTESM269d",
+ "C+JaeIGk7P5uoSXnVBEzNxrrKp6BnMUoS685gsY0FGrbhWqHPl7zBjduQCol3fTQb9cbWZ2bd5d9aSPf",
+ "234UKUEmes1JBvNq2ZJdF1IUhJIMO+LF8YItVzq4R19LIRbXLm5EZ4ktCT+ggZ3kpo+76axsgAC/EhkY",
+ "RalS18Dem8Ea7BnKCXFG56LShBIuMkCtqlJxxj/gmkOfALoydHiX6JUVLOZgJPiUVma1VUnQUN+jxaZj",
+ "QlNLRQmiRg1YMmsTtG1lp7Nun1wCzYxkD5yIuTMXOkMmLpKil0F71umunYiu04KrlCIFpYxGZuXsraD5",
+ "dpYs9QieEHAEuJ6FKEEWVF4SWC00zbcAim1i4NZyorOx9qHebfqxDexOHm4jlUYps1RghFJz4HLQMITC",
+ "HXFyBhJtjZ90//wkl92+qhyIBHCi1TErULfjlAsFqeCZig6WU6WTbcfWNGrJf2YFwUmJnVQceMC+8IIq",
+ "bS3OjGeoC1h2g/NYw4OZYhjgwSvQjPyrv/36Y6eGT3JVqfoqVFVZCqkhi62Bw3pkrlewrucSi2Ds+r7V",
+ "glQKto08hKVgfIcsuxKLIKprw4xzyfQXh+YLcw9soqhsAdEgYgyQt75VgN3QGzoAiFEc655IOEx1KKd2",
+ "wU4nSouyNOdPJxWv+w2h6a1tfah/adr2iYvqhq9nAszs2sPkID+3mLV+8BU1QjuOTAp6au4mFMGtabwP",
+ "szmMiWI8hWSM8s2xfGtahUdgyyEd0H5cpE0wW+dwdOg3SnSDRLBlF4YWPKCKvaZSs5SVKEn8HTbXLlh1",
+ "J4iadEgGmjKjHgQfrJBVhv2J9XV0x7ycoLWT1NwHvyc2R5aTM4UXRhv4U9igbfe1daIfB673a5AUI6Oa",
+ "0005QUC9a85cyGETWNNU5xtzzekVbMg5SCCqmhdMaxsV0RYktSiTcICoRWJkRmcTsg5ovwO7GKne4lDB",
+ "8vpbMZ1YsWUcvuOO4NJChxOYSiHyHWznPWREIdjJtk5KYXaduSAcH6nhKakFpBNi0CBYM89bqoVmXAH5",
+ "b1GRlHIUwCoN9Y0gJLJZvH7NDOYCq+d0VvQGQ5BDAVauxC9373YXfveu23OmyALOfeSaadhFx927qCW9",
+ "Fkq3Dtc1qOjmuB1FeDuaasxF4WS4Lk+ZbbVFuJF32cnXncFr+445U0o5wjXLvzID6JzM9S5rD2lkRdVq",
+ "+9px3J2sMMHQsXXjvqML8dPo8M3QMej6EweOl+bjkO/FyFf55hr4tB2ISCglKDxVoV6i7FexCIMb3bFT",
+ "G6Wh6Kv2tuvvA4LNGy8W9KRMwXPGISkEh000np9xeIkfY73tyR7ojDx2qG9XbGrB3wGrPc8uVHhV/OJu",
+ "B6T8unY6XsPmd8ftWHXCsE7USiEvCSVpzlBnFVxpWaX6hFOUioOzHDH1e1l/WE966pvEFbOI3uSGOuFU",
+ "GRzWsnLUPLmAiBb8A4BXl1S1XILSHflgAXDCXSvGScWZxrkKs1+J3bASJNrbZ7ZlQTdkQXNU6/4AKci8",
+ "0u0bE6PPlDZalzUxmWmIWJxwqkkORgN9yfjxGofzQV6eZjjocyFPayzMoudhCRwUU0ncJfGj/foTVSu/",
+ "fNPQM0nX2RpRzPhNiNpGQyu8/f/e/o+D3w6T/6HJH/vJk3/be/fh4cc7d3s/3v/43Xf/r/3Tg4/f3fmP",
+ "f43tlIc9FhvlID965qTJo2coMjTGpR7sn83iUDCeRInseAWkYBxDbDu0RW4bwccT0J3GTOV2/YTrNTeE",
+ "dEZzllF9OXLosrjeWbSno0M1rY3oKJB+re9iLt2lSEqanqJHb7JkelXNZ6ko9rwUvbcUtUS9l1EoBMdv",
+ "2R4t2Z4qId07u7flSr8CvyIRdtVhspcWCPr+wHg8I5osXYginrxFxS1RVMoZKTFcx/tlxGJax6zaXLUD",
+ "ggGNK+qdiu7P+48eT6ZNIGL93Wjq9uu7yJlg2ToWbprBOiapuaOGR+yWIiXdKNBxPoSwR11Q1m8RDluA",
+ "EfHVipWfn+cozeZxXvmTY4xO41vzI24DMMxJRPPsxll9xOLzw60lQAalXsVyWFoyB7ZqdhOg41IppTgD",
+ "PiVsBrOuxpUtQXlnWA50gbkUaGIUuwR11efAEpqnigDr4UJ2Umti9INisuP7H6cTJ0aoa5fs3cAxuLpz",
+ "1rZY/7cW5NaPz4/JnmO96paNfLZDB7GqEUuGC8dqOdsMN7OZezb0+4Sf8GewYJyZ7wcnPKOa7s2pYqna",
+ "qxTI72lOeQqzpSAHPsLrGdX0hPdktsHk2iC2jpTVPGcpOQ1l64Y8bcJUf4STk98Mxz85edfz3PQlYTdV",
+ "lL/YCZJzplei0onLCEkknFOZRUBXdUYAjmzzucZmnRI3tmXFLuPEjR/nebQsVTcyuL/8sszN8gMyVC7u",
+ "1WwZUVpIL9UYUcdCg/v7SriLQdJzn05UKVDkfUHL3xjX70hyUu3vPwDSCpV974QHQ5ObElo2r0tFLnft",
+ "XbhwqyHBWkualHQJKrp8DbTE3UfJu0Drap4T7NYK0fUBLThUswCPj+ENsHBcONwQF/fW9vKpvfEl4Cfc",
+ "QmxjxI3GaXHZ/QqCdi+9XZ3A394uVXqVmLMdXZUyJO53ps74Wxohy3uSFFtycwhccuQcSLqC9BQyzNOC",
+ "otSbaau7d1Y6kdWzDqZsPqONKsSkGzQPzoFUZUadUE/5ppv9oEBrn/LxBk5hcyyanJ2LpDu0o+/V0EFF",
+ "Sg2kS0Os4bF1Y3Q33zm+MeK4LH0QOwZserI4qOnC9xk+yFbkvYZDHCOKVnT4ECKojCDCEv8ACi6xUDPe",
+ "lUg/tjyjr8ztzRdJf/S8n7gmjRrmnNfhajDo3X4vAJOjxbkic2rkduHyem2EecDFKkWXMCAhhxbaHeO4",
+ "W1ZdHGTbvRe96cSie6H17psoyLZxYtYcpRQwXwypoDLTCVnwM1knAK5gRrBch0PYPEcxqY6WsEyHypal",
+ "3NYfGAItTsAgeSNweDDaGAklmxVVPuUYM7P9Wd5JBviEGRNjeXJHgbc9SL+us+A8z+2e05526bLlfIqc",
+ "z4sLVcsdctyMhI8BYLHtEBwFoAxyWNqF28aeUJrsjWaDDBw/LxY540CSmOOeKiVSZnPGm2vGzQFGPr5L",
+ "iDUmk51HiJFxADY6t3Bg8kqEZ5MvLwIkd9kn1I+NbrHgb4iHXdrQLCPyiNKwcMYHguo8B6Au2qO+vzox",
+ "RzgMYXxKDJs7o7lhc07jawbppWuh2NpJznLu1TtD4uyILd9eLBdak72KLrOaUGbyQMcFuhGIx0WJ2BYo",
+ "xJezZdW4GrpLd5l64PoewtXtINHrUgB0NP2mJJLT/LZqaO27uX+TNSx92iQw+6jSGO0P0U90lwbw1zdB",
+ "1KlZr7vXdVRJb7td21lpgfwUY8XmjPR9HX2PioIcUCJOWhJEchrzgBnBHpDdvvXdAs0dc98o39wJfPkS",
+ "lkxpaGzR5lbyzpXPbZujmHIvxGJ4dbqUC7O+N0LUPNrmdGLH1jI/+wrOhIZkwaTSCRryo0swjX5QqFH+",
+ "YJrGBYV2tICtPsOyOG/AaU9hk2Qsr+L06ub9+zMz7avaCKOq+SlsUBwEmq7IHKslRWOIRqa2YWajC35h",
+ "F/yCXtt6dzsNpqmZWBpyac/xjZyLDucdYwcRAowRR3/XBlE6wiDx4n8GuY6lhwVCgz2cmWk4GzM99g5T",
+ "5sfeGn1hoRi+o+xI0bUE2vLoKhj6SIy6x3RQbKif8jBwBmhZsmzdMQTaUQfVRXohbd9ncXewgLvrBtuC",
+ "gcDoF4uqlaDaCfuNdGvLRvFwbbOdMHPcTqsPGUI4FVO+6GEfUYa0sTLXNlwdA83/DptfTVtczuTjdHI1",
+ "u2EM127ELbh+XW9vFM/o4bZ2pJYb4IIop2UpxRnNE2ddHSJNKc4caWJzb4z9zKwubsM7fn744rUD/+N0",
+ "kuZAZVKLCoOrwnblN7MqWxtg4ID4ompG4fEyuxUlg82vc7ZDi+z5ClwBq0Aa7VXaaKztwVF0FtpFPNBm",
+ "q73VOQbsEkccBFDW/oHGdmXdA22XAD2jLPdGIw/tQFAMLm63ci1RrhAOcGXXQuAhSq6V3fROd/x0NNS1",
+ "hSeFc42U2CpsFTlFBO/6j40IibYoJNWCYp0MaxLoMydeFYk5fonKWRo3MPK5MsTBrePINCbYeEAYNSNW",
+ "bMAPySsWjGWaqR0U3Q6QwRxRZPqaK0O4mwtX/rfi7J8VEJYB1+aTxFPZOahYmMSZmvvXqZEd+nO5ga15",
+ "uhn+KjJGWCOme+MhEOMCRuim6oH7rFaZ/UJrc4z5IbDHX8DbHc7YuxJHPNWOPhw12xjAVdvdFFbr7fM/",
+ "Qxi2stv2UsFeeXXFagbmiJb+ZSpZSPEHxPU8VI8jIfe+Kg7DEI8/gM8imUtdFlNbd5oKxs3sg9s9JN2E",
+ "Vqi2h36A6nHnA58UViDx5lnK7VbbSpytQK84wYTBmXt2/IZgHMy9gNacns9prDyLETIMTIeN97NlSNaC",
+ "+M4e987mzVyhohkJHKl1W2aT0UqQTTZMP/H5kgKDnXZnUaGRDJBqQ5lgap1fuRKRYSp+Trkt6Gr62aPk",
+ "eiuwxi/T61xITCVVcZt3BikraB6XHDLEfjv1NmNLZsuZVgqCepluIFsH2lKRqzlq/csNao4WZH8aVOR1",
+ "u5GxM6bYPAdscc+2mFOFnLw2RNVdzPKA65XC5vd3aL6qeCYh0ytlEasEqYU6VG9qz80c9DkAJ/vY7t4T",
+ "cht9VoqdwR2DRXc/Tw7uPUGjq/1jP3YBuLrFY9wkQ3byn46dxOkYnXZ2DMO43aizaGKkLTY/zLhGTpPt",
+ "ustZwpaO120/SwXldAnxMIliC0y2L+4mGtI6eOGZrZSstBQbwnR8ftDU8KeBIG7D/iwYJBVFwXThPBtK",
+ "FIaemmKYdlI/nC277Eo1ebj8R3QQlt4/0lEiP6/R1N5vsVWjG/cVLaCN1imhNn84Z43r3ldXI0e+CgHW",
+ "rqpLVlncmLnM0lHMQU/+gpSScY2KRaUXyd9IuqKSpob9zYbATeaPH0bqdbVL9PCLAf7Z8S5BgTyLo14O",
+ "kL2XIVxfcpsLnhSGo2R3mqSJ4FQOejLj0WKeo3eDBceH3lUoM6Mkg+RWtciNBpz6SoTHRwa8IinW67kQ",
+ "PV54ZZ+dMisZJw9amR365c0LJ2UUQsZq0jTH3UkcErRkcIaBa/FNMmNecS9kvtMuXAX6L+t58CJnIJb5",
+ "sxxTBL6vWJ792iSBdUoeSsrTVdTuPzcdf28qU9dLtuc4WgJlRTmHPDqcvTN/93dr5Pb/h9h1noLxHdt2",
+ "Sxna5XYW1wDeBtMD5Sc06GU6NxOEWG1nxdRRl/lSZATnaeptNFTWr84YlCv7ZwVKxzIM8ION/ED7jtEL",
+ "bLUsAjxDqXpGfrQvy6yAtMoBoDTLiiq3qeWQLUE6w2NV5oJmU2LGOX5++ILYWW0fW1/VVutaojDXXkVH",
+ "rw+K8+wWQ+hLpcbjm3cfZzzg0qxaaazOoTQtylgummlx7Btgwlto60QxL8TOjDyzErby8pudxNDDgsnC",
+ "SKb1aJbHI02Y/2hN0xWKri1uMkzyu5eZ81SpgmL8dVHdur4OnjsDt6s0ZwvNTYkw+sU5U/ZBETiDdvpb",
+ "nQvqVCefDtdenqw4t5QS5dFjucqXQbsHzjq0vTk0ClkH8RcUXGyVxotW3XuLvaIFK7ol/HpV+G0KVF0P",
+ "1j8UlVIuOEuxXETwhEkNsnucZBdfwQ6VNbrGKH/E3QmNHK5o4cA6nMhhcbCUoGeEDnF9Y2Xw1WyqpQ77",
+ "p8ZXMFZUkyVo5TgbZFNf/9LZSxhX4Ool4Ts1AZ8UsuV/QQ4Zdeklten3gmSEsfMDAvAP5tsrpx5hUOkp",
+ "4ygIObS5+FVr0cC3E7SRnpgmSwHKraedQKh+M31mmEuXwfrdzL+1gGNY94VZtvXV9Yc69J475ykzbZ+a",
+ "trZyQvNzK0zRTnpYlm7S4eqoUXlAr/kggiMemMSbwAPk1uOHo42Q26jLHe9TQ2hwhg47KPEe7hFGXSm0",
+ "Uxr5jOaVpShsQWyoSzRhmvEIGC8Yh+YlkMgFkUavBNwYPK8D/VQqqbYi4E487Rhojl66GENT2plorzpU",
+ "Z4MRJbhGP8fwNjZFTgcYR92gEdwo39QPkBjqDoSJp/jykUNkv2QpSlVOiMow7LhTxDTGOAzj9mWS2xdA",
+ "/xj0ZSLbXUtqT85FbqKhTLJ5lS1BJzTLYoXmvsevBL+SrELJAdaQVnWhrrIkKZZgaNek6FObmygVXFXF",
+ "yFy+wRWnS0VMjn6FEygfV90MPiPIfg3rffb89ZvnTw+Pnz+z94VRy20qmZG5JRSGIRo9VmkwonOlgLwP",
+ "0fge+73vLDgOZlC8OEK0YQFlT4gYUD/f4L+xYlrDBOR86heO6vIOdOx4YfG+PVJPODdHL1FsmeyOCbz6",
+ "ro6OZurLncem/7UeyFws24B85jT3MWYc7lGMDT8391uYBd6rEGdvwDpJG2OohH8HAbXbOr2wzTzxxu2V",
+ "jEPbfV3Sftx6Mlycfop39EAkZZDcT60YYJ1BQ/GU6WD4L9UuC0dTMsopsaJ8bAQbjGEr2du3MKOGsKEA",
+ "DBt/YT73eu8mwPbUARx7FKE+sqcP0N992CApKXOezoZZ9DHrAoz7Id+7hB42G9xdhAvbxUFiK4lXCB+u",
+ "s9HU1sBroBSKNVUtY6XDdwwrOcbq30GdkP5Y3qd7Bqk2Qn3gq5IAF6kaYiYLHjq4qbcxoH7U0TeuzMZY",
+ "bY1+/dItzKaXARBksdjaj7PdK0kc1hEJ6CfFpwaWwN1bA+3Y3p0jDBcLSDU725Jx8Z9GS22i+adej7UP",
+ "2QQJGKyOWPOv715QvW4AGkuIGIUnqD91ZXCG4q1PYXNLkRY1RItRTj3Pu0yiMmIAuUNiSESomMfPGt6c",
+ "E4apmjIQC97DbrtDU/JlsAp4kD90ybk8SRIa5hSNTHkmYpr7TnOZrhfKtMPgq6GkjH4d3mFB6BmWPVb1",
+ "Cw7187qBVkOO+uWgzl2iNObH1LZmnzINyv/mk+HsLPbZ5qZOOVr2z6nMfIuoquq14GTkPuplUvgasl2g",
+ "F/XMrImH6sfORwqMYNRbmgvF+DIZCh1shyCFT76hoxWvAyxwjHAtQLr3CbR/FTvRwsdPjcExhgr3PNll",
+ "kKAGi3pZ4AZT7d80tQSwTCK1b6I7J3K4QKO3UgOdDDL+h+ccQ/ZT+90Hi/syeTto5I5ek60p+z4Sjqke",
+ "EkOqXxB3W24PQr+M1ss4t+/VqFj6PzeoDK3HpRRZldoLOjwYjY1h1+IaI6wkqjCm/VX2ZP8cS828CFJ6",
+ "TmGzZ+XvdEV5U/OnfaytCGXXEKTQdnb7Wg0Ccd0nX9oFLK8Fzi+pVE8npRB5MmAuPupXMeiegVOWnkJG",
+ "zN3hY0gGKoGT22ilrP2B56uNz9ovS+CQ3ZkRYtTyotQb7xpsF+TsTM5v6bH51zhrVtnCIk7fn53wePgT",
+ "lvyQV+RvfphxrqbAML8rTmUH2VImYD1QQUHS80hd/F3faow467q1yhuislDEpJRL5ozudL77On+E9INi",
+ "3ePaT5hS7rM+UyGt6QilJW/Q6QovL4ceOezojf7JzPqcBVMqjKg1TEHZAliiLyQFRgH1tNa145vfV8kx",
+ "H1pwrBHRV+UVmsawzmKICENn8ozmn18dx0T5Q8SHe80lvtBQnwuRbFGpLud9f0F3mjvQ3a5vav4azQf/",
+ "CWaPojZNN5SzC9YFyH3dNKwHRHOSi+YhAhySnOOY1gh67zGZuwjLUkLKFOsEn5/7EnC1+oIVUZtHfsb1",
+ "pW3r/FXoK5CxE3hFSV415aS0QH7XQNic/S8cSDdwcqNUHqO+HllE8DfIfV9vN7kFzxJ4WccR8xdG3Mua",
+ "5QZLia00TOrcwuhPW3ZgW4iwE4chJFyzPThwQF/QHtxPV911ebgO3NBKQX+dO9+zLdxGrthmbbs6M/rI",
+ "HfZB6PkuPoh40TTTHZ0gFiFYcZAgqOT9vfdEwgJLigty9y5OcPfu1DV9f7/92TCuu3ejAthnc3+0Xv51",
+ "88Yo5tehuD0bmzYQItrZj4rl2TbCaAX8NuX9MaT1dxca/UUeGPjdWkL7R9WVZr6I47W7CYiYyFpbkwdT",
+ "BaG8O0Txum6z6NvMCtJKMr3BjG1vOGO/Ryvh/Fjb2p2vps7xc7e8FqdQ5/w3lvlKeTniR2Hfci6MNIxu",
+ "b41vPT1f06LMwR2U727N/x0e/O1htv/g3r/P/7b/aD+Fh4+e7O/TJw/pvScP7sH9vz16uA/3Fo+fzO9n",
+ "9x/enz+8//Dxoyfpg4f35g8fP/n3W4YPGZAtoBOfHzT5L3yFIzl8fZQcG2AbnNCS1U+8GTL2BcBpiicR",
+ "CsryyYH/6X/7EzZLRdEM73+duPSDyUrrUh3s7Z2fn8/CLntLNMUlWlTpas/P039a6/VRHRptU1pxR23U",
+ "qyEF3FRHCof47c3zt8fk8PXRrCGYycFkf7Y/u4cP55TAackmB5MH+BOenhXu+54jtsnBh4/Tyd4KaI6e",
+ "K/NHAVqy1H9S53S5BDlzldDNT2f397zQtPfBmSE/mlGXsVx2G+QdRPb2C4Q7lwZGytgg7lbBTeXqP07r",
+ "MqzOSsAzjL21lj3D2mpkHWVNybWjhlH5xHNbiefgt8hLMwu2rGTnUcra/+9qNDNF7JPokjgJ4zVNT8P4",
+ "ViTIf1YgNw3BOFYWlpDxJTNdFGyhlmU7ZKyRa2LP18UqrePMZp8DSq09Ag0n0rKCEJKGrxpeuZ88effh",
+ "0d8+TnYABN1TCjDB8D3N8/f21VBYo43fp+i7FMxppDwkKgLTxsKMHZptmmLMW/01rABet2lHWr/ngsP7",
+ "oW1wgEX3gea5aSg4xPbgHabAISXgIbq/v39tTwfUyQU2cq4exZPEJQbqcxj7KfKumH9BYOBRsYfXuNB2",
+ "zM2Vl9sdrrfo72mGVZlBabuUe9/sUo44eogNxyf2Rvs4nTz6hvfmiBueQ3OCLYP88v4t8gs/5eKc+5ZG",
+ "mqmKgsoNyipB6fhQKv04eFvthWVu9z60/IfZle6yXoXvo2dbrrdbaogp9gsvdaromu91nVj0MLlSwbBm",
+ "Sqs7M/Jj2BsZM+Yx2izBSvLmHc1SijNmtHlfmMGXe2hgu6XCFM/oZRsYZW/u3U967x62rQ6tyj0xYFok",
+ "PgpTL8DgqhdfP6688wjKpR4ZCer1XqLq4SetxN5R+gaftt6Bwd7gbuhZ8AHxJoC3lnTadZY/Pd+1+ltw",
+ "TbTug0/Ilb9xYe0lzQ2dBMvt5LjZclY3QtxfRoirY87sW2ZYwXFMrMMy7XsffPWxaxDlXPW1HYS4UNMN",
+ "+gbVsW53OMWdmS0lFra5HDtw8WNbxTOsCXcjmH1qwaxfTDEGRlMi78sJYwjDqqm2eJEHxFqPI1yoKuQ3",
+ "Kn39hZE1KG4ZSLcLWpfgjT0hynHiT8Yz/5TCk0Pajdj0lxabbMj2iODUqnTq4vuHZSfQLqXLpiZH8gEU",
+ "hhXb0adECemiXEvJhGR6MyWMkwzM2UOPoZBY1EXLiqfW0G+nAI7/fXn4X5hh8PLwv8h3ZH9ai2CY8x6Z",
+ "3sZwtmWgH0H3Q5XV95vDWhwYlYW+GgHjuEZSkEQQol4LX6wUkVbQ9XdDKFtbv2JMPCvoejIqiUy/HWnx",
+ "qkJTJ3myT0VYsocTdPr7h/XakbOKwJqmOt8QivfPxqZ4qGreVBptixtalEk4QDSab2RG/25XLM/8osG7",
+ "kZJA+D7WOHzHnaqMLXS47E98JG+7YNJDRhSCy0l5N7v7ze5uXywlpTBnmmHJqeY+8XdVC8jm9SYH7kBe",
+ "woz8t6gw2MU+Tgqxcuk4A+Zw+DmdABrk7+b4NGyNnbt3uwu/e9ftOVNkAefIQSnHhl103L37JxBZ13WV",
+ "akq44AnHtzPPgAQRcjdy61cttz7af/DNruYtyDOWAjmGohSSSpZvyC+8Lut3NbG85jkVDwotjvKfXkJU",
+ "I0UH4vuVfNdd3zTTjWTYyo8NTAj1E8dOV542byQZXR7LsfkSN2rqXScY+Ge9KnY/pj3HyiwmpAcenO83",
+ "R892kcu/EUfozmVBI/dafG8+9Q0Qjad583niaXZjpg/3H34+CMJdeCU0+QHNZZ+YpX9S20GcrAJmc2GP",
+ "SuMxCVmLyzcfZSrmhE5dKXesLb4hdT6o4SeWEdrHnPpcw8ywK7/4iu3zW83CUbrsoveGL9zwhSvxhS5B",
+ "NRwB8z7V3gd0FYTsoHckMW/tT+RiDPwtUhTe4SLIAnS6cvmwnbSYCFvx1YWHecrYGzzX7P9DoCPFLcMy",
+ "Xfg2zI5530GqIjq9QEaI72dfQc98ZgvM3q8rR/unptCdw/zrC/XDC+55GqZ8zLlPmzW7eCEonzaT99N0",
+ "EC3X4TO8QfDFENxjas9dkqk9Xm4Rf4aodP9IQkJeiSYr2xVO/jOaPT7ljfypF/RKcLB+aSOxWlq8cUHW",
+ "4gK+VodI8Qng1vHoHsCPiw57WEfC8sC9ugrbkDARL2y5Rbhobmwt6gCJaC3KOeSCL9XXeWePbXMcL5Ht",
+ "rkt+xut6/vWE9qdYooILX93MFS1RjKdgX/PwD98VTCkXxvOFBfpPaWz9nNZRLBhaV4zwcQrRGrGKZZ36",
+ "ikFBjyHW0opn+KDXLPu4ncUEPrELchfGA+4SGnFpWQKVl2cr253ux50Zj56F0WCtEpV1sZUIKAZFF4xX",
+ "+LfJjjoTphWKBVlRtSKLiltA6zdv7eFzoVpiMa1dQubOFYsDcsLvErWij+7d//3+o8f+z/uPHg9ofWYe",
+ "V+Wgr/c1A5nPdphdlL8/b3BDW2GpkXfwubfyYjs0nbBsHa1H19ScDs+F87Agn7ilSEk3g2Usyy01s8Nh",
+ "m/rZn7/IldJsHn8C9iezPWJB6oeZjvj3tXhmKzG5UtM3tbIHQhICJmIIrSmaXWN9vH72iADWIcu6UPHn",
+ "ttk2EaP2FvPIk50L5YvKhvqLyIavBE9Q3QPuBZI2Wr6cLIg1E6eB/6R+6s6IsqoqSyF1fbrVbCcxDQad",
+ "2KGUNki4TghLqU5XVbn3Af+DpUw+NkVD7EuOe9azMyaHvbUtrjVmz47ZlLtrV89x3iaxIC9ZKsUhVtN0",
+ "N4baKA1F/51/2/X3sTcCo7eL4DnjkBSCxwrv/IxfX+LHaL1OjAMa6IwRWUN9u6+ztuDvgNWeZxfmdlX8",
+ "fiUK6JUsIJ3VSijruGe0OiD9N6el9WBCc0xaP+99aP3pHLCupVpVOhPnQV9Ukuy538VBE1TF21nVaZSL",
+ "Tq1PRTJQhrq+PRtKgIcYaddfI6VTgtqHg9VT/qJWlQXjWYdIUHpLxRlIVSv98ivxlf5ZTCu7ozxgGpXa",
+ "xicqdb0X8iuRgR23XdAulsXCRQauCFj/Hq5FjbjG6ply066jQ6S0Wq40qUqiRUxbaTomNLWsyz5Uora9",
+ "7GBb+QrmZ0BoLoFmGzIH4ETMzaLbL+QQqvAdHa/yOIEq/kBBA1cpRQpKQZaEr1qPgVaXVkMFSY/gCQFH",
+ "gOtZiBJkQeUlgbWSxTiguhNaXoNb+yud8NCHerfpxzawO3m4jVQC8RcYWjxEUebgbB4RFO6IE1TJ2Sfe",
+ "Pz/JZbevKvHh5MgTG/brMSvw/uOUCwWp4Jkafghn27HF4tjBWpRZQXBSou/RmoEH5PEXVGn3bnfrvYCg",
+ "CLeZYuTlnqGyqGbkX+uiqL2xU8MvuapU86S5VdEgi62Bw3pkrlewrudCO7gfu9YBtSCVgm0jD2EpGL9+",
+ "5Dx4ikcHVm4skd1fHKbiUqe/9VHZAqJBxBggb32rALuhBXYAEHwItQwlcPfuQwPXXIgcKLemNFGW5vzp",
+ "pOJ1vyE0vbWtD/UvTds+cbkURuTrmQAV6ucO8nOLWYVxjiuqiIODFPTUqfBLl0nYh9kcxgS9VckY5Ztj",
+ "+da0Co/AlkPa1RXD4986Z53D0aHfKNENEsGWXRhacEw7/SrE7ovKs127/id02Le180C8mnWkwr1zynSy",
+ "ENK9zUYXGmREtezUBKVMK2czssYzLZwrjOAIjqG4cdwbX01RFZeGZUHwqcBm9/uRv2aqH4TcKdSwHQ1A",
+ "mSYV18zXczHnrZYxvz799UZ6vpGeb6TnG+n5Rnq+kZ5vpOcb6flTS89fJneIJInn0z50JJYWTibfpIR/",
+ "Y7Ee0UYCMdUpCUZEN+d4NKZYA81xQSzHy7UUajA5EZ8QUqKSKZDUTMc4KXNqpCFYa18ih8ypgscPwxfl",
+ "l5IW7hEhw2tMgwf3ydufDn0g08oF3LTb3vbPmCu9yeGOy72oX/nwSRjADQZdDgb12k/qAs6sML9gORBl",
+ "cPUcWz+DM8iNJG9jJIjRRfra0THQ/KnDzRblqPWOgxnt/bSlkzm0FbQMnoXDtVJFKAa9dZ5hWNBcDb/D",
+ "YMcraBkrMlTzaas2IWv4XmSbDrmbXdvDDWwTehPOxDiVm0icYo+8e6ShhWE+jrD6et/Haw+66xNtn8y2",
+ "UVj83VIVPZRjVB6NNqs3rDeUjXhcdOgk+ghRN8RqUgO4S5SBoWe/J+SN7fdFbyuCELkj1nDmr8bn233I",
+ "3TENbGsEKsd6vtVUVY/46OnFsz/1D13ji6yO4taJabQEnjjeksxFtklanKl9wWRMUaWgmG+/ZELWiIep",
+ "vlfMl/Er6MvcEM+CxY2x25Ae1onjrQOM18aR7sZ2a2zhiI7zBhj/1Nx3iEOGIBDHemK6c7eG6gX5WTPN",
+ "5oan3fC04DR2LnvGXQhzl4nMLsfT5EZWfJidPbev0ysSHtLb6o5hWYjRtW5Z7jOYV8ulfZK9a4XGWp44",
+ "XvNQ8Ofmcna5uzK4ixGHHbwugHHV3M3ucH3GEUTi3haSLKWoyju2sjLfoIGzKCnfeKeG0fyLKrc4tPnm",
+ "18tDbQBx7D1Mb1wbtsu99ua3wPrkbtH27xYt5Jwq9y4iZKTimL4YSzNYd57q3I7x4zVvOPDoQ57+5fre",
+ "6ty8u3B/v8suqLB25JQgE73m9kC1DpNLZ7And3ZT5OWvcSO8thXMBxhsPzS/YQjbLwYZsCy8GTolP/3V",
+ "0Oanb+h5WED0uoTG3bX1FeCdWGuvkfqoRoyUgmYpVWjU4KDPhTz9xLKkXh9FrMgIJta57iemGZ1ktlWo",
+ "xHF3EinbuaBeK6/mmMQsvizXIAlpko4OXdmQFjZuDLt/FsPu9/7wKUKJpOfdw2l9OHgmd2BT9FyveZRL",
+ "7ZX2nYyh+OUwq9u2vNZIjN7w7YCM4JUK61CGvCSUpDlDd7PgSssq1SecokMrWFi/ZHTtphsWpZ76JnGf",
+ "asTl6YY64UaoWpDazRUVqRYQcWD/AOAlNlUtl6B0hxMvAE64a8U4qTjTOFfBUikSmwxkrmvD0We2ZUE3",
+ "ZEFz9Mj+AVKQuVEiwtqp6B5SmuW5iw4x0xCxOOFUkxwM03/JjEBnhvMehDriydJdjYV4HrB7JDuJW2d/",
+ "tF8x1dYt33sB0FlhP/sUuemXeco+Ydkg5EfPXF3zo2dYqraJC+nB/tmCBQrGkyiRmRvfxVd1aYvcNjKe",
+ "J6A7TYSJ2/UTboRpLQgyeqovRw5dp27vLNrT0aGa1kZ0fL9+re9iNbWWIjEqI12a35dMr6o5Pibva23t",
+ "LUVdd2svo1AIjt+yPVqyPVVCund2b4t8cAV+RSLs6ubm/hMlEQV0YE5LvfH4flN37wfu5Wt4Rubrfjtm",
+ "a8DpzUstNy+13LzlcfNSy83u3rzUcvOOyc07Jn/Vd0xmoxKiK9C39WUB3TNtUiIhtTPXDDxs1nqDoO+V",
+ "ZHpGyPHK8H9q7gA4A0lzklJlBSNu454LLEuoqjQFyA5OeNKCxBYjNBPfbv5r1dyTan//AZD9O90+1m4R",
+ "cN5+XxRV8RO6msh35GRyMumNJKEQZ+AqkmPzrMLwF9tr67D/qx73Z9nbuoJurHFlRcsSzLWmqsWCpcyi",
+ "PBdGGViKTrQ2F/gFpAHOlkcjTNvHXxCfGOXuYmKoqz0UE7r79/sFnq4+7Bax+qxlD/+8AvYYn+pv2PXx",
+ "wNGxewzxhmV8DpbxxZnGn6gO/E3J969sQaEjtfWmy1UK8/jHzCN2Jy8jWXOy4c04AqSVZHqDNxwt2e+n",
+ "YP7/zvBxBfLMX36VzCcHk5XW5cHeHr66thJK703M1dR8U52P5n6gSzuCu1xKyc7wxYZ3H/9/AAAA//9I",
+ "ljPEDCwBAA==",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
diff --git a/daemon/algod/api/server/v2/generated/types.go b/daemon/algod/api/server/v2/generated/types.go
index 53983915d..db3bd074e 100644
--- a/daemon/algod/api/server/v2/generated/types.go
+++ b/daemon/algod/api/server/v2/generated/types.go
@@ -366,6 +366,19 @@ type EvalDeltaKeyValue struct {
Value EvalDelta `json:"value"`
}
+// LightBlockHeaderProof defines model for LightBlockHeaderProof.
+type LightBlockHeaderProof struct {
+
+ // The index of the light block header in the vector commitment tree
+ Index uint64 `json:"index"`
+
+ // The encoded proof.
+ Proof []byte `json:"proof"`
+
+ // Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.
+ Treedepth uint64 `json:"treedepth"`
+}
+
// ParticipationKey defines model for ParticipationKey.
type ParticipationKey struct {
@@ -443,6 +456,32 @@ type PendingTransactionResponse struct {
// StateDelta defines model for StateDelta.
type StateDelta []EvalDeltaKeyValue
+// StateProof defines model for StateProof.
+type StateProof struct {
+
+ // Represents the message that the state proofs are attesting to.
+ Message struct {
+
+ // The vector commitment root on all light block headers within a state proof interval.
+ BlockHeadersCommitment []byte `json:"BlockHeadersCommitment"`
+
+ // The first round the message attests to.
+ FirstAttestedRound uint64 `json:"FirstAttestedRound"`
+
+ // The last round the message attests to.
+ LastAttestedRound uint64 `json:"LastAttestedRound"`
+
+ // An integer value representing the natural log of the proven weight with 16 bits of precision. This value would be used to verify the next state proof.
+ LnProvenWeight uint64 `json:"LnProvenWeight"`
+
+ // The vector commitment root of the top N accounts to sign the next StateProof.
+ VotersCommitment []byte `json:"VotersCommitment"`
+ } `json:"Message"`
+
+ // The encoded StateProof for the message.
+ StateProof []byte `json:"StateProof"`
+}
+
// TealKeyValue defines model for TealKeyValue.
type TealKeyValue struct {
Key string `json:"key"`
@@ -637,6 +676,9 @@ type DryrunResponse struct {
Txns []DryrunTxnResult `json:"txns"`
}
+// LightBlockHeaderProofResponse defines model for LightBlockHeaderProofResponse.
+type LightBlockHeaderProofResponse LightBlockHeaderProof
+
// NodeStatusResponse defines model for NodeStatusResponse.
type NodeStatusResponse struct {
@@ -716,26 +758,8 @@ type PostTransactionsResponse struct {
TxId string `json:"txId"`
}
-// ProofResponse defines model for ProofResponse.
-type ProofResponse struct {
-
- // The type of hash function used to create the proof, must be one of:
- // * sha512_256
- // * sha256
- Hashtype string `json:"hashtype"`
-
- // Index of the transaction in the block's payset.
- Idx uint64 `json:"idx"`
-
- // Merkle proof of transaction membership.
- Proof []byte `json:"proof"`
-
- // Hash of SignedTxnInBlock for verifying proof.
- Stibhash []byte `json:"stibhash"`
-
- // Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.
- Treedepth uint64 `json:"treedepth"`
-}
+// StateProofResponse defines model for StateProofResponse.
+type StateProofResponse StateProof
// SupplyResponse defines model for SupplyResponse.
type SupplyResponse struct {
@@ -777,6 +801,27 @@ type TransactionParametersResponse struct {
MinFee uint64 `json:"min-fee"`
}
+// TransactionProofResponse defines model for TransactionProofResponse.
+type TransactionProofResponse struct {
+
+ // The type of hash function used to create the proof, must be one of:
+ // * sha512_256
+ // * sha256
+ Hashtype string `json:"hashtype"`
+
+ // Index of the transaction in the block's payset.
+ Idx uint64 `json:"idx"`
+
+ // Proof of transaction membership.
+ Proof []byte `json:"proof"`
+
+ // Hash of SignedTxnInBlock for verifying proof.
+ Stibhash []byte `json:"stibhash"`
+
+ // Represents the depth of the tree that is being proven, i.e. the number of edges from a leaf to the root.
+ Treedepth uint64 `json:"treedepth"`
+}
+
// VersionsResponse defines model for VersionsResponse.
type VersionsResponse Version
@@ -821,8 +866,8 @@ type GetBlockParams struct {
Format *string `json:"format,omitempty"`
}
-// GetProofParams defines parameters for GetProof.
-type GetProofParams struct {
+// GetTransactionProofParams defines parameters for GetTransactionProof.
+type GetTransactionProofParams struct {
// The type of hash function used to create the proof, must be one of:
// * sha512_256
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go
index 9e18ca7d2..918440f19 100644
--- a/daemon/algod/api/server/v2/handlers.go
+++ b/daemon/algod/api/server/v2/handlers.go
@@ -46,6 +46,7 @@ import (
"github.com/algorand/go-algorand/node"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/rpcs"
+ "github.com/algorand/go-algorand/stateproof"
"github.com/algorand/go-codec/codec"
)
@@ -74,6 +75,7 @@ type LedgerForAPI interface {
GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error)
EncodedBlockCert(rnd basics.Round) (blk []byte, cert []byte, err error)
Block(rnd basics.Round) (blk bookkeeping.Block, err error)
+ AddressTxns(id basics.Address, r basics.Round) ([]transactions.SignedTxnWithAD, error)
}
// NodeInterface represents node fns used by the handlers.
@@ -116,7 +118,7 @@ func convertParticipationRecord(record account.ParticipationRecord) generated.Pa
}
if record.StateProof != nil {
- tmp := record.StateProof[:]
+ tmp := record.StateProof.Commitment[:]
participationKey.Key.StateProofKey = &tmp
}
@@ -145,6 +147,31 @@ func convertParticipationRecord(record account.ParticipationRecord) generated.Pa
return participationKey
}
+// ErrNoStateProofForRound returned when a state proof transaction could not be found
+var ErrNoStateProofForRound = errors.New("no state proof can be found for that round")
+
+// GetStateProofTransactionForRound searches for a state proof transaction that can be used to prove on the given round (i.e the round is within the
+// attestation period). the latestRound should be provided as an upper bound for the search
+func GetStateProofTransactionForRound(txnFetcher LedgerForAPI, round basics.Round, latestRound basics.Round) (transactions.Transaction, error) {
+ for i := round + 1; i <= latestRound; i++ {
+ txns, err := txnFetcher.AddressTxns(transactions.StateProofSender, i)
+ if err != nil {
+ return transactions.Transaction{}, err
+ }
+ for _, txn := range txns {
+ if txn.Txn.Type != protocol.StateProofTx {
+ continue
+ }
+
+ if txn.Txn.StateProofTxnFields.Message.FirstAttestedRound <= uint64(round) &&
+ uint64(round) <= txn.Txn.StateProofTxnFields.Message.LastAttestedRound {
+ return txn.Txn, nil
+ }
+ }
+ }
+ return transactions.Transaction{}, ErrNoStateProofForRound
+}
+
// GetParticipationKeys Return a list of participation keys
// (GET /v2/participation)
func (v2 *Handlers) GetParticipationKeys(ctx echo.Context) error {
@@ -566,13 +593,13 @@ func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params generated.Ge
return ctx.Blob(http.StatusOK, contentType, data)
}
-// GetProof generates a Merkle proof for a transaction in a block.
+// GetTransactionProof generates a Merkle proof for a transaction in a block.
// (GET /v2/blocks/{round}/transactions/{txid}/proof)
-func (v2 *Handlers) GetProof(ctx echo.Context, round uint64, txid string, params generated.GetProofParams) error {
+func (v2 *Handlers) GetTransactionProof(ctx echo.Context, round uint64, txid string, params generated.GetTransactionProofParams) error {
var txID transactions.Txid
err := txID.UnmarshalText([]byte(txid))
if err != nil {
- return badRequest(ctx, err, errNoTxnSpecified, v2.Log)
+ return badRequest(ctx, err, errNoValidTxnSpecified, v2.Log)
}
if params.Hashtype != nil && *params.Hashtype != "sha512_256" && *params.Hashtype != "sha256" {
@@ -633,7 +660,7 @@ func (v2 *Handlers) GetProof(ctx echo.Context, round uint64, txid string, params
return internalError(ctx, err, "generating proof", v2.Log)
}
- response := generated.ProofResponse{
+ response := generated.TransactionProofResponse{
Proof: proof.GetConcatenatedProof(),
Stibhash: stibhash[:],
Idx: uint64(idx),
@@ -914,7 +941,7 @@ func (v2 *Handlers) PendingTransactionInformation(ctx echo.Context, txid string,
txID := transactions.Txid{}
if err := txID.UnmarshalText([]byte(txid)); err != nil {
- return badRequest(ctx, err, errNoTxnSpecified, v2.Log)
+ return badRequest(ctx, err, errNoValidTxnSpecified, v2.Log)
}
txn, ok := v2.Node.GetPendingTransaction(txID)
@@ -1211,6 +1238,73 @@ func (v2 *Handlers) TealCompile(ctx echo.Context, params generated.TealCompilePa
return ctx.JSON(http.StatusOK, response)
}
+// GetStateProof returns the state proof for a given round.
+// (GET /v2/stateproofs/{round})
+func (v2 *Handlers) GetStateProof(ctx echo.Context, round uint64) error {
+ ledger := v2.Node.LedgerForAPI()
+ if ledger.Latest() < basics.Round(round) {
+ return internalError(ctx, errors.New(errRoundGreaterThanTheLatest), errRoundGreaterThanTheLatest, v2.Log)
+ }
+ tx, err := GetStateProofTransactionForRound(ledger, basics.Round(round), ledger.Latest())
+ if err != nil {
+ if errors.Is(err, ErrNoStateProofForRound) {
+ return notFound(ctx, err, err.Error(), v2.Log)
+ }
+ return internalError(ctx, err, err.Error(), v2.Log)
+ }
+
+ response := generated.StateProofResponse{
+ StateProof: protocol.Encode(&tx.StateProof),
+ }
+
+ response.Message.BlockHeadersCommitment = tx.Message.BlockHeadersCommitment
+ response.Message.VotersCommitment = tx.Message.VotersCommitment
+ response.Message.LnProvenWeight = tx.Message.LnProvenWeight
+ response.Message.FirstAttestedRound = tx.Message.FirstAttestedRound
+ response.Message.LastAttestedRound = tx.Message.LastAttestedRound
+
+ return ctx.JSON(http.StatusOK, response)
+}
+
+// GetLightBlockHeaderProof Gets a proof of a light block header for a given round
+// (GET /v2/blocks/{round}/lightheader/proof)
+func (v2 *Handlers) GetLightBlockHeaderProof(ctx echo.Context, round uint64) error {
+ ledger := v2.Node.LedgerForAPI()
+ if ledger.Latest() < basics.Round(round) {
+ return internalError(ctx, errors.New(errRoundGreaterThanTheLatest), errRoundGreaterThanTheLatest, v2.Log)
+ }
+
+ stateProof, err := GetStateProofTransactionForRound(ledger, basics.Round(round), ledger.Latest())
+ if err != nil {
+ if errors.Is(err, ErrNoStateProofForRound) {
+ return notFound(ctx, err, err.Error(), v2.Log)
+ }
+ return internalError(ctx, err, err.Error(), v2.Log)
+ }
+
+ lastAttestedRound := stateProof.Message.LastAttestedRound
+ firstAttestedRound := stateProof.Message.FirstAttestedRound
+ stateProofInterval := lastAttestedRound - firstAttestedRound + 1
+
+ lightHeaders, err := stateproof.FetchLightHeaders(ledger, stateProofInterval, basics.Round(lastAttestedRound))
+ if err != nil {
+ return notFound(ctx, err, err.Error(), v2.Log)
+ }
+
+ blockIndex := round - firstAttestedRound
+ leafproof, err := stateproof.GenerateProofOfLightBlockHeaders(stateProofInterval, lightHeaders, blockIndex)
+ if err != nil {
+ return internalError(ctx, err, err.Error(), v2.Log)
+ }
+
+ response := generated.LightBlockHeaderProofResponse{
+ Index: blockIndex,
+ Proof: leafproof.GetConcatenatedProof(),
+ Treedepth: uint64(leafproof.TreeDepth),
+ }
+ return ctx.JSON(http.StatusOK, response)
+}
+
// TealDisassemble disassembles the program bytecode in base64 into TEAL code.
// (POST /v2/teal/disassemble)
func (v2 *Handlers) TealDisassemble(ctx echo.Context) error {
diff --git a/daemon/algod/api/server/v2/test/handlers_resources_test.go b/daemon/algod/api/server/v2/test/handlers_resources_test.go
index c8c7c7a11..b3062b42b 100644
--- a/daemon/algod/api/server/v2/test/handlers_resources_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_resources_test.go
@@ -28,6 +28,7 @@ import (
generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger/ledgercore"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
@@ -41,6 +42,7 @@ import (
type mockLedger struct {
accounts map[basics.Address]basics.AccountData
latest basics.Round
+ blocks []bookkeeping.Block
}
func (l *mockLedger) LookupAccount(round basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, basics.MicroAlgos, error) {
@@ -112,6 +114,25 @@ func (l *mockLedger) Block(rnd basics.Round) (blk bookkeeping.Block, err error)
panic("not implemented")
}
+func (l *mockLedger) AddressTxns(id basics.Address, r basics.Round) ([]transactions.SignedTxnWithAD, error) {
+ blk := l.blocks[r]
+
+ spec := transactions.SpecialAddresses{
+ FeeSink: blk.FeeSink,
+ RewardsPool: blk.RewardsPool,
+ }
+
+ var res []transactions.SignedTxnWithAD
+
+ for _, tx := range blk.Payset {
+ if tx.Txn.MatchAddress(id, spec) {
+ signedTxn := transactions.SignedTxnWithAD{SignedTxn: transactions.SignedTxn{Txn: tx.Txn}}
+ res = append(res, signedTxn)
+ }
+ }
+ return res, nil
+}
+
func randomAccountWithResources(N int) basics.AccountData {
a := ledgertesting.RandomAccountData(0)
a.Assets = make(map[basics.AssetIndex]basics.AssetHolding)
diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go
index af970f4b1..789097fda 100644
--- a/daemon/algod/api/server/v2/test/handlers_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_test.go
@@ -41,16 +41,20 @@ import (
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/node"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/stateproof"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-algorand/util/execpool"
"github.com/algorand/go-codec/codec"
)
+const stateProofIntervalForHandlerTests = uint64(256)
+
func setupTestForMethodGet(t *testing.T) (v2.Handlers, echo.Context, *httptest.ResponseRecorder, []account.Root, []transactions.SignedTxn, func()) {
numAccounts := 1
numTransactions := 1
@@ -930,10 +934,10 @@ func TestGetProofDefault(t *testing.T) {
defer releasefunc()
txid := stx.ID()
- err := handler.GetProof(c, 1, txid.String(), generated.GetProofParams{})
+ err := handler.GetTransactionProof(c, 1, txid.String(), generated.GetTransactionProofParams{})
a.NoError(err)
- var resp generatedV2.ProofResponse
+ var resp generatedV2.TransactionProofResponse
err = json.Unmarshal(rec.Body.Bytes(), &resp)
a.NoError(err)
a.Equal("sha512_256", resp.Hashtype)
@@ -942,18 +946,8 @@ func TestGetProofDefault(t *testing.T) {
blkHdr, err := l.BlockHdr(1)
a.NoError(err)
- // Build merklearray.Proof from ProofResponse
- var proof merklearray.Proof
- proof.HashFactory = crypto.HashFactory{HashType: crypto.Sha512_256}
- proof.TreeDepth = uint8(resp.Treedepth)
- a.NotEqual(proof.TreeDepth, 0)
- proofconcat := resp.Proof
- for len(proofconcat) > 0 {
- var d crypto.Digest
- copy(d[:], proofconcat)
- proof.Path = append(proof.Path, d[:])
- proofconcat = proofconcat[len(d):]
- }
+ singleLeafProof, err := merklearray.ProofDataToSingleLeafProof(resp.Hashtype, resp.Treedepth, resp.Proof)
+ a.NoError(err)
element := TxnMerkleElemRaw{Txn: crypto.Digest(txid)}
copy(element.Stib[:], resp.Stibhash[:])
@@ -961,6 +955,205 @@ func TestGetProofDefault(t *testing.T) {
elems[0] = &element
// Verifies that the default proof is using SHA512_256
- err = merklearray.Verify(blkHdr.TxnCommitments.NativeSha512_256Commitment.ToSlice(), elems, &proof)
+ err = merklearray.Verify(blkHdr.TxnCommitments.NativeSha512_256Commitment.ToSlice(), elems, singleLeafProof.ToProof())
+ a.NoError(err)
+}
+
+func newEmptyBlock(a *require.Assertions, lastBlock bookkeeping.Block, genBlk bookkeeping.Block, l v2.LedgerForAPI) bookkeeping.Block {
+ totalsRound, totals, err := l.LatestTotals()
+ a.NoError(err)
+ a.Equal(l.Latest(), totalsRound)
+
+ totalRewardUnits := totals.RewardUnits()
+ poolBal, _, _, err := l.LookupLatest(poolAddr)
+ a.NoError(err)
+
+ latestBlock := lastBlock
+
+ var blk bookkeeping.Block
+ blk.BlockHeader = bookkeeping.BlockHeader{
+ GenesisID: genBlk.GenesisID(),
+ GenesisHash: genBlk.GenesisHash(),
+ Round: l.Latest() + 1,
+ Branch: latestBlock.Hash(),
+ RewardsState: latestBlock.NextRewardsState(l.Latest()+1, proto, poolBal.MicroAlgos, totalRewardUnits, logging.Base()),
+ UpgradeState: latestBlock.UpgradeState,
+ }
+
+ blk.BlockHeader.TxnCounter = latestBlock.TxnCounter
+
+ blk.RewardsPool = latestBlock.RewardsPool
+ blk.FeeSink = latestBlock.FeeSink
+ blk.CurrentProtocol = latestBlock.CurrentProtocol
+ blk.TimeStamp = latestBlock.TimeStamp + 1
+
+ blk.BlockHeader.TxnCounter++
+ blk.TxnCommitments, err = blk.PaysetCommit()
+ a.NoError(err)
+
+ return blk
+}
+
+func addStateProofIfNeeded(blk bookkeeping.Block) bookkeeping.Block {
+ round := uint64(blk.Round())
+ if round%stateProofIntervalForHandlerTests == (stateProofIntervalForHandlerTests/2+18) && round > stateProofIntervalForHandlerTests*2 {
+ return blk
+ }
+ stateProofRound := (round - round%stateProofIntervalForHandlerTests) - stateProofIntervalForHandlerTests
+ tx := transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.StateProofTx,
+ Header: transactions.Header{Sender: transactions.StateProofSender},
+ StateProofTxnFields: transactions.StateProofTxnFields{
+ StateProofType: 0,
+ Message: stateproofmsg.Message{
+ BlockHeadersCommitment: []byte{0x0, 0x1, 0x2},
+ FirstAttestedRound: stateProofRound + 1,
+ LastAttestedRound: stateProofRound + stateProofIntervalForHandlerTests,
+ },
+ },
+ },
+ }
+ txnib := transactions.SignedTxnInBlock{SignedTxnWithAD: transactions.SignedTxnWithAD{SignedTxn: tx}}
+ blk.Payset = append(blk.Payset, txnib)
+
+ return blk
+}
+
+func insertRounds(a *require.Assertions, h v2.Handlers, numRounds int) {
+ ledger := h.Node.LedgerForAPI()
+
+ genBlk, err := ledger.Block(0)
+ a.NoError(err)
+
+ lastBlk := genBlk
+ for i := 0; i < numRounds; i++ {
+ blk := newEmptyBlock(a, lastBlk, genBlk, ledger)
+ blk = addStateProofIfNeeded(blk)
+ blk.BlockHeader.CurrentProtocol = protocol.ConsensusCurrentVersion
+ a.NoError(ledger.(*data.Ledger).AddBlock(blk, agreement.Certificate{}))
+ lastBlk = blk
+ }
+}
+
+func TestStateProofNotFound(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t)
+ defer releasefunc()
+
+ insertRounds(a, handler, 700)
+
+ a.NoError(handler.GetStateProof(ctx, 650))
+ a.Equal(404, responseRecorder.Code)
+}
+
+func TestStateProofHigherRoundThanLatest(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+ handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t)
+ defer releasefunc()
+
+ a.NoError(handler.GetStateProof(ctx, 2))
+ a.Equal(500, responseRecorder.Code)
+}
+
+func TestStateProof200(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t)
+ defer releasefunc()
+
+ insertRounds(a, handler, 1000)
+
+ a.NoError(handler.GetStateProof(ctx, stateProofIntervalForHandlerTests+1))
+ a.Equal(200, responseRecorder.Code)
+
+ stprfResp := generated.StateProofResponse{}
+ a.NoError(json.Unmarshal(responseRecorder.Body.Bytes(), &stprfResp))
+
+ a.Equal([]byte{0x0, 0x1, 0x2}, stprfResp.Message.BlockHeadersCommitment)
+}
+
+func TestHeaderProofRoundTooHigh(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+ handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t)
+ defer releasefunc()
+
+ a.NoError(handler.GetLightBlockHeaderProof(ctx, 2))
+ a.Equal(500, responseRecorder.Code)
+}
+
+func TestHeaderProofStateProofNotFound(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t)
+ defer releasefunc()
+
+ insertRounds(a, handler, 700)
+
+ a.NoError(handler.GetLightBlockHeaderProof(ctx, 650))
+ a.Equal(404, responseRecorder.Code)
+}
+
+func TestGetBlockProof200(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t)
+ defer releasefunc()
+
+ insertRounds(a, handler, 1000)
+
+ a.NoError(handler.GetLightBlockHeaderProof(ctx, stateProofIntervalForHandlerTests*2+2))
+ a.Equal(200, responseRecorder.Code)
+
+ blkHdrArr, err := stateproof.FetchLightHeaders(handler.Node.LedgerForAPI(), stateProofIntervalForHandlerTests, basics.Round(stateProofIntervalForHandlerTests*3))
a.NoError(err)
+
+ leafproof, err := stateproof.GenerateProofOfLightBlockHeaders(stateProofIntervalForHandlerTests, blkHdrArr, 1)
+ a.NoError(err)
+
+ proofResp := generated.LightBlockHeaderProofResponse{}
+ a.NoError(json.Unmarshal(responseRecorder.Body.Bytes(), &proofResp))
+ a.Equal(proofResp.Proof, leafproof.GetConcatenatedProof())
+ a.Equal(proofResp.Treedepth, uint64(leafproof.TreeDepth))
+}
+
+func TestStateproofTransactionForRound(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ ledger := mockLedger{blocks: make([]bookkeeping.Block, 0, 1000)}
+ for i := 0; i <= 1000; i++ {
+ var blk bookkeeping.Block
+ blk.BlockHeader = bookkeeping.BlockHeader{
+ Round: basics.Round(i),
+ }
+ blk = addStateProofIfNeeded(blk)
+ ledger.blocks = append(ledger.blocks, blk)
+ }
+
+ txn, err := v2.GetStateProofTransactionForRound(&ledger, basics.Round(stateProofIntervalForHandlerTests*2+1), 1000)
+ a.NoError(err)
+ a.Equal(2*stateProofIntervalForHandlerTests+1, txn.Message.FirstAttestedRound)
+ a.Equal(3*stateProofIntervalForHandlerTests, txn.Message.LastAttestedRound)
+ a.Equal([]byte{0x0, 0x1, 0x2}, txn.Message.BlockHeadersCommitment)
+
+ txn, err = v2.GetStateProofTransactionForRound(&ledger, basics.Round(2*stateProofIntervalForHandlerTests), 1000)
+ a.NoError(err)
+ a.Equal(stateProofIntervalForHandlerTests+1, txn.Message.FirstAttestedRound)
+ a.Equal(2*stateProofIntervalForHandlerTests, txn.Message.LastAttestedRound)
+
+ txn, err = v2.GetStateProofTransactionForRound(&ledger, 999, 1000)
+ a.ErrorIs(err, v2.ErrNoStateProofForRound)
+
+ txn, err = v2.GetStateProofTransactionForRound(&ledger, basics.Round(2*stateProofIntervalForHandlerTests), basics.Round(2*stateProofIntervalForHandlerTests))
+ a.ErrorIs(err, v2.ErrNoStateProofForRound)
}
diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go
index 3ed69127e..ba2a29549 100644
--- a/daemon/algod/api/server/v2/test/helpers.go
+++ b/daemon/algod/api/server/v2/test/helpers.go
@@ -88,6 +88,7 @@ type mockNode struct {
err error
id account.ParticipationID
keys account.StateProofKeys
+ usertxns map[basics.Address][]node.TxnWithStatus
}
func (m mockNode) InstallParticipationKey(partKeyBinary []byte) (account.ParticipationID, error) {
@@ -117,7 +118,9 @@ func makeMockNode(ledger v2.LedgerForAPI, genesisID string, nodeError error) *mo
ledger: ledger,
genesisID: genesisID,
config: config.GetDefaultLocal(),
- err: nodeError}
+ err: nodeError,
+ usertxns: map[basics.Address][]node.TxnWithStatus{},
+ }
}
func (m mockNode) LedgerForAPI() v2.LedgerForAPI {
@@ -167,7 +170,12 @@ func (m mockNode) ListeningAddress() (string, bool) {
func (m mockNode) Stop() {}
func (m mockNode) ListTxns(addr basics.Address, minRound basics.Round, maxRound basics.Round) ([]node.TxnWithStatus, error) {
- return nil, fmt.Errorf("listtxns not implemented")
+ txns, ok := m.usertxns[addr]
+ if !ok {
+ return nil, fmt.Errorf("no txns for %s", addr)
+ }
+
+ return txns, nil
}
func (m mockNode) GetTransaction(addr basics.Address, txID transactions.Txid, minRound basics.Round, maxRound basics.Round) (node.TxnWithStatus, bool) {
diff --git a/daemon/algod/api/spec/v1/model.go b/daemon/algod/api/spec/v1/model.go
index b01db103e..9ae6fe23c 100644
--- a/daemon/algod/api/spec/v1/model.go
+++ b/daemon/algod/api/spec/v1/model.go
@@ -524,10 +524,10 @@ type Transaction struct {
// required: true
ApplicationCall *ApplicationCallTransactionType `json:"app,omitempty"`
- // CompactCert
+ // StateProof
//
// required: true
- CompactCert *CompactCertTransactionType `json:"compactcert,omitempty"`
+ StateProof *StateProofTransactionType `json:"sp,omitempty"`
// FromRewards is the amount of pending rewards applied to the From
// account as part of this transaction.
@@ -776,19 +776,20 @@ type ApplicationCallTransactionType struct {
OnCompletion string `json:"oncompletion"`
}
-// CompactCertTransactionType contains the additional fields for a compact cert transaction
-// swagger:model CompactCertTransactionType
-type CompactCertTransactionType struct {
- // CertRound is the round whose block this compact cert refers to.
+// StateProofTransactionType contains the additional fields for a state proof transaction
+// swagger:model StateProofTransactionType
+type StateProofTransactionType struct {
+ // StateProof is the msgpack encoding of the state proof.
//
// required: true
- CertRound uint64 `json:"rnd"`
+ // swagger:strfmt byte
+ StateProof []byte `json:"sp"`
- // Cert is the msgpack encoding of the compact cert.
+ // StateProofMessage is the msgpack encoding of the state proof message.
//
// required: true
// swagger:strfmt byte
- Cert []byte `json:"cert"`
+ StateProofMessage []byte `json:"spmsg"`
}
// TransactionList contains a list of transactions
@@ -941,24 +942,6 @@ type Block struct {
UpgradeState
UpgradeVote
-
- // CompactCertVoters is the root of the merkle tree of voters for compact certs.
- //
- // required: true
- // swagger:strfmt byte
- CompactCertVoters []byte `json:"compactCertVoters"`
-
- // CompactCertVotersTotal is the total amount of microalgos held by the voters in
- // the CompactCertVoters merkle tree.
- //
- // required: true
- CompactCertVotersTotal uint64 `json:"compactCertVotersTotal"`
-
- // CompactCertNextRound is the next round for which a compact certificate is
- // expected.
- //
- // required: true
- CompactCertNextRound uint64 `json:"compactCertNextRound"`
}
// UpgradeState contains the information about a current state of an upgrade
diff --git a/daemon/algod/api/swagger.json b/daemon/algod/api/swagger.json
index 5a788df54..045570222 100644
--- a/daemon/algod/api/swagger.json
+++ b/daemon/algod/api/swagger.json
@@ -1247,30 +1247,9 @@
"round",
"period",
"txnRoot",
- "timestamp",
- "compactCertVoters",
- "compactCertVotersTotal",
- "compactCertNextRound"
+ "timestamp"
],
"properties": {
- "compactCertNextRound": {
- "description": "CompactCertNextRound is the next round for which a compact certificate is\nexpected.",
- "type": "integer",
- "format": "uint64",
- "x-go-name": "CompactCertNextRound"
- },
- "compactCertVoters": {
- "description": "CompactCertVoters is the root of the merkle tree of voters for compact certs.",
- "type": "string",
- "format": "byte",
- "x-go-name": "CompactCertVoters"
- },
- "compactCertVotersTotal": {
- "description": "CompactCertVotersTotal is the total amount of microalgos held by the voters in\nthe CompactCertVoters merkle tree.",
- "type": "integer",
- "format": "uint64",
- "x-go-name": "CompactCertVotersTotal"
- },
"currentProtocol": {
"description": "CurrentProtocol is a string that represents the current protocol",
"type": "string",
@@ -1418,29 +1397,6 @@
},
"x-go-package": "github.com/algorand/go-algorand/daemon/algod/api/spec/common"
},
- "CompactCertTransactionType": {
- "description": "CompactCertTransactionType contains the additional fields for a compact cert transaction",
- "type": "object",
- "required": [
- "rnd",
- "cert"
- ],
- "properties": {
- "cert": {
- "description": "Cert is the msgpack encoding of the compact cert.",
- "type": "string",
- "format": "byte",
- "x-go-name": "Cert"
- },
- "rnd": {
- "description": "CertRound is the round whose block this compact cert refers to.",
- "type": "integer",
- "format": "uint64",
- "x-go-name": "CertRound"
- }
- },
- "x-go-package": "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
- },
"KeyregTransactionType": {
"description": "KeyregTransactionType contains the additional fields for a keyreg Transaction",
"type": "object",
@@ -1654,6 +1610,29 @@
},
"x-go-package": "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
},
+ "StateProofTransactionType": {
+ "description": "StateProofTransactionType contains the additional fields for a state proof transaction",
+ "type": "object",
+ "required": [
+ "sp",
+ "spmsg"
+ ],
+ "properties": {
+ "sp": {
+ "description": "StateProof is the msgpack encoding of the state proof.",
+ "type": "string",
+ "format": "byte",
+ "x-go-name": "StateProof"
+ },
+ "spmsg": {
+ "description": "StateProofMessage is the msgpack encoding of the state proof message.",
+ "type": "string",
+ "format": "byte",
+ "x-go-name": "StateProofMessage"
+ }
+ },
+ "x-go-package": "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ },
"StateSchema": {
"description": "swagger: model StateSchema",
"type": "object",
@@ -1719,7 +1698,7 @@
"first-round",
"last-round",
"app",
- "compactcert",
+ "sp",
"genesisID",
"genesishashb64"
],
@@ -1727,9 +1706,6 @@
"app": {
"$ref": "#/definitions/ApplicationCallTransactionType"
},
- "compactcert": {
- "$ref": "#/definitions/CompactCertTransactionType"
- },
"curcfg": {
"$ref": "#/definitions/AssetConfigTransactionType"
},
@@ -1814,6 +1790,9 @@
"format": "uint64",
"x-go-name": "ConfirmedRound"
},
+ "sp": {
+ "$ref": "#/definitions/StateProofTransactionType"
+ },
"tx": {
"description": "TxID is the transaction ID",
"type": "string",
diff --git a/data/account/msgp_gen.go b/data/account/msgp_gen.go
index 5fa8daa6e..96934260c 100644
--- a/data/account/msgp_gen.go
+++ b/data/account/msgp_gen.go
@@ -4,8 +4,6 @@ package account
import (
"github.com/algorand/msgp/msgp"
-
- "github.com/algorand/go-algorand/crypto/merklesignature"
)
// The following msgp objects are implemented in this file:
@@ -25,14 +23,6 @@ import (
// |-----> Msgsize
// |-----> MsgIsZero
//
-// StateProofVerifier
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
// MarshalMsg implements msgp.Marshaler
func (z *ParticipationKeyIdentity) MarshalMsg(b []byte) (o []byte) {
@@ -327,31 +317,3 @@ func (z StateProofKeys) Msgsize() (s int) {
func (z StateProofKeys) MsgIsZero() bool {
return len(z) == 0
}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *StateProofVerifier) MarshalMsg(b []byte) []byte {
- return ((*(merklesignature.Verifier))(z)).MarshalMsg(b)
-}
-func (_ *StateProofVerifier) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*StateProofVerifier)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *StateProofVerifier) UnmarshalMsg(bts []byte) ([]byte, error) {
- return ((*(merklesignature.Verifier))(z)).UnmarshalMsg(bts)
-}
-func (_ *StateProofVerifier) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*StateProofVerifier)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *StateProofVerifier) Msgsize() int {
- return ((*(merklesignature.Verifier))(z)).Msgsize()
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *StateProofVerifier) MsgIsZero() bool {
- return ((*(merklesignature.Verifier))(z)).MsgIsZero()
-}
diff --git a/data/account/participation.go b/data/account/participation.go
index 6b05ff4ba..3209fd7ea 100644
--- a/data/account/participation.go
+++ b/data/account/participation.go
@@ -48,7 +48,7 @@ type Participation struct {
VRF *crypto.VRFSecrets
Voting *crypto.OneTimeSignatureSecrets
- // StateProofSecrets is used to sign compact certificates.
+ // StateProofSecrets is used to sign state proofs.
StateProofSecrets *merklesignature.Secrets
// The first and last rounds for which this account is valid, respectively.
@@ -141,7 +141,7 @@ func (part Participation) VotingSecrets() *crypto.OneTimeSignatureSecrets {
return part.Voting
}
-// StateProofSigner returns the key used to sign on Compact Certificates.
+// StateProofSigner returns the key used to sign on State Proofs.
// might return nil!
func (part Participation) StateProofSigner() *merklesignature.Secrets {
return part.StateProofSecrets
@@ -168,9 +168,9 @@ func (part Participation) GenerateRegistrationTransaction(fee basics.MicroAlgos,
SelectionPK: part.VRF.PK,
},
}
- if cert := part.StateProofSigner(); cert != nil {
+ if stateProofSigner := part.StateProofSigner(); stateProofSigner != nil {
if includeStateProofKeys { // TODO: remove this check and parameter after the network had enough time to upgrade
- t.KeyregTxnFields.StateProofPK = *(cert.GetVerifier())
+ t.KeyregTxnFields.StateProofPK = stateProofSigner.GetVerifier().Commitment
}
}
t.KeyregTxnFields.VoteFirst = part.FirstValid
@@ -220,10 +220,7 @@ func FillDBWithParticipationKeys(store db.Accessor, address basics.Address, firs
return
}
- // TODO: change to ConsensusCurrentVersion when updated
- interval := config.Consensus[protocol.ConsensusFuture].CompactCertRounds
maxValidPeriod := config.Consensus[protocol.ConsensusCurrentVersion].MaxKeyregValidPeriod
-
if maxValidPeriod != 0 && uint64(lastValid-firstValid) > maxValidPeriod {
return PersistedParticipation{}, fmt.Errorf("the validity period for mss is too large: the limit is %d", maxValidPeriod)
}
@@ -239,8 +236,8 @@ func FillDBWithParticipationKeys(store db.Accessor, address basics.Address, firs
// Generate a new VRF key, which lives in the participation keys db
vrf := crypto.GenerateVRFSecrets()
- // Generate a new key which signs the compact certificates
- stateProofSecrets, err := merklesignature.New(uint64(firstValid), uint64(lastValid), interval)
+ // Generate a new key which signs the state proof
+ stateProofSecrets, err := merklesignature.New(uint64(firstValid), uint64(lastValid), merklesignature.KeyLifetimeDefault)
if err != nil {
return PersistedParticipation{}, err
}
diff --git a/data/account/participationRegistry.go b/data/account/participationRegistry.go
index d1c35c2ce..e36f22451 100644
--- a/data/account/participationRegistry.go
+++ b/data/account/participationRegistry.go
@@ -24,8 +24,6 @@ import (
"fmt"
"time"
- "github.com/algorand/go-deadlock"
-
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklesignature"
@@ -33,6 +31,7 @@ import (
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
+ "github.com/algorand/go-deadlock"
)
const defaultTimeout = 5 * time.Second
@@ -80,14 +79,11 @@ type (
EffectiveFirst basics.Round
EffectiveLast basics.Round
- StateProof *StateProofVerifier
+ StateProof *merklesignature.Verifier
VRF *crypto.VRFSecrets
Voting *crypto.OneTimeSignatureSecrets
}
- // StateProofVerifier defined the type used for the stateproofs public key
- StateProofVerifier merklesignature.Verifier
-
// StateProofKeys represents a set of ephemeral stateproof keys with their corresponding round
//msgp:allocbound StateProofKeys 1000
StateProofKeys []merklesignature.KeyRoundPair
@@ -98,10 +94,10 @@ type (
ParticipationRecord
}
- // StateProofRecordForRound contains participant's state proof secrets that corresponds to
+ // StateProofSecretsForRound contains participant's state proof secrets that corresponds to
// one specific round. In Addition, it also returns the participation metadata.
// If there are no secrets for the round a nil is returned in Stateproof field.
- StateProofRecordForRound struct {
+ StateProofSecretsForRound struct {
ParticipationRecord
StateProofSecrets *merklesignature.Signer
@@ -146,10 +142,11 @@ func (r ParticipationRecord) Duplicate() ParticipationRecord {
voting = r.Voting.Snapshot()
}
- var stateProof *StateProofVerifier
+ var stateProof *merklesignature.Verifier
if r.StateProof != nil {
- stateProof = &StateProofVerifier{}
- copy(stateProof[:], r.StateProof[:])
+ stateProof = &merklesignature.Verifier{}
+ copy(stateProof.Commitment[:], r.StateProof.Commitment[:])
+ stateProof.KeyLifetime = r.StateProof.KeyLifetime
}
dupParticipation := ParticipationRecord{
@@ -232,6 +229,9 @@ type ParticipationRegistry interface {
// once, an error will occur when the data is flushed when inserting a duplicate key.
AppendKeys(id ParticipationID, keys StateProofKeys) error
+ // DeleteStateProofKeys removes all stateproof keys preceding a given round (including)
+ DeleteStateProofKeys(id ParticipationID, round basics.Round) error
+
// Delete removes a record from storage.
Delete(id ParticipationID) error
@@ -247,8 +247,8 @@ type ParticipationRegistry interface {
// GetForRound fetches a record with voting secrets for a particular round.
GetForRound(id ParticipationID, round basics.Round) (ParticipationRecordForRound, error)
- // GetStateProofForRound fetches a record with stateproof secrets for a particular round.
- GetStateProofForRound(id ParticipationID, round basics.Round) (StateProofRecordForRound, error)
+ // GetStateProofSecretsForRound fetches a record with stateproof secrets for a particular round.
+ GetStateProofSecretsForRound(id ParticipationID, round basics.Round) (StateProofSecretsForRound, error)
// HasLiveKeys quickly tests to see if there is a valid participation key over some range of rounds
HasLiveKeys(from, to basics.Round) bool
@@ -344,6 +344,7 @@ const (
insertKeysetQuery = `INSERT INTO Keysets (participationID, account, firstValidRound, lastValidRound, keyDilution, vrf, stateProof) VALUES (?, ?, ?, ?, ?, ?, ?)`
insertRollingQuery = `INSERT INTO Rolling (pk, voting) VALUES (?, ?)`
appendStateProofKeysQuery = `INSERT INTO StateProofKeys (pk, round, key) VALUES(?, ?, ?)`
+ deleteStateProofKeysQuery = `DELETE FROM StateProofKeys WHERE pk=? AND round<=?`
// SELECT pk FROM Keysets WHERE participationID = ?
selectPK = `SELECT pk FROM Keysets WHERE participationID = ? LIMIT 1`
@@ -363,6 +364,7 @@ const (
AND pk IN (SELECT pk FROM Keysets WHERE participationID=?)`
deleteKeysets = `DELETE FROM Keysets WHERE pk=?`
deleteRolling = `DELETE FROM Rolling WHERE pk=?`
+ deleteStateProofByPK = `DELETE FROM StateProofKeys WHERE pk=?`
updateRollingFieldsSQL = `UPDATE Rolling
SET lastVoteRound=?,
lastBlockProposalRound=?,
@@ -413,6 +415,22 @@ type participationDB struct {
flushTimeout time.Duration
}
+// DeleteStateProofKeys is a non-blocking operation, responsible for removing state-proof keys from the DB.
+func (db *participationDB) DeleteStateProofKeys(id ParticipationID, round basics.Round) error {
+ db.mutex.Lock()
+ defer db.mutex.Unlock()
+
+ if _, ok := db.cache[id]; !ok {
+ return ErrParticipationIDNotFound
+ }
+
+ db.writeQueue <- makeOpRequest(&deleteStateProofKeysOp{
+ ParticipationID: id,
+ round: round,
+ })
+ return nil
+}
+
type updatingParticipationRecord struct {
ParticipationRecord
@@ -445,6 +463,7 @@ func (db *participationDB) initializeCache() error {
func (db *participationDB) writeThread() {
defer close(db.writeQueueDone)
var lastErr error
+
for op := range db.writeQueue {
if err := op.operation.apply(db); err != nil {
lastErr = err
@@ -502,11 +521,11 @@ func (db *participationDB) Insert(record Participation) (id ParticipationID, err
*voting = record.Voting.Snapshot()
}
- var stateProofVeriferPtr *StateProofVerifier
+ var stateProofVerifierPtr *merklesignature.Verifier
if record.StateProofSecrets != nil {
- stateProofVeriferPtr = &StateProofVerifier{}
- copy(stateProofVeriferPtr[:], record.StateProofSecrets.GetVerifier()[:])
-
+ stateProofVerifierPtr = &merklesignature.Verifier{}
+ copy(stateProofVerifierPtr.Commitment[:], record.StateProofSecrets.GetVerifier().Commitment[:])
+ stateProofVerifierPtr.KeyLifetime = record.StateProofSecrets.GetVerifier().KeyLifetime
}
// update cache.
@@ -521,7 +540,7 @@ func (db *participationDB) Insert(record Participation) (id ParticipationID, err
LastStateProof: 0,
EffectiveFirst: 0,
EffectiveLast: 0,
- StateProof: stateProofVeriferPtr,
+ StateProof: stateProofVerifierPtr,
Voting: voting,
VRF: vrf,
}
@@ -608,7 +627,7 @@ func scanRecords(rows *sql.Rows) ([]ParticipationRecord, error) {
var lastVote sql.NullInt64
var lastBlockProposal sql.NullInt64
- var lastCompactCertificate sql.NullInt64
+ var lastStateProof sql.NullInt64
var effectiveFirst sql.NullInt64
var effectiveLast sql.NullInt64
@@ -622,7 +641,7 @@ func scanRecords(rows *sql.Rows) ([]ParticipationRecord, error) {
&rawStateProof,
&lastVote,
&lastBlockProposal,
- &lastCompactCertificate,
+ &lastStateProof,
&effectiveFirst,
&effectiveLast,
&rawVoting,
@@ -648,8 +667,9 @@ func scanRecords(rows *sql.Rows) ([]ParticipationRecord, error) {
if err != nil {
return nil, fmt.Errorf("unable to decode stateproof: %w", err)
}
- var stateProofVerifer StateProofVerifier
- copy(stateProofVerifer[:], stateProof.GetVerifier()[:])
+ var stateProofVerifer merklesignature.Verifier
+ copy(stateProofVerifer.Commitment[:], stateProof.GetVerifier().Commitment[:])
+ stateProofVerifer.KeyLifetime = stateProof.GetVerifier().KeyLifetime
record.StateProof = &stateProofVerifer
}
@@ -670,8 +690,8 @@ func scanRecords(rows *sql.Rows) ([]ParticipationRecord, error) {
record.LastBlockProposal = basics.Round(lastBlockProposal.Int64)
}
- if lastCompactCertificate.Valid {
- record.LastStateProof = basics.Round(lastCompactCertificate.Int64)
+ if lastStateProof.Valid {
+ record.LastStateProof = basics.Round(lastStateProof.Int64)
}
if effectiveFirst.Valid {
@@ -741,20 +761,25 @@ func (db *participationDB) HasLiveKeys(from, to basics.Round) bool {
return false
}
-// GetStateProofForRound returns the state proof data required to sign the compact certificate for this round
-func (db *participationDB) GetStateProofForRound(id ParticipationID, round basics.Round) (StateProofRecordForRound, error) {
+// GetStateProofSecretsForRound returns the state proof data required to sign the compact certificate for this round
+func (db *participationDB) GetStateProofSecretsForRound(id ParticipationID, round basics.Round) (StateProofSecretsForRound, error) {
partRecord, err := db.GetForRound(id, round)
if err != nil {
- return StateProofRecordForRound{}, err
+ return StateProofSecretsForRound{}, err
}
- var result StateProofRecordForRound
+ var result StateProofSecretsForRound
result.ParticipationRecord = partRecord.ParticipationRecord
var rawStateProofKey []byte
err = db.store.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
// fetch secret key
- row := tx.QueryRow(selectStateProofKey, round, id[:])
- err := row.Scan(&rawStateProofKey)
+ keyFirstValidRound, err := partRecord.StateProof.FirstRoundInKeyLifetime(uint64(round))
+ if err != nil {
+ return err
+ }
+
+ row := tx.QueryRow(selectStateProofKey, keyFirstValidRound, id[:])
+ err = row.Scan(&rawStateProofKey)
if err == sql.ErrNoRows {
return ErrSecretNotFound
}
@@ -764,13 +789,8 @@ func (db *participationDB) GetStateProofForRound(id ParticipationID, round basic
return nil
})
- switch err {
- case nil:
- // no error, continue
- case ErrSecretNotFound: // not considered an error (yet), since some accounts may not have registered state proof yet
- return result, nil
- default:
- return StateProofRecordForRound{}, err
+ if err != nil {
+ return StateProofSecretsForRound{}, fmt.Errorf("failed to fetch state proof for round %d: %w", round, err)
}
// Init stateproof fields after being able to retrieve key from database
@@ -780,7 +800,7 @@ func (db *participationDB) GetStateProofForRound(id ParticipationID, round basic
err = protocol.Decode(rawStateProofKey, result.StateProofSecrets.SigningKey)
if err != nil {
- return StateProofRecordForRound{}, err
+ return StateProofSecretsForRound{}, err
}
var rawSignerContext []byte
@@ -794,11 +814,11 @@ func (db *participationDB) GetStateProofForRound(id ParticipationID, round basic
return nil
})
if err != nil {
- return StateProofRecordForRound{}, err
+ return StateProofSecretsForRound{}, err
}
err = protocol.Decode(rawSignerContext, &result.StateProofSecrets.SignerContext)
if err != nil {
- return StateProofRecordForRound{}, err
+ return StateProofSecretsForRound{}, err
}
return result, nil
}
diff --git a/data/account/participationRegistry_test.go b/data/account/participationRegistry_test.go
index 7abb83ff7..8e02ff4e4 100644
--- a/data/account/participationRegistry_test.go
+++ b/data/account/participationRegistry_test.go
@@ -17,6 +17,7 @@
package account
import (
+ "bytes"
"context"
"database/sql"
"encoding/binary"
@@ -26,6 +27,7 @@ import (
"os"
"path/filepath"
"strconv"
+ "sync/atomic"
"strings"
"sync"
@@ -45,8 +47,7 @@ import (
"github.com/algorand/go-algorand/util/db"
)
-// TODO: change to ConsensusCurrentVersion when updated
-var CompactCertRounds = config.Consensus[protocol.ConsensusFuture].CompactCertRounds
+var stateProofIntervalForTests = config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval
func getRegistry(t testing.TB) (registry *participationDB, dbfile string) {
return getRegistryImpl(t, true, false)
@@ -80,16 +81,21 @@ func assertParticipation(t testing.TB, p Participation, pr ParticipationRecord)
require.Equal(t, p.KeyDilution, pr.KeyDilution)
require.Equal(t, p.Parent, pr.Account)
if p.StateProofSecrets != nil {
- require.Equal(t, p.StateProofSecrets.GetVerifier()[:], pr.StateProof[:])
+ require.Equal(t, p.StateProofSecrets.GetVerifier().Commitment[:], pr.StateProof.Commitment[:])
+ require.Equal(t, p.StateProofSecrets.GetVerifier().KeyLifetime, pr.StateProof.KeyLifetime)
}
}
func makeTestParticipation(a *require.Assertions, addrID int, first, last basics.Round, dilution uint64) Participation {
+ return makeTestParticipationWithLifetime(a, addrID, first, last, dilution, uint64((last+1)/2))
+}
+
+func makeTestParticipationWithLifetime(a *require.Assertions, addrID int, first, last basics.Round, dilution uint64, keyLifetime uint64) Participation {
a.True(first < last)
// Generate sample of stateproof keys. because it might take time we will reduce the number always to get 2 keys
- stateProofSecrets, err := merklesignature.New(uint64(first), uint64(last), (uint64(last)+1)/2)
+ stateProofSecrets, err := merklesignature.New(uint64(first), uint64(last), keyLifetime)
a.NoError(err)
// Generate part keys like in partGenerateCmd and FillDBWithParticipationKeys
@@ -314,6 +320,67 @@ func TestParticipation_DeleteExpired(t *testing.T) {
checkExpired(registry.GetAll())
}
+func TestParticipation_CleanupTablesAfterDeleteExpired(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry, dbfile := getRegistryImpl(t, false, true) // inMem=false, erasable=true
+ defer registryCloseTest(t, registry, dbfile)
+
+ keyDilution := 1
+ for i := 10; i < 20; i++ {
+ p := makeTestParticipation(a, i, 1, basics.Round(i), uint64(keyDilution))
+ id, err := registry.Insert(p)
+ a.NoError(err)
+ a.Equal(p.ID(), id)
+
+ err = registry.AppendKeys(id, p.StateProofSecrets.GetAllKeys())
+ a.NoError(err)
+ }
+
+ a.NoError(registry.Flush(defaultTimeout))
+
+ latestRound := basics.Round(50)
+ err := registry.DeleteExpired(latestRound, config.Consensus[protocol.ConsensusCurrentVersion])
+ a.NoError(err)
+
+ a.NoError(registry.Flush(defaultTimeout))
+ var numOfRecords int
+ // make sure tables are clean
+ err = registry.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ row := tx.QueryRow(`select count(*) from Keysets`)
+ err = row.Scan(&numOfRecords)
+ if err != nil {
+ return fmt.Errorf("unable to scan pk: %w", err)
+ }
+ return nil
+ })
+
+ a.NoError(err)
+ a.Equal(0, numOfRecords)
+
+ err = registry.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ row := tx.QueryRow(`select count(*) from Rolling`)
+ err = row.Scan(&numOfRecords)
+ if err != nil {
+ return fmt.Errorf("unable to scan pk: %w", err)
+ }
+ return nil
+ })
+ a.NoError(err)
+ a.Equal(0, numOfRecords)
+
+ err = registry.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ row := tx.QueryRow(`select count(*) from stateproofkeys`)
+ err = row.Scan(&numOfRecords)
+ if err != nil {
+ return fmt.Errorf("unable to scan pk: %w", err)
+ }
+ return nil
+ })
+ a.NoError(err)
+ a.Equal(0, numOfRecords)
+}
+
// Make sure the register function properly sets effective first/last for all effected records.
func TestParticipation_Register(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -847,7 +914,7 @@ func TestAddStateProofKeys(t *testing.T) {
// Install a key to add StateProof keys.
max := uint64(20)
- p := makeTestParticipation(a, 1, 0, basics.Round(max), 3)
+ p := makeTestParticipationWithLifetime(a, 1, 0, basics.Round(max), 3, 3)
id, err := registry.Insert(p)
a.NoError(err)
a.Equal(p.ID(), id)
@@ -859,15 +926,7 @@ func TestAddStateProofKeys(t *testing.T) {
signer, err := merklesignature.New(1, max, 3)
a.NoError(err)
// Initialize keys array.
- var keys StateProofKeys
- for i := uint64(1); i < max; i++ {
- k := signer.GetKey(i)
- if k == nil {
- continue
- }
- keysRound := merklesignature.KeyRoundPair{Round: i, Key: k}
- keys = append(keys, keysRound)
- }
+ keys := signer.GetAllKeys()
err = registry.AppendKeys(id, keys)
a.NoError(err)
@@ -876,18 +935,26 @@ func TestAddStateProofKeys(t *testing.T) {
err = registry.Flush(10 * time.Second)
a.NoError(err)
- j := 0
+ _, err = registry.GetStateProofSecretsForRound(id, basics.Round(1))
+ a.Error(err)
+ _, err = registry.GetStateProofSecretsForRound(id, basics.Round(2))
+ a.Error(err)
+
// Make sure we're able to fetch the same data that was put in.
- for i := uint64(1); i < max; i++ {
- r, err := registry.GetStateProofForRound(id, basics.Round(i))
+ for i := uint64(3); i < max; i++ {
+ r, err := registry.GetStateProofSecretsForRound(id, basics.Round(i))
a.NoError(err)
if r.StateProofSecrets != nil {
+ j := i/3 - 1 // idx in keys array
+
a.Equal(*keys[j].Key, *r.StateProofSecrets.SigningKey)
- a.Equal(keys[j].Round, i)
- j++
- }
+ keyFirstValidRound, err := r.StateProofSecrets.FirstRoundInKeyLifetime()
+ a.NoError(err)
+
+ a.Equal(keys[j].Round, keyFirstValidRound)
+ }
}
}
@@ -903,12 +970,9 @@ func TestSecretNotFound(t *testing.T) {
a.NoError(err)
a.Equal(p.ID(), id)
- r, err := registry.GetStateProofForRound(id, basics.Round(2))
+ _, err = registry.GetForRound(id, basics.Round(2))
a.NoError(err)
- // Empty stateproof key
- a.Nil(r.StateProofSecrets)
-
_, err = registry.GetForRound(id, basics.Round(100))
a.ErrorIs(err, ErrRequestedRoundOutOfRange)
}
@@ -924,7 +988,7 @@ func TestAddingSecretTwice(t *testing.T) {
panic(err)
}
root, err := GenerateRoot(access)
- p, err := FillDBWithParticipationKeys(access, root.Address(), 0, basics.Round(CompactCertRounds*2), 3)
+ p, err := FillDBWithParticipationKeys(access, root.Address(), 0, basics.Round(stateProofIntervalForTests*2), 3)
access.Close()
a.NoError(err)
@@ -936,7 +1000,7 @@ func TestAddingSecretTwice(t *testing.T) {
// Append key
var keys StateProofKeys
- keysRound := merklesignature.KeyRoundPair{Round: CompactCertRounds, Key: p.StateProofSecrets.GetKey(CompactCertRounds)}
+ keysRound := merklesignature.KeyRoundPair{Round: stateProofIntervalForTests, Key: p.StateProofSecrets.GetKey(stateProofIntervalForTests)}
keys = append(keys, keysRound)
err = registry.AppendKeys(id, keys)
@@ -962,7 +1026,7 @@ func TestGetRoundSecretsWithoutStateProof(t *testing.T) {
panic(err)
}
root, err := GenerateRoot(access)
- p, err := FillDBWithParticipationKeys(access, root.Address(), 0, basics.Round(CompactCertRounds*2), 3)
+ p, err := FillDBWithParticipationKeys(access, root.Address(), 0, basics.Round(stateProofIntervalForTests*2), 3)
access.Close()
a.NoError(err)
@@ -972,34 +1036,109 @@ func TestGetRoundSecretsWithoutStateProof(t *testing.T) {
a.NoError(registry.Flush(defaultTimeout))
- partPerRound, err := registry.GetStateProofForRound(id, 1)
- a.NoError(err)
- a.Nil(partPerRound.StateProofSecrets)
+ partPerRound, err := registry.GetStateProofSecretsForRound(id, 1)
+ a.Error(err)
- // Should return nil as well since no state proof keys were added
- partPerRound, err = registry.GetStateProofForRound(id, basics.Round(CompactCertRounds))
- a.NoError(err)
- a.Nil(partPerRound.StateProofSecrets)
+ partPerRound, err = registry.GetStateProofSecretsForRound(id, basics.Round(stateProofIntervalForTests))
+ a.Error(err)
// Append key
keys := make(StateProofKeys, 1)
- keys[0] = merklesignature.KeyRoundPair{Round: CompactCertRounds, Key: p.StateProofSecrets.GetKey(CompactCertRounds)}
+ keys[0] = merklesignature.KeyRoundPair{Round: stateProofIntervalForTests, Key: p.StateProofSecrets.GetKey(stateProofIntervalForTests)}
err = registry.AppendKeys(id, keys)
a.NoError(err)
a.NoError(registry.Flush(defaultTimeout))
- partPerRound, err = registry.GetStateProofForRound(id, basics.Round(CompactCertRounds)-1)
- a.NoError(err)
- a.Nil(partPerRound.StateProofSecrets)
+ partPerRound, err = registry.GetStateProofSecretsForRound(id, basics.Round(stateProofIntervalForTests)-1)
+ a.Error(err)
- partPerRound, err = registry.GetStateProofForRound(id, basics.Round(CompactCertRounds))
+ partPerRound, err = registry.GetStateProofSecretsForRound(id, basics.Round(stateProofIntervalForTests))
a.NoError(err)
a.NotNil(partPerRound.StateProofSecrets)
a.Equal(*partPerRound.StateProofSecrets.SigningKey, *keys[0].Key)
- a.Equal(CompactCertRounds, keys[0].Round)
+ a.Equal(stateProofIntervalForTests, keys[0].Round)
+}
+
+type keypairs []merklesignature.KeyRoundPair
+
+func (k keypairs) findPairForSpecificRound(round uint64) merklesignature.KeyRoundPair {
+ for _, pair := range k {
+ if pair.Round == round {
+ return pair
+ }
+ }
+ return merklesignature.KeyRoundPair{}
+}
+
+func TestDeleteStateProofKeys(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry, dbfile := getRegistry(t)
+ defer registryCloseTest(t, registry, dbfile)
+
+ // Install a key to add StateProof keys.
+ maxRound := uint64(20)
+ p := makeTestParticipationWithLifetime(a, 1, 0, basics.Round(maxRound), 3, 4)
+ id, err := registry.Insert(p)
+ a.NoError(err)
+ a.Equal(p.ID(), id)
+
+ // Wait for async DB operations to finish.
+ a.NoError(registry.Flush(10 * time.Second))
+
+ keys := keypairs(p.StateProofSecrets.GetAllKeys())
+
+ a.NoError(registry.AppendKeys(id, StateProofKeys(keys)))
+
+ // Wait for async DB operations to finish.
+ a.NoError(registry.Flush(10 * time.Second))
+
+ // Make sure we're able to fetch the same data that was put in.
+ for i := uint64(4); i < maxRound; i += 4 {
+ r, err := registry.GetStateProofSecretsForRound(id, basics.Round(i))
+ a.NoError(err)
+
+ a.Equal(keys.findPairForSpecificRound(i).Key, r.StateProofSecrets.SigningKey)
+ }
+
+ removeKeysRound := basics.Round(maxRound / 2)
+ a.NoError(registry.DeleteStateProofKeys(id, removeKeysRound))
+
+ a.NoError(registry.Flush(10 * time.Second))
+
+ // verify that the db does not contain any state proof key with round less than 10
+
+ registry.store.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ var pk int
+ a.NoError(tx.QueryRow(selectPK, id[:]).Scan(&pk))
+
+ // make certain keys below the cutting round do not exist in the db.
+ var num int
+ a.NoError(
+ tx.QueryRow(
+ "SELECT COUNT(*) FROM StateProofKeys where pk=? AND round <=?",
+ pk,
+ removeKeysRound,
+ ).Scan(&num),
+ )
+ a.Zero(num)
+
+ // make certain keys above the cutting round exist in the db.
+ a.NoError(
+ tx.QueryRow(
+ "SELECT COUNT(*) FROM StateProofKeys where pk=? AND round >?",
+ pk,
+ removeKeysRound,
+ ).Scan(&num),
+ )
+
+ // includes removeKeysRound
+ a.Equal(int(maxRound)/4-int(removeKeysRound)/4, num) // 1 DELETED 1 NOT
+ return nil
+ })
}
// test that sets up an error that should come up while flushing, and ensures that flush resets the last error
@@ -1013,7 +1152,7 @@ func TestFlushResetsLastError(t *testing.T) {
a.NoError(err)
root, err := GenerateRoot(access)
- p, err := FillDBWithParticipationKeys(access, root.Address(), 0, basics.Round(CompactCertRounds*2), 3)
+ p, err := FillDBWithParticipationKeys(access, root.Address(), 0, basics.Round(stateProofIntervalForTests*2), 3)
access.Close()
a.NoError(err)
@@ -1025,7 +1164,7 @@ func TestFlushResetsLastError(t *testing.T) {
// Append key
var keys StateProofKeys
- keysRound := merklesignature.KeyRoundPair{Round: CompactCertRounds, Key: p.StateProofSecrets.GetKey(CompactCertRounds)}
+ keysRound := merklesignature.KeyRoundPair{Round: stateProofIntervalForTests, Key: p.StateProofSecrets.GetKey(stateProofIntervalForTests)}
keys = append(keys, keysRound)
err = registry.AppendKeys(id, keys)
@@ -1039,6 +1178,129 @@ func TestFlushResetsLastError(t *testing.T) {
a.NoError(registry.Flush(10 * time.Second))
}
+// TestParticipationDB_Locking tries fetching StateProof keys from the DB while the Rolling table is being updated.
+// Makes sure the table is not locked for reading while a different one is locked for writing.
+func TestParticipationDB_Locking(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ dbName := strings.Replace(t.Name(), "/", "_", -1)
+
+ dbpair, err := db.OpenErasablePair(dbName + ".sqlite3")
+ a.NoError(err)
+
+ var bufNewLogger bytes.Buffer
+ log := logging.NewLogger()
+ log.SetLevel(logging.Warn)
+ log.SetOutput(&bufNewLogger)
+ dbpair.Rdb.SetLogger(log)
+
+ registry, err := makeParticipationRegistry(dbpair, logging.TestingLog(t))
+ require.NoError(t, err)
+ require.NotNil(t, registry)
+
+ defer registryCloseTest(t, registry, dbName)
+
+ var id2 ParticipationID
+ for i := 0; i < 3; i++ {
+ part := makeTestParticipation(a, 1, 0, 511, config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution)
+ id, err := registry.Insert(part)
+ if i == 0 {
+ id2 = id
+ }
+ a.NoError(err)
+ a.NoError(registry.AppendKeys(id, part.StateProofSecrets.GetAllKeys()))
+ a.NoError(registry.Flush(defaultTimeout))
+ a.Equal(id, part.ID())
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ var flushCount int32
+ const targetFlushes = 5
+ go func() {
+ for i := 0; i < 25; i++ {
+ registry.DeleteExpired(basics.Round(i), config.Consensus[protocol.ConsensusCurrentVersion])
+ registry.Flush(defaultTimeout)
+ if atomic.LoadInt32(&flushCount) < targetFlushes {
+ atomic.AddInt32(&flushCount, 1)
+ }
+ }
+ wg.Done()
+ }()
+
+ for i := 0; i < 25; i++ {
+ repeat:
+ // to not start lookup until deleted some keys
+ if atomic.LoadInt32(&flushCount) < targetFlushes {
+ time.Sleep(time.Second)
+ goto repeat
+ }
+ _, err = registry.GetStateProofSecretsForRound(id2, basics.Round(256))
+ // The error we're trying to avoid is "database is locked", since we're reading from StateProofKeys table,
+ // while the main thread is updating the Rolling table.
+ a.NoError(err)
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ warnings := bufNewLogger.String()
+ deadlineCount := strings.Count(warnings, "tx surpassed expected deadline")
+ a.Empty(deadlineCount, fmt.Sprintf("found %d messages 'tx surpassed expected deadline' but expected 0", deadlineCount))
+ wg.Wait()
+}
+
+func TestParticipationDBInstallWhileReading(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+
+ dbName := strings.Replace(t.Name(), "/", "_", -1)
+
+ dbpair, err := db.OpenErasablePair(dbName + ".sqlite3")
+ a.NoError(err)
+
+ registry, err := makeParticipationRegistry(dbpair, logging.TestingLog(t))
+ require.NoError(t, err)
+ require.NotNil(t, registry)
+ defer registryCloseTest(t, registry, dbName)
+
+ var sampledPartID ParticipationID
+ for i := 0; i < 3; i++ {
+ part := makeTestParticipation(a, 1, 0, 511, config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution)
+ id, err := registry.Insert(part)
+ if i == 0 {
+ sampledPartID = id
+ }
+ a.NoError(err)
+ a.NoError(registry.AppendKeys(id, part.StateProofSecrets.GetAllKeys()))
+ a.NoError(registry.Flush(defaultTimeout))
+ a.Equal(id, part.ID())
+ }
+
+ appendedKeys := make(chan struct{})
+ newPart := makeTestParticipationWithLifetime(a, 1, 0, 3000000, config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution, merklesignature.KeyLifetimeDefault)
+ go func() {
+ id, err := registry.Insert(newPart)
+ a.NoError(err)
+ a.NoError(registry.AppendKeys(id, newPart.StateProofSecrets.GetAllKeys()))
+ appendedKeys <- struct{}{}
+ a.NoError(registry.Flush(defaultTimeout))
+ a.Equal(id, newPart.ID())
+ }()
+
+ <-appendedKeys // Makes sure we start fetching keys after the append keys operation has already started
+ for i := 0; i < 50; i++ {
+ _, err = registry.GetStateProofSecretsForRound(sampledPartID, basics.Round(256))
+ // The error we're trying to avoid is "database is locked", since we're reading from StateProofKeys table,
+ // while a different go routine is installing new keys.
+ a.NoError(err)
+ }
+}
+
// based on BenchmarkOldKeysDeletion
func BenchmarkDeleteExpired(b *testing.B) {
for _, erasable := range []bool{true, false} {
diff --git a/data/account/participation_test.go b/data/account/participation_test.go
index 19eab3f73..9a48bb7a0 100644
--- a/data/account/participation_test.go
+++ b/data/account/participation_test.go
@@ -435,7 +435,7 @@ func BenchmarkFillDB(b *testing.B) {
tmp := config.Consensus[protocol.ConsensusCurrentVersion]
cpy := config.Consensus[protocol.ConsensusCurrentVersion]
- cpy.CompactCertRounds = 256
+ cpy.StateProofInterval = 256
config.Consensus[protocol.ConsensusCurrentVersion] = cpy
defer func() { config.Consensus[protocol.ConsensusCurrentVersion] = tmp }()
diff --git a/data/account/registeryDbOps.go b/data/account/registeryDbOps.go
index 691ca5124..6fa69bb15 100644
--- a/data/account/registeryDbOps.go
+++ b/data/account/registeryDbOps.go
@@ -21,6 +21,7 @@ import (
"database/sql"
"errors"
"fmt"
+ "github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
"strings"
)
@@ -53,6 +54,40 @@ type appendKeysOp struct {
id ParticipationID
keys StateProofKeys
}
+type deleteStateProofKeysOp struct {
+ ParticipationID ParticipationID
+ round basics.Round
+}
+
+func (d deleteStateProofKeysOp) apply(db *participationDB) error {
+ err := db.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+
+ // Fetch primary key
+ var pk int
+ row := tx.QueryRow(selectPK, d.ParticipationID[:])
+ err := row.Scan(&pk)
+ if err != nil {
+ return fmt.Errorf("unable to scan pk: %w", err)
+ }
+
+ stmt, err := tx.Prepare(deleteStateProofKeysQuery)
+ if err != nil {
+ return fmt.Errorf("unable to prepare state proof delete: %w", err)
+ }
+ defer stmt.Close()
+
+ _, err = stmt.Exec(pk, d.round)
+ if err != nil {
+ return fmt.Errorf("unable to exec state proof delete (pk,rnd) == (%d,%d): %w", pk, d.round, err)
+ }
+ return nil
+ })
+
+ if err != nil {
+ db.log.Warnf("participationDB unable to delete stateProof key: %w", err)
+ }
+ return err
+}
func makeOpRequest(operation dbOp) opRequest {
return opRequest{operation: operation}
@@ -167,6 +202,11 @@ func (d *deleteOp) apply(db *participationDB) error {
return err
}
+ _, err = tx.Exec(deleteStateProofByPK, pk)
+ if err != nil {
+ return err
+ }
+
return nil
})
return err
diff --git a/data/accountManager.go b/data/accountManager.go
index 7cadd071d..d44091f80 100644
--- a/data/accountManager.go
+++ b/data/accountManager.go
@@ -45,6 +45,11 @@ type AccountManager struct {
log logging.Logger
}
+// DeleteStateProofKey deletes all keys connected to ParticipationID that came before (including) the given round.
+func (manager *AccountManager) DeleteStateProofKey(id account.ParticipationID, round basics.Round) error {
+ return manager.registry.DeleteStateProofKeys(id, round)
+}
+
// MakeAccountManager creates a new AccountManager with a custom logger
func MakeAccountManager(log logging.Logger, registry account.ParticipationRegistry) *AccountManager {
manager := &AccountManager{}
@@ -72,12 +77,12 @@ func (manager *AccountManager) Keys(rnd basics.Round) (out []account.Participati
}
// StateProofKeys returns a list of Participation accounts, and their stateproof secrets
-func (manager *AccountManager) StateProofKeys(rnd basics.Round) (out []account.StateProofRecordForRound) {
+func (manager *AccountManager) StateProofKeys(rnd basics.Round) (out []account.StateProofSecretsForRound) {
for _, part := range manager.registry.GetAll() {
if part.OverlapsInterval(rnd, rnd) {
- partRndSecrets, err := manager.registry.GetStateProofForRound(part.ParticipationID, rnd)
+ partRndSecrets, err := manager.registry.GetStateProofSecretsForRound(part.ParticipationID, rnd)
if err != nil {
- manager.log.Warnf("error while loading round secrets from participation registry: %w", err)
+ manager.log.Errorf("error while loading round secrets from participation registry: %w", err)
continue
}
out = append(out, partRndSecrets)
@@ -98,13 +103,20 @@ func (manager *AccountManager) HasLiveKeys(from, to basics.Round) bool {
// AddParticipation adds a new account.Participation to be managed.
// The return value indicates if the key has been added (true) or
// if this is a duplicate key (false).
-func (manager *AccountManager) AddParticipation(participation account.PersistedParticipation) bool {
+// if ephemeral is true then the key is not stored in the internal hashmap and
+// will not be deleted by DeleteOldKeys()
+func (manager *AccountManager) AddParticipation(participation account.PersistedParticipation, ephemeral bool) bool {
// Tell the ParticipationRegistry about the Participation. Duplicate entries
// are ignored.
pid, err := manager.registry.Insert(participation.Participation)
if err != nil && err != account.ErrAlreadyInserted {
manager.log.Warnf("Failed to insert participation key.")
}
+
+ if err == account.ErrAlreadyInserted {
+ return false
+ }
+
manager.log.Infof("Inserted key (%s) for account (%s) first valid (%d) last valid (%d)\n",
pid, participation.Parent, participation.FirstValid, participation.LastValid)
@@ -129,7 +141,9 @@ func (manager *AccountManager) AddParticipation(participation account.PersistedP
return false
}
- manager.partKeys[partkeyID] = participation
+ if !ephemeral {
+ manager.partKeys[partkeyID] = participation
+ }
addressString := address.String()
manager.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.PartKeyRegisteredEvent, telemetryspec.PartKeyRegisteredEventDetails{
diff --git a/data/accountManager_test.go b/data/accountManager_test.go
index 1f60c5266..9d464cba1 100644
--- a/data/accountManager_test.go
+++ b/data/accountManager_test.go
@@ -25,10 +25,12 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/components/mocks"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
@@ -129,7 +131,8 @@ func testAccountManagerKeys(t *testing.T, registry account.ParticipationRegistry
databaseFiles = append(databaseFiles, rootFilename)
databaseFiles = append(databaseFiles, partFilename)
- acctManager.AddParticipation(part)
+ // Not ephemeral to be backwards compatible with the test
+ acctManager.AddParticipation(part, false)
}
if _, mocked := acctManager.Registry().(*mocks.MockParticipationRegistry); !mocked {
require.Len(t, acctManager.Keys(basics.Round(1)), numPartKeys, "incorrect number of keys, can happen if test crashes and leaves SQLite files")
@@ -173,3 +176,75 @@ func testAccountManagerKeys(t *testing.T, registry account.ParticipationRegistry
require.Lessf(t, keysTotalDuration, testDuration/100, fmt.Sprintf("the time to aquire the keys via Keys() was %v whereas blocking on keys deletion took %v", keysTotalDuration, testDuration))
t.Logf("Calling AccountManager.Keys() while AccountManager.DeleteOldKeys() was busy, 10 times in a row, resulted in accumulated delay of %v\n", keysTotalDuration)
}
+
+func TestAccountManagerOverlappingStateProofKeys(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := assert.New(t)
+
+ registry, dbName := getRegistryImpl(t, false, true)
+ defer registryCloseTest(t, registry, dbName)
+
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Error)
+
+ acctManager := MakeAccountManager(log, registry)
+
+ databaseFiles := make([]string, 0)
+ defer func() {
+ for _, fileName := range databaseFiles {
+ os.Remove(fileName)
+ os.Remove(fileName + "-shm")
+ os.Remove(fileName + "-wal")
+ os.Remove(fileName + "-journal")
+ }
+ }()
+
+ // Generate 2 participations under the same account
+ store, err := db.MakeAccessor("stateprooftest", false, true)
+ a.NoError(err)
+ root, err := account.GenerateRoot(store)
+ a.NoError(err)
+ part1, err := account.FillDBWithParticipationKeys(store, root.Address(), 0, basics.Round(merklesignature.KeyLifetimeDefault*2), 3)
+ a.NoError(err)
+ store.Close()
+
+ store, err = db.MakeAccessor("stateprooftest", false, true)
+ a.NoError(err)
+ part2, err := account.FillDBWithParticipationKeys(store, root.Address(), basics.Round(merklesignature.KeyLifetimeDefault), basics.Round(merklesignature.KeyLifetimeDefault*3), 3)
+ a.NoError(err)
+ store.Close()
+
+ keys1 := part1.StateProofSecrets.GetAllKeys()
+ keys2 := part2.StateProofSecrets.GetAllKeys()
+
+ // Add participations to the registry and append StateProof keys as well
+ part1ID, err := acctManager.registry.Insert(part1.Participation)
+ a.NoError(err)
+ err = registry.AppendKeys(part1ID, keys1)
+ a.NoError(err)
+
+ err = acctManager.registry.Flush(10 * time.Second)
+ a.NoError(err)
+
+ res := acctManager.StateProofKeys(basics.Round(merklesignature.KeyLifetimeDefault))
+ a.Equal(1, len(res))
+ res = acctManager.StateProofKeys(basics.Round(merklesignature.KeyLifetimeDefault * 2))
+ a.Equal(1, len(res))
+
+ part2ID, err := acctManager.registry.Insert(part2.Participation)
+ a.NoError(err)
+ err = registry.AppendKeys(part2ID, keys2)
+ a.NoError(err)
+
+ err = acctManager.registry.Flush(10 * time.Second)
+ a.NoError(err)
+
+ res = acctManager.StateProofKeys(0)
+ a.Equal(1, len(res))
+ res = acctManager.StateProofKeys(basics.Round(merklesignature.KeyLifetimeDefault))
+ a.Equal(2, len(res))
+ res = acctManager.StateProofKeys(basics.Round(merklesignature.KeyLifetimeDefault * 2))
+ a.Equal(2, len(res))
+ res = acctManager.StateProofKeys(basics.Round(merklesignature.KeyLifetimeDefault * 3))
+ a.Equal(1, len(res))
+}
diff --git a/data/basics/overflow.go b/data/basics/overflow.go
index b842dc414..5296fd884 100644
--- a/data/basics/overflow.go
+++ b/data/basics/overflow.go
@@ -18,7 +18,7 @@ package basics
import (
"math"
- "math/big"
+ "math/bits"
)
// OverflowTracker is used to track when an operation causes an overflow
@@ -200,17 +200,10 @@ func (t *OverflowTracker) ScalarMulA(a MicroAlgos, b uint64) MicroAlgos {
// Muldiv computes a*b/c. The overflow flag indicates that
// the result was 2^64 or greater.
func Muldiv(a uint64, b uint64, c uint64) (res uint64, overflow bool) {
- var aa big.Int
- aa.SetUint64(a)
-
- var bb big.Int
- bb.SetUint64(b)
-
- var cc big.Int
- cc.SetUint64(c)
-
- aa.Mul(&aa, &bb)
- aa.Div(&aa, &cc)
-
- return aa.Uint64(), !aa.IsUint64()
+ hi, lo := bits.Mul64(a, b)
+ if c <= hi {
+ return 0, true
+ }
+ quo, _ := bits.Div64(hi, lo, c)
+ return quo, false
}
diff --git a/data/basics/ccertpart.go b/data/basics/stateProofParticipant.go
index 481f607c5..5dcc3ad85 100644
--- a/data/basics/ccertpart.go
+++ b/data/basics/stateProofParticipant.go
@@ -55,16 +55,20 @@ type Participant struct {
// be bad for creating SNARK
func (p Participant) ToBeHashed() (protocol.HashID, []byte) {
- weightAsBytes := make([]byte, 8)
- binary.LittleEndian.PutUint64(weightAsBytes, p.Weight)
+ var weightAsBytes [8]byte
+ binary.LittleEndian.PutUint64(weightAsBytes[:], p.Weight)
- publicKeyBytes := p.PK
+ var keyLifetimeBytes [8]byte
+ binary.LittleEndian.PutUint64(keyLifetimeBytes[:], p.PK.KeyLifetime)
- partCommitment := make([]byte, 0, len(weightAsBytes)+len(publicKeyBytes))
- partCommitment = append(partCommitment, weightAsBytes...)
+ publicKeyBytes := p.PK.Commitment
+
+ partCommitment := make([]byte, 0, len(weightAsBytes)+len(publicKeyBytes)+len(keyLifetimeBytes))
+ partCommitment = append(partCommitment, weightAsBytes[:]...)
+ partCommitment = append(partCommitment, keyLifetimeBytes[:]...)
partCommitment = append(partCommitment, publicKeyBytes[:]...)
- return protocol.CompactCertPart, partCommitment
+ return protocol.StateProofPart, partCommitment
}
// ParticipantsArray implements merklearray.Array and is used to commit
diff --git a/data/basics/units_test.go b/data/basics/units_test.go
index efc9bd72c..5e97f4ca5 100644
--- a/data/basics/units_test.go
+++ b/data/basics/units_test.go
@@ -18,6 +18,7 @@ package basics
import (
"math"
+ "math/big"
"testing"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -69,3 +70,63 @@ func TestRoundUpToMultipleOf(t *testing.T) {
}
}
}
+
+func OldMuldiv(a uint64, b uint64, c uint64) (res uint64, overflow bool) {
+ var aa big.Int
+ aa.SetUint64(a)
+
+ var bb big.Int
+ bb.SetUint64(b)
+
+ var cc big.Int
+ cc.SetUint64(c)
+
+ aa.Mul(&aa, &bb)
+ aa.Div(&aa, &cc)
+
+ return aa.Uint64(), !aa.IsUint64()
+}
+
+func BenchmarkOldMuldiv(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u64 := uint64(i + 1)
+ OldMuldiv(u64, u64, u64)
+ OldMuldiv(math.MaxUint64, u64, u64)
+ OldMuldiv(u64, math.MaxUint64, u64)
+ OldMuldiv(math.MaxInt64, math.MaxInt64, u64)
+ }
+}
+
+func BenchmarkNewMuldiv(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u64 := uint64(i + 1)
+ Muldiv(u64, u64, u64)
+ Muldiv(math.MaxUint64, u64, u64)
+ Muldiv(u64, math.MaxUint64, u64)
+ Muldiv(math.MaxInt64, math.MaxInt64, u64)
+ }
+}
+
+func TestNewMuldiv(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ test := func(a, b, c uint64) {
+ r1, o1 := OldMuldiv(a, b, c)
+ r2, o2 := Muldiv(a, b, c)
+ require.Equal(t, o1, o2)
+ // implementations differ in r1,r2 if overflow. old implemention is
+ // returning an unspecified value
+ if !o1 {
+ require.Equal(t, r1, r2)
+ }
+ }
+ test(1, 2, 3)
+ test(1000000000, 2000000000, 1)
+ test(math.MaxUint64, 3, 4)
+ test(math.MaxUint64, 4, 3)
+ test(3, math.MaxUint64, 4)
+ test(4, math.MaxUint64, 3)
+ test(math.MaxUint64, math.MaxUint64, math.MaxUint64)
+ test(math.MaxUint64, math.MaxUint64, 5)
+}
diff --git a/data/basics/userBalance.go b/data/basics/userBalance.go
index 36d83cf95..2b0a2699e 100644
--- a/data/basics/userBalance.go
+++ b/data/basics/userBalance.go
@@ -97,19 +97,24 @@ func UnmarshalStatus(value string) (s Status, err error) {
return
}
-// OnlineAccountData contains the voting information for a single account.
-//msgp:ignore OnlineAccountData
-type OnlineAccountData struct {
- MicroAlgosWithRewards MicroAlgos
-
- VoteID crypto.OneTimeSignatureVerifier
- SelectionID crypto.VRFVerifier
+// VotingData holds voting-related data
+type VotingData struct {
+ VoteID crypto.OneTimeSignatureVerifier
+ SelectionID crypto.VRFVerifier
+ StateProofID merklesignature.Commitment
VoteFirstValid Round
VoteLastValid Round
VoteKeyDilution uint64
}
+// OnlineAccountData contains the voting information for a single account.
+//msgp:ignore OnlineAccountData
+type OnlineAccountData struct {
+ MicroAlgosWithRewards MicroAlgos
+ VotingData
+}
+
// AccountData contains the data associated with a given address.
//
// This includes the account balance, cryptographic public keys,
@@ -163,7 +168,7 @@ type AccountData struct {
VoteID crypto.OneTimeSignatureVerifier `codec:"vote"`
SelectionID crypto.VRFVerifier `codec:"sel"`
- StateProofID merklesignature.Verifier `codec:"stprf"`
+ StateProofID merklesignature.Commitment `codec:"stprf"`
VoteFirstValid Round `codec:"voteFst"`
VoteLastValid Round `codec:"voteLst"`
@@ -431,22 +436,23 @@ func PendingRewards(ot *OverflowTracker, proto config.ConsensusParams, microAlgo
func WithUpdatedRewards(
proto config.ConsensusParams, status Status, microAlgosIn MicroAlgos, rewardedMicroAlgosIn MicroAlgos, rewardsBaseIn uint64, rewardsLevelIn uint64,
) (MicroAlgos, MicroAlgos, uint64) {
- if status != NotParticipating {
- var ot OverflowTracker
- rewardsUnits := microAlgosIn.RewardUnits(proto)
- rewardsDelta := ot.Sub(rewardsLevelIn, rewardsBaseIn)
- rewards := MicroAlgos{Raw: ot.Mul(rewardsUnits, rewardsDelta)}
- microAlgosOut := ot.AddA(microAlgosIn, rewards)
- if ot.Overflowed {
- logging.Base().Panicf("AccountData.WithUpdatedRewards(): overflowed account balance when applying rewards %v + %d*(%d-%d)", microAlgosIn, rewardsUnits, rewardsLevelIn, rewardsBaseIn)
- }
- rewardsBaseOut := rewardsLevelIn
- // The total reward over the lifetime of the account could exceed a 64-bit value. As a result
- // this rewardAlgos counter could potentially roll over.
- rewardedMicroAlgosOut := MicroAlgos{Raw: rewardedMicroAlgosIn.Raw + rewards.Raw}
- return microAlgosOut, rewardedMicroAlgosOut, rewardsBaseOut
+ if status == NotParticipating {
+ return microAlgosIn, rewardedMicroAlgosIn, rewardsBaseIn
}
- return microAlgosIn, rewardedMicroAlgosIn, rewardsBaseIn
+
+ var ot OverflowTracker
+ rewardsUnits := microAlgosIn.RewardUnits(proto)
+ rewardsDelta := ot.Sub(rewardsLevelIn, rewardsBaseIn)
+ rewards := MicroAlgos{Raw: ot.Mul(rewardsUnits, rewardsDelta)}
+ microAlgosOut := ot.AddA(microAlgosIn, rewards)
+ if ot.Overflowed {
+ logging.Base().Panicf("AccountData.WithUpdatedRewards(): overflowed account balance when applying rewards %v + %d*(%d-%d)", microAlgosIn, rewardsUnits, rewardsLevelIn, rewardsBaseIn)
+ }
+ rewardsBaseOut := rewardsLevelIn
+ // The total reward over the lifetime of the account could exceed a 64-bit value. As a result
+ // this rewardAlgos counter could potentially roll over.
+ rewardedMicroAlgosOut := MicroAlgos{Raw: rewardedMicroAlgosIn.Raw + rewards.Raw}
+ return microAlgosOut, rewardedMicroAlgosOut, rewardsBaseOut
}
// WithUpdatedRewards returns an updated number of algos in an AccountData
@@ -522,12 +528,14 @@ func (u AccountData) OnlineAccountData() OnlineAccountData {
return OnlineAccountData{
MicroAlgosWithRewards: u.MicroAlgos,
-
- VoteID: u.VoteID,
- SelectionID: u.SelectionID,
- VoteFirstValid: u.VoteFirstValid,
- VoteLastValid: u.VoteLastValid,
- VoteKeyDilution: u.VoteKeyDilution,
+ VotingData: VotingData{
+ VoteID: u.VoteID,
+ SelectionID: u.SelectionID,
+ StateProofID: u.StateProofID,
+ VoteFirstValid: u.VoteFirstValid,
+ VoteLastValid: u.VoteLastValid,
+ VoteKeyDilution: u.VoteKeyDilution,
+ },
}
}
@@ -598,26 +606,26 @@ func (u AccountData) NormalizedOnlineBalance(proto config.ConsensusParams) uint6
// on how recently the account has been touched (our rewards do not implement
// compounding). However, online accounts have to periodically renew
// participation keys, so the scale of the inconsistency is small.
-func NormalizedOnlineAccountBalance(status Status, rewardsBase uint64, microAlgos MicroAlgos, proto config.ConsensusParams) uint64 {
+func NormalizedOnlineAccountBalance(status Status, rewardsBase uint64, microAlgos MicroAlgos, genesisProto config.ConsensusParams) uint64 {
if status != Online {
return 0
}
// If this account had one RewardUnit of microAlgos in round 0, it would
// have perRewardUnit microAlgos at the account's current rewards level.
- perRewardUnit := rewardsBase + proto.RewardUnit
+ perRewardUnit := rewardsBase + genesisProto.RewardUnit
// To normalize, we compute, mathematically,
// u.MicroAlgos / perRewardUnit * proto.RewardUnit, as
// (u.MicroAlgos * proto.RewardUnit) / perRewardUnit.
- norm, overflowed := Muldiv(microAlgos.ToUint64(), proto.RewardUnit, perRewardUnit)
+ norm, overflowed := Muldiv(microAlgos.ToUint64(), genesisProto.RewardUnit, perRewardUnit)
// Mathematically should be impossible to overflow
// because perRewardUnit >= proto.RewardUnit, as long
// as u.RewardBase isn't huge enough to cause overflow..
if overflowed {
logging.Base().Panicf("overflow computing normalized balance %d * %d / (%d + %d)",
- microAlgos.ToUint64(), proto.RewardUnit, rewardsBase, proto.RewardUnit)
+ microAlgos.ToUint64(), genesisProto.RewardUnit, rewardsBase, genesisProto.RewardUnit)
}
return norm
diff --git a/data/basics/userBalance_test.go b/data/basics/userBalance_test.go
index ddaf59315..050ea2882 100644
--- a/data/basics/userBalance_test.go
+++ b/data/basics/userBalance_test.go
@@ -109,7 +109,7 @@ func makeString(len int) string {
func getSampleAccountData() AccountData {
oneTimeSecrets := crypto.GenerateOneTimeSignatureSecrets(0, 1)
vrfSecrets := crypto.GenerateVRFSecrets()
- var stateProofID merklesignature.Verifier
+ var stateProofID merklesignature.Commitment
crypto.RandBytes(stateProofID[:])
return AccountData{
diff --git a/data/bookkeeping/block.go b/data/bookkeeping/block.go
index 584e0394d..702344235 100644
--- a/data/bookkeeping/block.go
+++ b/data/bookkeeping/block.go
@@ -119,10 +119,10 @@ type (
// started being supported).
TxnCounter uint64 `codec:"tc"`
- // CompactCert tracks the state of compact certs, potentially
- // for multiple types of certs.
- //msgp:sort protocol.CompactCertType protocol.SortCompactCertType
- CompactCert map[protocol.CompactCertType]CompactCertState `codec:"cc,allocbound=protocol.NumCompactCertTypes"`
+ // StateProofTracking tracks the status of the state proofs, potentially
+ // for multiple types of ASPs (Algorand's State Proofs).
+ //msgp:sort protocol.StateProofType protocol.SortStateProofType
+ StateProofTracking map[protocol.StateProofType]StateProofTrackingData `codec:"spt,allocbound=protocol.NumStateProofTypes"`
// ParticipationUpdates contains the information needed to mark
// certain accounts offline because their participation keys expired
@@ -214,27 +214,26 @@ type (
NextProtocolSwitchOn basics.Round `codec:"nextswitch"`
}
- // CompactCertState tracks the state of compact certificates.
- CompactCertState struct {
+ // StateProofTrackingData tracks the status of state proofs.
+ StateProofTrackingData struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
- // CompactCertVoters is the root of a Merkle tree containing the
- // online accounts that will help sign a compact certificate. The
- // Merkle root, and the compact certificate, happen on blocks that
- // are a multiple of ConsensusParams.CompactCertRounds. For blocks
- // that are not a multiple of ConsensusParams.CompactCertRounds,
+ // StateProofVotersCommitment is the root of a vector commitment containing the
+ // online accounts that will help sign a state proof. The
+ // VC root, and the state proof, happen on blocks that
+ // are a multiple of ConsensusParams.StateProofRounds. For blocks
+ // that are not a multiple of ConsensusParams.StateProofRounds,
// this value is zero.
- CompactCertVoters crypto.GenericDigest `codec:"v"`
+ StateProofVotersCommitment crypto.GenericDigest `codec:"v"`
- // CompactCertVotersTotal is the total number of microalgos held by
- // the accounts in CompactCertVoters (or zero, if the merkle root is
- // zero). This is intended for computing the threshold of votes to
- // expect from CompactCertVoters.
- CompactCertVotersTotal basics.MicroAlgos `codec:"t"`
+ // StateProofOnlineTotalWeight is the total number of microalgos held by the online accounts
+ // during the StateProof round (or zero, if the merkle root is zero - no commitment for StateProof voters).
+ // This is intended for computing the threshold of votes to expect from StateProofVotersCommitment.
+ StateProofOnlineTotalWeight basics.MicroAlgos `codec:"t"`
- // CompactCertNextRound is the next round for which we will accept
- // a CompactCert transaction.
- CompactCertNextRound basics.Round `codec:"n"`
+ // StateProofNextRound is the next round for which we will accept
+ // a StateProof transaction.
+ StateProofNextRound basics.Round `codec:"n"`
}
// A Block contains the Payset and metadata corresponding to a given Round.
diff --git a/data/bookkeeping/block_test.go b/data/bookkeeping/block_test.go
index d57736701..5330c85cd 100644
--- a/data/bookkeeping/block_test.go
+++ b/data/bookkeeping/block_test.go
@@ -863,7 +863,7 @@ func TestBlockHeader_Serialization(t *testing.T) {
a := require.New(t)
// This serialized block header was generated from V32 e2e test, using the old BlockHeader struct which contains only TxnCommitments SHA512_256 value
- serializedBlkHdr := "8fa26363810081a16ecd0200a466656573c42007dacb4b6d9ed141b17576bd459ae6421d486da3d4ef2247c409a396b82ea221a466726163ce1dcd64fea367656ea7746573742d7631a26768c42032cb340d569e1f9e4d9690c1ba04d77759bae6f353e13af1becf42dcd7d3bdeba470726576c420a2270bc90e3cc48d56081b3b85c15d6a10e14303a6d42ca2537954ce90beec40a570726f746fa6667574757265a472617465ce0ee6b27fa3726e6402a6727763616c72ce0007a120a3727764c420ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa473656564c420a19005a25abad1ad28ec2298baeda9a17693a9ef12127a5ff3e5fa9258c7e9eba2746306a27473ce625ed0eaa374786ec420508f9330176e6064767b0fb7eb0e8bf68ffbaf995a4c7b37ca0217c5a82b4a60"
+ serializedBlkHdr := "8fa3737074810081a16ecd0200a466656573c42007dacb4b6d9ed141b17576bd459ae6421d486da3d4ef2247c409a396b82ea221a466726163ce1dcd64fea367656ea7746573742d7631a26768c42032cb340d569e1f9e4d9690c1ba04d77759bae6f353e13af1becf42dcd7d3bdeba470726576c420a2270bc90e3cc48d56081b3b85c15d6a10e14303a6d42ca2537954ce90beec40a570726f746fa6667574757265a472617465ce0ee6b27fa3726e6402a6727763616c72ce0007a120a3727764c420ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa473656564c420a19005a25abad1ad28ec2298baeda9a17693a9ef12127a5ff3e5fa9258c7e9eba2746306a27473ce625ed0eaa374786ec420508f9330176e6064767b0fb7eb0e8bf68ffbaf995a4c7b37ca0217c5a82b4a60"
bytesBlkHdr, err := hex.DecodeString(serializedBlkHdr)
a.NoError(err)
diff --git a/data/bookkeeping/lightBlockHeader.go b/data/bookkeeping/lightBlockHeader.go
new file mode 100644
index 000000000..e9530faa1
--- /dev/null
+++ b/data/bookkeeping/lightBlockHeader.go
@@ -0,0 +1,61 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package bookkeeping
+
+import (
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/committee"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+// LightBlockHeader represents a minimal block header. It contains all the necessary fields
+// for verifying proofs on transactions.
+// In addition, this struct is designed to be used on environments where only SHA256 function exists
+type LightBlockHeader struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ /*
+ The seed is to mitigate against the (remote) possibility that an attacker can put itself in better position to
+ find a collision in the future -- perhaps with quantum -- e.g., by doing some precomputation,
+ knowing or even controlling the data to be hashed, etc. Starting the hash data with a value that is
+ uncontrollable and unpredictable (to today’s attackers) makes the attacker’s task more like breaking 2nd
+ preimage resistance (2PR/TCR), versus the easier goal of merely breaking collision resistance.
+ In addition, we make sure that the Seed (The unpredictable value) would be the first field that gets
+ hashed (give it the lowest codec value in the LightBlockHeader struct) to mitigate a collision attack
+ on the merkle damgard construction.
+ */
+ Seed committee.Seed `codec:"0"`
+ Round basics.Round `codec:"r"`
+ GenesisHash crypto.Digest `codec:"gh"`
+ Sha256TxnCommitment crypto.GenericDigest `codec:"tc,allocbound=crypto.Sha256Size"`
+}
+
+// ToLightBlockHeader creates returns a LightBlockHeader from a given block header
+func (bh *BlockHeader) ToLightBlockHeader() LightBlockHeader {
+ return LightBlockHeader{
+ Seed: bh.Seed,
+ GenesisHash: bh.GenesisHash,
+ Round: bh.Round,
+ Sha256TxnCommitment: bh.Sha256Commitment[:],
+ }
+}
+
+// ToBeHashed implements the crypto.Hashable interface
+func (bh *LightBlockHeader) ToBeHashed() (protocol.HashID, []byte) {
+ return protocol.BlockHeader256, protocol.Encode(bh)
+}
diff --git a/data/bookkeeping/lightBlockHeader_test.go b/data/bookkeeping/lightBlockHeader_test.go
new file mode 100644
index 000000000..c9d39c008
--- /dev/null
+++ b/data/bookkeeping/lightBlockHeader_test.go
@@ -0,0 +1,64 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package bookkeeping
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/committee"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestConvertSha256Header(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ var gh crypto.Digest
+ crypto.RandBytes(gh[:])
+
+ var txnCommit TxnCommitments
+ crypto.RandBytes(txnCommit.Sha256Commitment[:])
+ blockHeader := BlockHeader{Round: 200, GenesisHash: gh, TxnCommitments: txnCommit}
+ sha256Header := blockHeader.ToLightBlockHeader()
+
+ a.Equal(basics.Round(200), sha256Header.Round)
+ a.Equal(txnCommit.Sha256Commitment[:], []byte(sha256Header.Sha256TxnCommitment))
+ a.Equal(gh, sha256Header.GenesisHash)
+}
+
+func TestFirstFieldsAreCommitteeSeed(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ var gh crypto.Digest
+ crypto.RandBytes(gh[:])
+
+ blockHeader := LightBlockHeader{
+ Seed: committee.Seed{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a'},
+ Round: 200,
+ GenesisHash: gh,
+ }
+
+ o := protocol.Encode(&blockHeader)
+
+ a.True(strings.HasPrefix(string(o[5:]), "123456789a"))
+}
diff --git a/data/bookkeeping/msgp_gen.go b/data/bookkeeping/msgp_gen.go
index 0e994fcff..de90680aa 100644
--- a/data/bookkeeping/msgp_gen.go
+++ b/data/bookkeeping/msgp_gen.go
@@ -38,14 +38,6 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
-// CompactCertState
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
// Genesis
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -62,6 +54,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// LightBlockHeader
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// ParticipationUpdates
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -78,6 +78,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// StateProofTrackingData
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// TxnCommitments
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -101,75 +109,75 @@ func (z *Block) MarshalMsg(b []byte) (o []byte) {
// omitempty: check for empty values
zb0004Len := uint32(26)
var zb0004Mask uint32 /* 31 bits */
- if len((*z).BlockHeader.CompactCert) == 0 {
+ if (*z).BlockHeader.RewardsState.RewardsLevel == 0 {
zb0004Len--
zb0004Mask |= 0x20
}
- if (*z).BlockHeader.RewardsState.RewardsLevel == 0 {
+ if (*z).BlockHeader.RewardsState.FeeSink.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x40
}
- if (*z).BlockHeader.RewardsState.FeeSink.MsgIsZero() {
+ if (*z).BlockHeader.RewardsState.RewardsResidue == 0 {
zb0004Len--
zb0004Mask |= 0x80
}
- if (*z).BlockHeader.RewardsState.RewardsResidue == 0 {
+ if (*z).BlockHeader.GenesisID == "" {
zb0004Len--
zb0004Mask |= 0x100
}
- if (*z).BlockHeader.GenesisID == "" {
+ if (*z).BlockHeader.GenesisHash.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x200
}
- if (*z).BlockHeader.GenesisHash.MsgIsZero() {
+ if (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x400
}
- if (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
+ if (*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x800
}
- if (*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero() {
+ if (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x1000
}
- if (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
+ if (*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0 {
zb0004Len--
zb0004Mask |= 0x2000
}
- if (*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0 {
+ if len((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
zb0004Len--
zb0004Mask |= 0x4000
}
- if len((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
+ if (*z).BlockHeader.Branch.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x8000
}
- if (*z).BlockHeader.Branch.MsgIsZero() {
+ if (*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x10000
}
- if (*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() {
+ if (*z).BlockHeader.RewardsState.RewardsRate == 0 {
zb0004Len--
zb0004Mask |= 0x20000
}
- if (*z).BlockHeader.RewardsState.RewardsRate == 0 {
+ if (*z).BlockHeader.Round.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x40000
}
- if (*z).BlockHeader.Round.MsgIsZero() {
+ if (*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x80000
}
- if (*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
+ if (*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x100000
}
- if (*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
+ if (*z).BlockHeader.Seed.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x200000
}
- if (*z).BlockHeader.Seed.MsgIsZero() {
+ if len((*z).BlockHeader.StateProofTracking) == 0 {
zb0004Len--
zb0004Mask |= 0x400000
}
@@ -209,71 +217,51 @@ func (z *Block) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendMapHeader(o, zb0004Len)
if zb0004Len != 0 {
if (zb0004Mask & 0x20) == 0 { // if not empty
- // string "cc"
- o = append(o, 0xa2, 0x63, 0x63)
- if (*z).BlockHeader.CompactCert == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendMapHeader(o, uint32(len((*z).BlockHeader.CompactCert)))
- }
- zb0001_keys := make([]protocol.CompactCertType, 0, len((*z).BlockHeader.CompactCert))
- for zb0001 := range (*z).BlockHeader.CompactCert {
- zb0001_keys = append(zb0001_keys, zb0001)
- }
- sort.Sort(protocol.SortCompactCertType(zb0001_keys))
- for _, zb0001 := range zb0001_keys {
- zb0002 := (*z).BlockHeader.CompactCert[zb0001]
- _ = zb0002
- o = zb0001.MarshalMsg(o)
- o = zb0002.MarshalMsg(o)
- }
- }
- if (zb0004Mask & 0x40) == 0 { // if not empty
// string "earn"
o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e)
o = msgp.AppendUint64(o, (*z).BlockHeader.RewardsState.RewardsLevel)
}
- if (zb0004Mask & 0x80) == 0 { // if not empty
+ if (zb0004Mask & 0x40) == 0 { // if not empty
// string "fees"
o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73)
o = (*z).BlockHeader.RewardsState.FeeSink.MarshalMsg(o)
}
- if (zb0004Mask & 0x100) == 0 { // if not empty
+ if (zb0004Mask & 0x80) == 0 { // if not empty
// string "frac"
o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63)
o = msgp.AppendUint64(o, (*z).BlockHeader.RewardsState.RewardsResidue)
}
- if (zb0004Mask & 0x200) == 0 { // if not empty
+ if (zb0004Mask & 0x100) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).BlockHeader.GenesisID)
}
- if (zb0004Mask & 0x400) == 0 { // if not empty
+ if (zb0004Mask & 0x200) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).BlockHeader.GenesisHash.MarshalMsg(o)
}
- if (zb0004Mask & 0x800) == 0 { // if not empty
+ if (zb0004Mask & 0x400) == 0 { // if not empty
// string "nextbefore"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
o = (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MarshalMsg(o)
}
- if (zb0004Mask & 0x1000) == 0 { // if not empty
+ if (zb0004Mask & 0x800) == 0 { // if not empty
// string "nextproto"
o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).BlockHeader.UpgradeState.NextProtocol.MarshalMsg(o)
}
- if (zb0004Mask & 0x2000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000) == 0 { // if not empty
// string "nextswitch"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68)
o = (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MarshalMsg(o)
}
- if (zb0004Mask & 0x4000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000) == 0 { // if not empty
// string "nextyes"
o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73)
o = msgp.AppendUint64(o, (*z).BlockHeader.UpgradeState.NextProtocolApprovals)
}
- if (zb0004Mask & 0x8000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000) == 0 { // if not empty
// string "partupdrmv"
o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
if (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts == nil {
@@ -285,41 +273,61 @@ func (z *Block) MarshalMsg(b []byte) (o []byte) {
o = (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].MarshalMsg(o)
}
}
- if (zb0004Mask & 0x10000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000) == 0 { // if not empty
// string "prev"
o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76)
o = (*z).BlockHeader.Branch.MarshalMsg(o)
}
- if (zb0004Mask & 0x20000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000) == 0 { // if not empty
// string "proto"
o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o)
}
- if (zb0004Mask & 0x40000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000) == 0 { // if not empty
// string "rate"
o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65)
o = msgp.AppendUint64(o, (*z).BlockHeader.RewardsState.RewardsRate)
}
- if (zb0004Mask & 0x80000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000) == 0 { // if not empty
// string "rnd"
o = append(o, 0xa3, 0x72, 0x6e, 0x64)
o = (*z).BlockHeader.Round.MarshalMsg(o)
}
- if (zb0004Mask & 0x100000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000) == 0 { // if not empty
// string "rwcalr"
o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72)
o = (*z).BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o)
}
- if (zb0004Mask & 0x200000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000) == 0 { // if not empty
// string "rwd"
o = append(o, 0xa3, 0x72, 0x77, 0x64)
o = (*z).BlockHeader.RewardsState.RewardsPool.MarshalMsg(o)
}
- if (zb0004Mask & 0x400000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000) == 0 { // if not empty
// string "seed"
o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64)
o = (*z).BlockHeader.Seed.MarshalMsg(o)
}
+ if (zb0004Mask & 0x400000) == 0 { // if not empty
+ // string "spt"
+ o = append(o, 0xa3, 0x73, 0x70, 0x74)
+ if (*z).BlockHeader.StateProofTracking == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).BlockHeader.StateProofTracking)))
+ }
+ zb0001_keys := make([]protocol.StateProofType, 0, len((*z).BlockHeader.StateProofTracking))
+ for zb0001 := range (*z).BlockHeader.StateProofTracking {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(protocol.SortStateProofType(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).BlockHeader.StateProofTracking[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ o = zb0002.MarshalMsg(o)
+ }
+ }
if (zb0004Mask & 0x800000) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
@@ -572,34 +580,34 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0007 bool
zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCert")
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
- if zb0006 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumCompactCertTypes))
- err = msgp.WrapError(err, "struct-from-array", "CompactCert")
+ if zb0006 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumStateProofTypes))
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
if zb0007 {
- (*z).BlockHeader.CompactCert = nil
- } else if (*z).BlockHeader.CompactCert == nil {
- (*z).BlockHeader.CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0006)
+ (*z).BlockHeader.StateProofTracking = nil
+ } else if (*z).BlockHeader.StateProofTracking == nil {
+ (*z).BlockHeader.StateProofTracking = make(map[protocol.StateProofType]StateProofTrackingData, zb0006)
}
for zb0006 > 0 {
- var zb0001 protocol.CompactCertType
- var zb0002 CompactCertState
+ var zb0001 protocol.StateProofType
+ var zb0002 StateProofTrackingData
zb0006--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCert")
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
bts, err = zb0002.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCert", zb0001)
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking", zb0001)
return
}
- (*z).BlockHeader.CompactCert[zb0001] = zb0002
+ (*z).BlockHeader.StateProofTracking[zb0001] = zb0002
}
}
if zb0004 > 0 {
@@ -800,39 +808,39 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "TxnCounter")
return
}
- case "cc":
+ case "spt":
var zb0010 int
var zb0011 bool
zb0010, zb0011, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "CompactCert")
+ err = msgp.WrapError(err, "StateProofTracking")
return
}
- if zb0010 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0010), uint64(protocol.NumCompactCertTypes))
- err = msgp.WrapError(err, "CompactCert")
+ if zb0010 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0010), uint64(protocol.NumStateProofTypes))
+ err = msgp.WrapError(err, "StateProofTracking")
return
}
if zb0011 {
- (*z).BlockHeader.CompactCert = nil
- } else if (*z).BlockHeader.CompactCert == nil {
- (*z).BlockHeader.CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0010)
+ (*z).BlockHeader.StateProofTracking = nil
+ } else if (*z).BlockHeader.StateProofTracking == nil {
+ (*z).BlockHeader.StateProofTracking = make(map[protocol.StateProofType]StateProofTrackingData, zb0010)
}
for zb0010 > 0 {
- var zb0001 protocol.CompactCertType
- var zb0002 CompactCertState
+ var zb0001 protocol.StateProofType
+ var zb0002 StateProofTrackingData
zb0010--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "CompactCert")
+ err = msgp.WrapError(err, "StateProofTracking")
return
}
bts, err = zb0002.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "CompactCert", zb0001)
+ err = msgp.WrapError(err, "StateProofTracking", zb0001)
return
}
- (*z).BlockHeader.CompactCert[zb0001] = zb0002
+ (*z).BlockHeader.StateProofTracking[zb0001] = zb0002
}
case "partupdrmv":
var zb0012 int
@@ -887,9 +895,9 @@ func (_ *Block) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Block) Msgsize() (s int) {
- s = 3 + 4 + (*z).BlockHeader.Round.Msgsize() + 5 + (*z).BlockHeader.Branch.Msgsize() + 5 + (*z).BlockHeader.Seed.Msgsize() + 4 + (*z).BlockHeader.TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).BlockHeader.TxnCommitments.Sha256Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).BlockHeader.GenesisID) + 3 + (*z).BlockHeader.GenesisHash.Msgsize() + 5 + (*z).BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 3 + msgp.MapHeaderSize
- if (*z).BlockHeader.CompactCert != nil {
- for zb0001, zb0002 := range (*z).BlockHeader.CompactCert {
+ s = 3 + 4 + (*z).BlockHeader.Round.Msgsize() + 5 + (*z).BlockHeader.Branch.Msgsize() + 5 + (*z).BlockHeader.Seed.Msgsize() + 4 + (*z).BlockHeader.TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).BlockHeader.TxnCommitments.Sha256Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).BlockHeader.GenesisID) + 3 + (*z).BlockHeader.GenesisHash.Msgsize() + 5 + (*z).BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + msgp.MapHeaderSize
+ if (*z).BlockHeader.StateProofTracking != nil {
+ for zb0001, zb0002 := range (*z).BlockHeader.StateProofTracking {
_ = zb0001
_ = zb0002
s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
@@ -905,7 +913,7 @@ func (z *Block) Msgsize() (s int) {
// MsgIsZero returns whether this is a zero value
func (z *Block) MsgIsZero() bool {
- return ((*z).BlockHeader.Round.MsgIsZero()) && ((*z).BlockHeader.Branch.MsgIsZero()) && ((*z).BlockHeader.Seed.MsgIsZero()) && ((*z).BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).BlockHeader.TimeStamp == 0) && ((*z).BlockHeader.GenesisID == "") && ((*z).BlockHeader.GenesisHash.MsgIsZero()) && ((*z).BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).BlockHeader.RewardsState.RewardsRate == 0) && ((*z).BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).BlockHeader.TxnCounter == 0) && (len((*z).BlockHeader.CompactCert) == 0) && (len((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).Payset.MsgIsZero())
+ return ((*z).BlockHeader.Round.MsgIsZero()) && ((*z).BlockHeader.Branch.MsgIsZero()) && ((*z).BlockHeader.Seed.MsgIsZero()) && ((*z).BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).BlockHeader.TimeStamp == 0) && ((*z).BlockHeader.GenesisID == "") && ((*z).BlockHeader.GenesisHash.MsgIsZero()) && ((*z).BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).BlockHeader.RewardsState.RewardsRate == 0) && ((*z).BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).BlockHeader.TxnCounter == 0) && (len((*z).BlockHeader.StateProofTracking) == 0) && (len((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).Payset.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
@@ -942,75 +950,75 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte) {
// omitempty: check for empty values
zb0004Len := uint32(25)
var zb0004Mask uint32 /* 30 bits */
- if len((*z).CompactCert) == 0 {
+ if (*z).RewardsState.RewardsLevel == 0 {
zb0004Len--
zb0004Mask |= 0x20
}
- if (*z).RewardsState.RewardsLevel == 0 {
+ if (*z).RewardsState.FeeSink.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x40
}
- if (*z).RewardsState.FeeSink.MsgIsZero() {
+ if (*z).RewardsState.RewardsResidue == 0 {
zb0004Len--
zb0004Mask |= 0x80
}
- if (*z).RewardsState.RewardsResidue == 0 {
+ if (*z).GenesisID == "" {
zb0004Len--
zb0004Mask |= 0x100
}
- if (*z).GenesisID == "" {
+ if (*z).GenesisHash.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x200
}
- if (*z).GenesisHash.MsgIsZero() {
+ if (*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x400
}
- if (*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
+ if (*z).UpgradeState.NextProtocol.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x800
}
- if (*z).UpgradeState.NextProtocol.MsgIsZero() {
+ if (*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x1000
}
- if (*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
+ if (*z).UpgradeState.NextProtocolApprovals == 0 {
zb0004Len--
zb0004Mask |= 0x2000
}
- if (*z).UpgradeState.NextProtocolApprovals == 0 {
+ if len((*z).ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
zb0004Len--
zb0004Mask |= 0x4000
}
- if len((*z).ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
+ if (*z).Branch.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x8000
}
- if (*z).Branch.MsgIsZero() {
+ if (*z).UpgradeState.CurrentProtocol.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x10000
}
- if (*z).UpgradeState.CurrentProtocol.MsgIsZero() {
+ if (*z).RewardsState.RewardsRate == 0 {
zb0004Len--
zb0004Mask |= 0x20000
}
- if (*z).RewardsState.RewardsRate == 0 {
+ if (*z).Round.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x40000
}
- if (*z).Round.MsgIsZero() {
+ if (*z).RewardsState.RewardsRecalculationRound.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x80000
}
- if (*z).RewardsState.RewardsRecalculationRound.MsgIsZero() {
+ if (*z).RewardsState.RewardsPool.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x100000
}
- if (*z).RewardsState.RewardsPool.MsgIsZero() {
+ if (*z).Seed.MsgIsZero() {
zb0004Len--
zb0004Mask |= 0x200000
}
- if (*z).Seed.MsgIsZero() {
+ if len((*z).StateProofTracking) == 0 {
zb0004Len--
zb0004Mask |= 0x400000
}
@@ -1046,71 +1054,51 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendMapHeader(o, zb0004Len)
if zb0004Len != 0 {
if (zb0004Mask & 0x20) == 0 { // if not empty
- // string "cc"
- o = append(o, 0xa2, 0x63, 0x63)
- if (*z).CompactCert == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendMapHeader(o, uint32(len((*z).CompactCert)))
- }
- zb0001_keys := make([]protocol.CompactCertType, 0, len((*z).CompactCert))
- for zb0001 := range (*z).CompactCert {
- zb0001_keys = append(zb0001_keys, zb0001)
- }
- sort.Sort(protocol.SortCompactCertType(zb0001_keys))
- for _, zb0001 := range zb0001_keys {
- zb0002 := (*z).CompactCert[zb0001]
- _ = zb0002
- o = zb0001.MarshalMsg(o)
- o = zb0002.MarshalMsg(o)
- }
- }
- if (zb0004Mask & 0x40) == 0 { // if not empty
// string "earn"
o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e)
o = msgp.AppendUint64(o, (*z).RewardsState.RewardsLevel)
}
- if (zb0004Mask & 0x80) == 0 { // if not empty
+ if (zb0004Mask & 0x40) == 0 { // if not empty
// string "fees"
o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73)
o = (*z).RewardsState.FeeSink.MarshalMsg(o)
}
- if (zb0004Mask & 0x100) == 0 { // if not empty
+ if (zb0004Mask & 0x80) == 0 { // if not empty
// string "frac"
o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63)
o = msgp.AppendUint64(o, (*z).RewardsState.RewardsResidue)
}
- if (zb0004Mask & 0x200) == 0 { // if not empty
+ if (zb0004Mask & 0x100) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).GenesisID)
}
- if (zb0004Mask & 0x400) == 0 { // if not empty
+ if (zb0004Mask & 0x200) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).GenesisHash.MarshalMsg(o)
}
- if (zb0004Mask & 0x800) == 0 { // if not empty
+ if (zb0004Mask & 0x400) == 0 { // if not empty
// string "nextbefore"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
o = (*z).UpgradeState.NextProtocolVoteBefore.MarshalMsg(o)
}
- if (zb0004Mask & 0x1000) == 0 { // if not empty
+ if (zb0004Mask & 0x800) == 0 { // if not empty
// string "nextproto"
o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).UpgradeState.NextProtocol.MarshalMsg(o)
}
- if (zb0004Mask & 0x2000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000) == 0 { // if not empty
// string "nextswitch"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68)
o = (*z).UpgradeState.NextProtocolSwitchOn.MarshalMsg(o)
}
- if (zb0004Mask & 0x4000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000) == 0 { // if not empty
// string "nextyes"
o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73)
o = msgp.AppendUint64(o, (*z).UpgradeState.NextProtocolApprovals)
}
- if (zb0004Mask & 0x8000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000) == 0 { // if not empty
// string "partupdrmv"
o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
if (*z).ParticipationUpdates.ExpiredParticipationAccounts == nil {
@@ -1122,41 +1110,61 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte) {
o = (*z).ParticipationUpdates.ExpiredParticipationAccounts[zb0003].MarshalMsg(o)
}
}
- if (zb0004Mask & 0x10000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000) == 0 { // if not empty
// string "prev"
o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76)
o = (*z).Branch.MarshalMsg(o)
}
- if (zb0004Mask & 0x20000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000) == 0 { // if not empty
// string "proto"
o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).UpgradeState.CurrentProtocol.MarshalMsg(o)
}
- if (zb0004Mask & 0x40000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000) == 0 { // if not empty
// string "rate"
o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65)
o = msgp.AppendUint64(o, (*z).RewardsState.RewardsRate)
}
- if (zb0004Mask & 0x80000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000) == 0 { // if not empty
// string "rnd"
o = append(o, 0xa3, 0x72, 0x6e, 0x64)
o = (*z).Round.MarshalMsg(o)
}
- if (zb0004Mask & 0x100000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000) == 0 { // if not empty
// string "rwcalr"
o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72)
o = (*z).RewardsState.RewardsRecalculationRound.MarshalMsg(o)
}
- if (zb0004Mask & 0x200000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000) == 0 { // if not empty
// string "rwd"
o = append(o, 0xa3, 0x72, 0x77, 0x64)
o = (*z).RewardsState.RewardsPool.MarshalMsg(o)
}
- if (zb0004Mask & 0x400000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000) == 0 { // if not empty
// string "seed"
o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64)
o = (*z).Seed.MarshalMsg(o)
}
+ if (zb0004Mask & 0x400000) == 0 { // if not empty
+ // string "spt"
+ o = append(o, 0xa3, 0x73, 0x70, 0x74)
+ if (*z).StateProofTracking == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).StateProofTracking)))
+ }
+ zb0001_keys := make([]protocol.StateProofType, 0, len((*z).StateProofTracking))
+ for zb0001 := range (*z).StateProofTracking {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(protocol.SortStateProofType(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).StateProofTracking[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ o = zb0002.MarshalMsg(o)
+ }
+ }
if (zb0004Mask & 0x800000) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
@@ -1404,34 +1412,34 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0007 bool
zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCert")
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
- if zb0006 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumCompactCertTypes))
- err = msgp.WrapError(err, "struct-from-array", "CompactCert")
+ if zb0006 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumStateProofTypes))
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
if zb0007 {
- (*z).CompactCert = nil
- } else if (*z).CompactCert == nil {
- (*z).CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0006)
+ (*z).StateProofTracking = nil
+ } else if (*z).StateProofTracking == nil {
+ (*z).StateProofTracking = make(map[protocol.StateProofType]StateProofTrackingData, zb0006)
}
for zb0006 > 0 {
- var zb0001 protocol.CompactCertType
- var zb0002 CompactCertState
+ var zb0001 protocol.StateProofType
+ var zb0002 StateProofTrackingData
zb0006--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCert")
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
bts, err = zb0002.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCert", zb0001)
+ err = msgp.WrapError(err, "struct-from-array", "StateProofTracking", zb0001)
return
}
- (*z).CompactCert[zb0001] = zb0002
+ (*z).StateProofTracking[zb0001] = zb0002
}
}
if zb0004 > 0 {
@@ -1624,39 +1632,39 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "TxnCounter")
return
}
- case "cc":
+ case "spt":
var zb0010 int
var zb0011 bool
zb0010, zb0011, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "CompactCert")
+ err = msgp.WrapError(err, "StateProofTracking")
return
}
- if zb0010 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0010), uint64(protocol.NumCompactCertTypes))
- err = msgp.WrapError(err, "CompactCert")
+ if zb0010 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0010), uint64(protocol.NumStateProofTypes))
+ err = msgp.WrapError(err, "StateProofTracking")
return
}
if zb0011 {
- (*z).CompactCert = nil
- } else if (*z).CompactCert == nil {
- (*z).CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0010)
+ (*z).StateProofTracking = nil
+ } else if (*z).StateProofTracking == nil {
+ (*z).StateProofTracking = make(map[protocol.StateProofType]StateProofTrackingData, zb0010)
}
for zb0010 > 0 {
- var zb0001 protocol.CompactCertType
- var zb0002 CompactCertState
+ var zb0001 protocol.StateProofType
+ var zb0002 StateProofTrackingData
zb0010--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "CompactCert")
+ err = msgp.WrapError(err, "StateProofTracking")
return
}
bts, err = zb0002.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "CompactCert", zb0001)
+ err = msgp.WrapError(err, "StateProofTracking", zb0001)
return
}
- (*z).CompactCert[zb0001] = zb0002
+ (*z).StateProofTracking[zb0001] = zb0002
}
case "partupdrmv":
var zb0012 int
@@ -1705,9 +1713,9 @@ func (_ *BlockHeader) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BlockHeader) Msgsize() (s int) {
- s = 3 + 4 + (*z).Round.Msgsize() + 5 + (*z).Branch.Msgsize() + 5 + (*z).Seed.Msgsize() + 4 + (*z).TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).TxnCommitments.Sha256Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).GenesisID) + 3 + (*z).GenesisHash.Msgsize() + 5 + (*z).RewardsState.FeeSink.Msgsize() + 4 + (*z).RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 3 + msgp.MapHeaderSize
- if (*z).CompactCert != nil {
- for zb0001, zb0002 := range (*z).CompactCert {
+ s = 3 + 4 + (*z).Round.Msgsize() + 5 + (*z).Branch.Msgsize() + 5 + (*z).Seed.Msgsize() + 4 + (*z).TxnCommitments.NativeSha512_256Commitment.Msgsize() + 7 + (*z).TxnCommitments.Sha256Commitment.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).GenesisID) + 3 + (*z).GenesisHash.Msgsize() + 5 + (*z).RewardsState.FeeSink.Msgsize() + 4 + (*z).RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + msgp.MapHeaderSize
+ if (*z).StateProofTracking != nil {
+ for zb0001, zb0002 := range (*z).StateProofTracking {
_ = zb0001
_ = zb0002
s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
@@ -1722,159 +1730,7 @@ func (z *BlockHeader) Msgsize() (s int) {
// MsgIsZero returns whether this is a zero value
func (z *BlockHeader) MsgIsZero() bool {
- return ((*z).Round.MsgIsZero()) && ((*z).Branch.MsgIsZero()) && ((*z).Seed.MsgIsZero()) && ((*z).TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).TimeStamp == 0) && ((*z).GenesisID == "") && ((*z).GenesisHash.MsgIsZero()) && ((*z).RewardsState.FeeSink.MsgIsZero()) && ((*z).RewardsState.RewardsPool.MsgIsZero()) && ((*z).RewardsState.RewardsLevel == 0) && ((*z).RewardsState.RewardsRate == 0) && ((*z).RewardsState.RewardsResidue == 0) && ((*z).RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocolApprovals == 0) && ((*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).UpgradeVote.UpgradeApprove == false) && ((*z).TxnCounter == 0) && (len((*z).CompactCert) == 0) && (len((*z).ParticipationUpdates.ExpiredParticipationAccounts) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *CompactCertState) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0001Len := uint32(3)
- var zb0001Mask uint8 /* 4 bits */
- if (*z).CompactCertNextRound.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x2
- }
- if (*z).CompactCertVotersTotal.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x4
- }
- if (*z).CompactCertVoters.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x8
- }
- // variable map header, size zb0001Len
- o = append(o, 0x80|uint8(zb0001Len))
- if zb0001Len != 0 {
- if (zb0001Mask & 0x2) == 0 { // if not empty
- // string "n"
- o = append(o, 0xa1, 0x6e)
- o = (*z).CompactCertNextRound.MarshalMsg(o)
- }
- if (zb0001Mask & 0x4) == 0 { // if not empty
- // string "t"
- o = append(o, 0xa1, 0x74)
- o = (*z).CompactCertVotersTotal.MarshalMsg(o)
- }
- if (zb0001Mask & 0x8) == 0 { // if not empty
- // string "v"
- o = append(o, 0xa1, 0x76)
- o = (*z).CompactCertVoters.MarshalMsg(o)
- }
- }
- return
-}
-
-func (_ *CompactCertState) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*CompactCertState)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *CompactCertState) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0001 int
- var zb0002 bool
- zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).CompactCertVoters.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCertVoters")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).CompactCertVotersTotal.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCertVotersTotal")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).CompactCertNextRound.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompactCertNextRound")
- return
- }
- }
- if zb0001 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0001)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 {
- (*z) = CompactCertState{}
- }
- for zb0001 > 0 {
- zb0001--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "v":
- bts, err = (*z).CompactCertVoters.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "CompactCertVoters")
- return
- }
- case "t":
- bts, err = (*z).CompactCertVotersTotal.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "CompactCertVotersTotal")
- return
- }
- case "n":
- bts, err = (*z).CompactCertNextRound.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "CompactCertNextRound")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *CompactCertState) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*CompactCertState)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *CompactCertState) Msgsize() (s int) {
- s = 1 + 2 + (*z).CompactCertVoters.Msgsize() + 2 + (*z).CompactCertVotersTotal.Msgsize() + 2 + (*z).CompactCertNextRound.Msgsize()
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *CompactCertState) MsgIsZero() bool {
- return ((*z).CompactCertVoters.MsgIsZero()) && ((*z).CompactCertVotersTotal.MsgIsZero()) && ((*z).CompactCertNextRound.MsgIsZero())
+ return ((*z).Round.MsgIsZero()) && ((*z).Branch.MsgIsZero()) && ((*z).Seed.MsgIsZero()) && ((*z).TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).TimeStamp == 0) && ((*z).GenesisID == "") && ((*z).GenesisHash.MsgIsZero()) && ((*z).RewardsState.FeeSink.MsgIsZero()) && ((*z).RewardsState.RewardsPool.MsgIsZero()) && ((*z).RewardsState.RewardsLevel == 0) && ((*z).RewardsState.RewardsRate == 0) && ((*z).RewardsState.RewardsResidue == 0) && ((*z).RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocolApprovals == 0) && ((*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).UpgradeVote.UpgradeApprove == false) && ((*z).TxnCounter == 0) && (len((*z).StateProofTracking) == 0) && (len((*z).ParticipationUpdates.ExpiredParticipationAccounts) == 0)
}
// MarshalMsg implements msgp.Marshaler
@@ -2349,6 +2205,181 @@ func (z *GenesisAllocation) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *LightBlockHeader) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(4)
+ var zb0001Mask uint8 /* 5 bits */
+ if (*z).Seed.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x1
+ }
+ if (*z).GenesisHash.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ if (*z).Round.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x8
+ }
+ if (*z).Sha256TxnCommitment.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x10
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x1) == 0 { // if not empty
+ // string "0"
+ o = append(o, 0xa1, 0x30)
+ o = (*z).Seed.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "gh"
+ o = append(o, 0xa2, 0x67, 0x68)
+ o = (*z).GenesisHash.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x8) == 0 { // if not empty
+ // string "r"
+ o = append(o, 0xa1, 0x72)
+ o = (*z).Round.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x10) == 0 { // if not empty
+ // string "tc"
+ o = append(o, 0xa2, 0x74, 0x63)
+ o = (*z).Sha256TxnCommitment.MarshalMsg(o)
+ }
+ }
+ return
+}
+
+func (_ *LightBlockHeader) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*LightBlockHeader)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *LightBlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Seed.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Seed")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Round.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Round")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).GenesisHash.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "GenesisHash")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Sha256TxnCommitment.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Sha256TxnCommitment")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = LightBlockHeader{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "0":
+ bts, err = (*z).Seed.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Seed")
+ return
+ }
+ case "r":
+ bts, err = (*z).Round.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Round")
+ return
+ }
+ case "gh":
+ bts, err = (*z).GenesisHash.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "GenesisHash")
+ return
+ }
+ case "tc":
+ bts, err = (*z).Sha256TxnCommitment.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Sha256TxnCommitment")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *LightBlockHeader) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*LightBlockHeader)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *LightBlockHeader) Msgsize() (s int) {
+ s = 1 + 2 + (*z).Seed.Msgsize() + 2 + (*z).Round.Msgsize() + 3 + (*z).GenesisHash.Msgsize() + 3 + (*z).Sha256TxnCommitment.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *LightBlockHeader) MsgIsZero() bool {
+ return ((*z).Seed.MsgIsZero()) && ((*z).Round.MsgIsZero()) && ((*z).GenesisHash.MsgIsZero()) && ((*z).Sha256TxnCommitment.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *ParticipationUpdates) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -2728,6 +2759,158 @@ func (z *RewardsState) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *StateProofTrackingData) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(3)
+ var zb0001Mask uint8 /* 4 bits */
+ if (*z).StateProofNextRound.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if (*z).StateProofOnlineTotalWeight.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ if (*z).StateProofVotersCommitment.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x8
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "n"
+ o = append(o, 0xa1, 0x6e)
+ o = (*z).StateProofNextRound.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "t"
+ o = append(o, 0xa1, 0x74)
+ o = (*z).StateProofOnlineTotalWeight.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x8) == 0 { // if not empty
+ // string "v"
+ o = append(o, 0xa1, 0x76)
+ o = (*z).StateProofVotersCommitment.MarshalMsg(o)
+ }
+ }
+ return
+}
+
+func (_ *StateProofTrackingData) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*StateProofTrackingData)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *StateProofTrackingData) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).StateProofVotersCommitment.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "StateProofVotersCommitment")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).StateProofOnlineTotalWeight.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "StateProofOnlineTotalWeight")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).StateProofNextRound.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "StateProofNextRound")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = StateProofTrackingData{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "v":
+ bts, err = (*z).StateProofVotersCommitment.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "StateProofVotersCommitment")
+ return
+ }
+ case "t":
+ bts, err = (*z).StateProofOnlineTotalWeight.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "StateProofOnlineTotalWeight")
+ return
+ }
+ case "n":
+ bts, err = (*z).StateProofNextRound.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "StateProofNextRound")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *StateProofTrackingData) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*StateProofTrackingData)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *StateProofTrackingData) Msgsize() (s int) {
+ s = 1 + 2 + (*z).StateProofVotersCommitment.Msgsize() + 2 + (*z).StateProofOnlineTotalWeight.Msgsize() + 2 + (*z).StateProofNextRound.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *StateProofTrackingData) MsgIsZero() bool {
+ return ((*z).StateProofVotersCommitment.MsgIsZero()) && ((*z).StateProofOnlineTotalWeight.MsgIsZero()) && ((*z).StateProofNextRound.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *TxnCommitments) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
diff --git a/data/bookkeeping/msgp_gen_test.go b/data/bookkeeping/msgp_gen_test.go
index cdf62e5c7..1f61ae825 100644
--- a/data/bookkeeping/msgp_gen_test.go
+++ b/data/bookkeeping/msgp_gen_test.go
@@ -134,9 +134,9 @@ func BenchmarkUnmarshalBlockHeader(b *testing.B) {
}
}
-func TestMarshalUnmarshalCompactCertState(t *testing.T) {
+func TestMarshalUnmarshalGenesis(t *testing.T) {
partitiontest.PartitionTest(t)
- v := CompactCertState{}
+ v := Genesis{}
bts := v.MarshalMsg(nil)
left, err := v.UnmarshalMsg(bts)
if err != nil {
@@ -155,12 +155,12 @@ func TestMarshalUnmarshalCompactCertState(t *testing.T) {
}
}
-func TestRandomizedEncodingCompactCertState(t *testing.T) {
- protocol.RunEncodingTest(t, &CompactCertState{})
+func TestRandomizedEncodingGenesis(t *testing.T) {
+ protocol.RunEncodingTest(t, &Genesis{})
}
-func BenchmarkMarshalMsgCompactCertState(b *testing.B) {
- v := CompactCertState{}
+func BenchmarkMarshalMsgGenesis(b *testing.B) {
+ v := Genesis{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -168,8 +168,8 @@ func BenchmarkMarshalMsgCompactCertState(b *testing.B) {
}
}
-func BenchmarkAppendMsgCompactCertState(b *testing.B) {
- v := CompactCertState{}
+func BenchmarkAppendMsgGenesis(b *testing.B) {
+ v := Genesis{}
bts := make([]byte, 0, v.Msgsize())
bts = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
@@ -180,8 +180,8 @@ func BenchmarkAppendMsgCompactCertState(b *testing.B) {
}
}
-func BenchmarkUnmarshalCompactCertState(b *testing.B) {
- v := CompactCertState{}
+func BenchmarkUnmarshalGenesis(b *testing.B) {
+ v := Genesis{}
bts := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
@@ -194,9 +194,9 @@ func BenchmarkUnmarshalCompactCertState(b *testing.B) {
}
}
-func TestMarshalUnmarshalGenesis(t *testing.T) {
+func TestMarshalUnmarshalGenesisAllocation(t *testing.T) {
partitiontest.PartitionTest(t)
- v := Genesis{}
+ v := GenesisAllocation{}
bts := v.MarshalMsg(nil)
left, err := v.UnmarshalMsg(bts)
if err != nil {
@@ -215,12 +215,12 @@ func TestMarshalUnmarshalGenesis(t *testing.T) {
}
}
-func TestRandomizedEncodingGenesis(t *testing.T) {
- protocol.RunEncodingTest(t, &Genesis{})
+func TestRandomizedEncodingGenesisAllocation(t *testing.T) {
+ protocol.RunEncodingTest(t, &GenesisAllocation{})
}
-func BenchmarkMarshalMsgGenesis(b *testing.B) {
- v := Genesis{}
+func BenchmarkMarshalMsgGenesisAllocation(b *testing.B) {
+ v := GenesisAllocation{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -228,8 +228,8 @@ func BenchmarkMarshalMsgGenesis(b *testing.B) {
}
}
-func BenchmarkAppendMsgGenesis(b *testing.B) {
- v := Genesis{}
+func BenchmarkAppendMsgGenesisAllocation(b *testing.B) {
+ v := GenesisAllocation{}
bts := make([]byte, 0, v.Msgsize())
bts = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
@@ -240,8 +240,8 @@ func BenchmarkAppendMsgGenesis(b *testing.B) {
}
}
-func BenchmarkUnmarshalGenesis(b *testing.B) {
- v := Genesis{}
+func BenchmarkUnmarshalGenesisAllocation(b *testing.B) {
+ v := GenesisAllocation{}
bts := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
@@ -254,9 +254,9 @@ func BenchmarkUnmarshalGenesis(b *testing.B) {
}
}
-func TestMarshalUnmarshalGenesisAllocation(t *testing.T) {
+func TestMarshalUnmarshalLightBlockHeader(t *testing.T) {
partitiontest.PartitionTest(t)
- v := GenesisAllocation{}
+ v := LightBlockHeader{}
bts := v.MarshalMsg(nil)
left, err := v.UnmarshalMsg(bts)
if err != nil {
@@ -275,12 +275,12 @@ func TestMarshalUnmarshalGenesisAllocation(t *testing.T) {
}
}
-func TestRandomizedEncodingGenesisAllocation(t *testing.T) {
- protocol.RunEncodingTest(t, &GenesisAllocation{})
+func TestRandomizedEncodingLightBlockHeader(t *testing.T) {
+ protocol.RunEncodingTest(t, &LightBlockHeader{})
}
-func BenchmarkMarshalMsgGenesisAllocation(b *testing.B) {
- v := GenesisAllocation{}
+func BenchmarkMarshalMsgLightBlockHeader(b *testing.B) {
+ v := LightBlockHeader{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -288,8 +288,8 @@ func BenchmarkMarshalMsgGenesisAllocation(b *testing.B) {
}
}
-func BenchmarkAppendMsgGenesisAllocation(b *testing.B) {
- v := GenesisAllocation{}
+func BenchmarkAppendMsgLightBlockHeader(b *testing.B) {
+ v := LightBlockHeader{}
bts := make([]byte, 0, v.Msgsize())
bts = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
@@ -300,8 +300,8 @@ func BenchmarkAppendMsgGenesisAllocation(b *testing.B) {
}
}
-func BenchmarkUnmarshalGenesisAllocation(b *testing.B) {
- v := GenesisAllocation{}
+func BenchmarkUnmarshalLightBlockHeader(b *testing.B) {
+ v := LightBlockHeader{}
bts := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
@@ -434,6 +434,66 @@ func BenchmarkUnmarshalRewardsState(b *testing.B) {
}
}
+func TestMarshalUnmarshalStateProofTrackingData(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := StateProofTrackingData{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingStateProofTrackingData(t *testing.T) {
+ protocol.RunEncodingTest(t, &StateProofTrackingData{})
+}
+
+func BenchmarkMarshalMsgStateProofTrackingData(b *testing.B) {
+ v := StateProofTrackingData{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgStateProofTrackingData(b *testing.B) {
+ v := StateProofTrackingData{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalStateProofTrackingData(b *testing.B) {
+ v := StateProofTrackingData{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalTxnCommitments(t *testing.T) {
partitiontest.PartitionTest(t)
v := TxnCommitments{}
diff --git a/data/bookkeeping/txn_merkle_test.go b/data/bookkeeping/txn_merkle_test.go
index 977af620d..30a34ab7f 100644
--- a/data/bookkeeping/txn_merkle_test.go
+++ b/data/bookkeeping/txn_merkle_test.go
@@ -94,7 +94,7 @@ func TestBlock_TxnMerkleTreeSHA256(t *testing.T) {
for ntxn := uint64(0); ntxn < 128; ntxn++ {
var b Block
- b.CurrentProtocol = protocol.ConsensusFuture
+ b.CurrentProtocol = protocol.ConsensusCurrentVersion
crypto.RandBytes(b.BlockHeader.GenesisHash[:])
var elems []txnMerkleElem
diff --git a/data/ledger_test.go b/data/ledger_test.go
index 934fb4b6d..9b74ddc85 100644
--- a/data/ledger_test.go
+++ b/data/ledger_test.go
@@ -576,7 +576,7 @@ func TestLedgerErrorValidate(t *testing.T) {
err := l.AddBlock(blk, agreement.Certificate{})
// AddBlock is used in 2 places:
// - data.ledger.EnsureBlock which reports a log message as Error or Debug
- // - catchup.service.fetchAndWrite which leads to interrupting catchup or skiping the round
+ // - catchup.service.fetchAndWrite which leads to interrupting catchup or skipping the round
if err != nil {
switch err.(type) {
// The following two cases are okay to ignore, since these are expected and handled
diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go
index 7d8ba6478..d5efc6c89 100644
--- a/data/pools/transactionPool.go
+++ b/data/pools/transactionPool.go
@@ -92,6 +92,10 @@ type TransactionPool struct {
// proposalAssemblyTime is the ProposalAssemblyTime configured for this node.
proposalAssemblyTime time.Duration
+
+ // stateproofOverflowed indicates that a stateproof transaction was allowed to
+ // exceed the txPoolMaxSize. This flag is reset to false OnNewBlock
+ stateproofOverflowed bool
}
// BlockEvaluator defines the block evaluator interface exposed by the ledger package.
@@ -125,7 +129,7 @@ func MakeTransactionPool(ledger *ledger.Ledger, cfg config.Local, log logging.Lo
}
pool.cond.L = &pool.mu
pool.assemblyCond.L = &pool.assemblyMu
- pool.recomputeBlockEvaluator(make(map[transactions.Txid]basics.Round), 0)
+ pool.recomputeBlockEvaluator(nil, 0)
return &pool
}
@@ -181,7 +185,7 @@ func (pool *TransactionPool) Reset() {
pool.numPendingWholeBlocks = 0
pool.pendingBlockEvaluator = nil
pool.statusCache.reset()
- pool.recomputeBlockEvaluator(make(map[transactions.Txid]basics.Round), 0)
+ pool.recomputeBlockEvaluator(nil, 0)
}
// NumExpired returns the number of transactions that expired at the
@@ -238,6 +242,7 @@ func (pool *TransactionPool) rememberCommit(flush bool) {
if flush {
pool.pendingTxGroups = pool.rememberedTxGroups
+ pool.stateproofOverflowed = false
pool.pendingTxids = pool.rememberedTxids
pool.ledger.VerifiedTransactionCache().UpdatePinned(pool.pendingTxids)
} else {
@@ -270,12 +275,22 @@ func (pool *TransactionPool) pendingCountNoLock() int {
}
// checkPendingQueueSize tests to see if we can grow the pending group transaction list
-// by adding txCount more transactions. The limits comes from the total number of transactions
+// by adding len(txnGroup) more transactions. The limits comes from the total number of transactions
// and not from the total number of transaction groups.
// As long as we haven't surpassed the size limit, we should be good to go.
-func (pool *TransactionPool) checkPendingQueueSize(txCount int) error {
+func (pool *TransactionPool) checkPendingQueueSize(txnGroup []transactions.SignedTxn) error {
pendingSize := pool.pendingTxIDsCount()
+ txCount := len(txnGroup)
if pendingSize+txCount > pool.txPoolMaxSize {
+ // Allow the state proof transaction to go over the txPoolMaxSize if it already didn't
+ if len(txnGroup) == 1 && txnGroup[0].Txn.Type == protocol.StateProofTx {
+ pool.pendingMu.Lock()
+ defer pool.pendingMu.Unlock()
+ if !pool.stateproofOverflowed {
+ pool.stateproofOverflowed = true
+ return nil
+ }
+ }
return fmt.Errorf("TransactionPool.checkPendingQueueSize: transaction pool have reached capacity")
}
return nil
@@ -329,12 +344,12 @@ func (pool *TransactionPool) computeFeePerByte() uint64 {
// checkSufficientFee take a set of signed transactions and verifies that each transaction has
// sufficient fee to get into the transaction pool
func (pool *TransactionPool) checkSufficientFee(txgroup []transactions.SignedTxn) error {
- // Special case: the compact cert transaction, if issued from the
- // special compact-cert-sender address, in a singleton group, pays
+ // Special case: the state proof transaction, if issued from the
+ // special state-proof-sender address, in a singleton group, pays
// no fee.
if len(txgroup) == 1 {
t := txgroup[0].Txn
- if t.Type == protocol.CompactCertTx && t.Sender == transactions.CompactCertSender && t.Fee.IsZero() {
+ if t.Type == protocol.StateProofTx && t.Sender == transactions.StateProofSender && t.Fee.IsZero() {
return nil
}
}
@@ -356,7 +371,7 @@ func (pool *TransactionPool) checkSufficientFee(txgroup []transactions.SignedTxn
// Test performs basic duplicate detection and well-formedness checks
// on a transaction group without storing the group.
func (pool *TransactionPool) Test(txgroup []transactions.SignedTxn) error {
- if err := pool.checkPendingQueueSize(len(txgroup)); err != nil {
+ if err := pool.checkPendingQueueSize(txgroup); err != nil {
return err
}
@@ -443,7 +458,7 @@ func (pool *TransactionPool) RememberOne(t transactions.SignedTxn) error {
// Remember stores the provided transaction group.
// Precondition: Only Remember() properly-signed and well-formed transactions (i.e., ensure t.WellFormed())
func (pool *TransactionPool) Remember(txgroup []transactions.SignedTxn) error {
- if err := pool.checkPendingQueueSize(len(txgroup)); err != nil {
+ if err := pool.checkPendingQueueSize(txgroup); err != nil {
return err
}
@@ -640,7 +655,7 @@ func (pool *TransactionPool) addToPendingBlockEvaluator(txgroup []transactions.S
// recomputeBlockEvaluator constructs a new BlockEvaluator and feeds all
// in-pool transactions to it (removing any transactions that are rejected
// by the BlockEvaluator). Expects that the pool.mu mutex would be already taken.
-func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transactions.Txid]basics.Round, knownCommitted uint) (stats telemetryspec.ProcessBlockMetrics) {
+func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transactions.Txid]ledgercore.IncludedTransactions, knownCommitted uint) (stats telemetryspec.ProcessBlockMetrics) {
pool.pendingBlockEvaluator = nil
latest := pool.ledger.Latest()
@@ -771,6 +786,34 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact
return
}
+func (pool *TransactionPool) getStateProofStats(txib *transactions.SignedTxnInBlock, encodedLen int) telemetryspec.StateProofStats {
+ stateProofStats := telemetryspec.StateProofStats{
+ ProvenWeight: 0,
+ SignedWeight: txib.Txn.StateProofTxnFields.StateProof.SignedWeight,
+ NumReveals: len(txib.Txn.StateProofTxnFields.StateProof.Reveals),
+ NumPosToReveal: len(txib.Txn.StateProofTxnFields.StateProof.PositionsToReveal),
+ TxnSize: encodedLen,
+ }
+
+ lastSPRound := basics.Round(txib.Txn.StateProofTxnFields.Message.LastAttestedRound)
+ lastRoundHdr, err := pool.ledger.BlockHdr(lastSPRound)
+ if err != nil {
+ return stateProofStats
+ }
+
+ proto := config.Consensus[lastRoundHdr.CurrentProtocol]
+ votersRound := lastSPRound.SubSaturate(basics.Round(proto.StateProofInterval))
+ votersRoundHdr, err := pool.ledger.BlockHdr(votersRound)
+ if err != nil {
+ return stateProofStats
+ }
+
+ totalWeight := votersRoundHdr.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight.Raw
+ stateProofStats.ProvenWeight, _ = basics.Muldiv(totalWeight, uint64(proto.StateProofWeightThreshold), 1<<32)
+
+ return stateProofStats
+}
+
// AssembleBlock assembles a block for a given round, trying not to
// take longer than deadline to finish.
func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Time) (assembled *ledgercore.ValidatedBlock, err error) {
@@ -816,6 +859,10 @@ func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Tim
}
}
stats.TotalLength += uint64(encodedLen)
+ stats.StateProofNextRound = uint64(assembled.Block().StateProofTracking[protocol.StateProofBasic].StateProofNextRound)
+ if txib.Txn.Type == protocol.StateProofTx {
+ stats.StateProofStats = pool.getStateProofStats(&txib, encodedLen)
+ }
}
stats.AverageFee = totalFees / uint64(stats.IncludedCount)
@@ -944,7 +991,7 @@ func (pool *TransactionPool) AssembleDevModeBlock() (assembled *ledgercore.Valid
defer pool.mu.Unlock()
// drop the current block evaluator and start with a new one.
- pool.recomputeBlockEvaluator(make(map[transactions.Txid]basics.Round), 0)
+ pool.recomputeBlockEvaluator(nil, 0)
// The above was already pregenerating the entire block,
// so there won't be any waiting on this call.
diff --git a/data/pools/transactionPool_test.go b/data/pools/transactionPool_test.go
index 2f1bd75b6..7dcc4c6ba 100644
--- a/data/pools/transactionPool_test.go
+++ b/data/pools/transactionPool_test.go
@@ -17,24 +17,33 @@
package pools
import (
+ "bufio"
+ "bytes"
"fmt"
"math/rand"
"strings"
"testing"
+ "time"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklearray"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
+ cryptostateproof "github.com/algorand/go-algorand/crypto/stateproof"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/stateproof"
+ "github.com/algorand/go-algorand/stateproof/verify"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -1271,3 +1280,222 @@ func TestTxPoolSizeLimits(t *testing.T) {
}
}
}
+
+func TestStateProofLogging(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ cfg := config.GetDefaultLocal()
+ cfg.TxPoolSize = testPoolSize
+ cfg.EnableProcessBlockStats = false
+
+ // Create 5 accounts, the last 3 uesd for signing the SP
+ numOfAccounts := 20
+ // Generate accounts
+ secrets := make([]*crypto.SignatureSecrets, numOfAccounts)
+ addresses := make([]basics.Address, numOfAccounts)
+ for i := 0; i < numOfAccounts; i++ {
+ secret := keypair()
+ addr := basics.Address(secret.SignatureVerifier)
+ secrets[i] = secret
+ addresses[i] = addr
+ }
+ accountsBalances := make(map[basics.Address]uint64)
+ for _, addr := range addresses {
+ accountsBalances[addr] = 1000000000
+ }
+ initAccounts := initAcc(accountsBalances)
+
+ // Prepare the SP signing keys
+ allKeys := make([]*merklesignature.Secrets, 0, 3)
+ stateproofIntervals := uint64(256)
+ for a := 2; a < numOfAccounts; a++ {
+ keys, err := merklesignature.New(0, uint64(512), stateproofIntervals)
+ require.NoError(t, err)
+
+ acct := initAccounts[addresses[a]]
+ acct.StateProofID = keys.GetVerifier().Commitment
+ acct.Status = basics.Online
+ acct.VoteLastValid = 100000
+ initAccounts[addresses[a]] = acct
+
+ allKeys = append(allKeys, keys)
+ }
+
+ // Set the logging to capture the telemetry Metrics into logging
+ logger := logging.TestingLog(t)
+ logger.SetLevel(logging.Info)
+ logger.EnableTelemetry(logging.TelemetryConfig{Enable: true, SendToLog: true})
+ var buf bytes.Buffer
+ logger.SetOutput(&buf)
+
+ // Set the ledger and the transaction pool
+ mockLedger := makeMockLedger(t, initAccounts)
+ transactionPool := MakeTransactionPool(mockLedger, cfg, logger)
+ transactionPool.logAssembleStats = true
+
+ // Set the first round block
+ var b bookkeeping.Block
+ b.BlockHeader.GenesisID = "pooltest"
+ b.BlockHeader.GenesisHash = mockLedger.GenesisHash()
+ b.CurrentProtocol = protocol.ConsensusCurrentVersion
+ b.BlockHeader.Round = 1
+
+ phdr, err := mockLedger.BlockHdr(0)
+ require.NoError(t, err)
+ b.BlockHeader.Branch = phdr.Hash()
+
+ eval, err := mockLedger.StartEvaluator(b.BlockHeader, 0, 10000)
+ require.NoError(t, err)
+
+ // Simulate the blocks up to round 512 without any transactions
+ for i := 1; true; i++ {
+ blk, err := transactionPool.AssembleBlock(basics.Round(i), time.Time{})
+ require.NoError(t, err)
+
+ err = mockLedger.AddValidatedBlock(*blk, agreement.Certificate{})
+ require.NoError(t, err)
+
+ // Move to the next round
+ b.BlockHeader.Round++
+ transactionPool.OnNewBlock(blk.Block(), ledgercore.StateDelta{})
+
+ phdr, err := mockLedger.BlockHdr(basics.Round(i))
+ require.NoError(t, err)
+ b.BlockHeader.Branch = phdr.Hash()
+ b.BlockHeader.TimeStamp = phdr.TimeStamp + 10
+
+ if i == 513 {
+ break
+ }
+
+ eval, err = mockLedger.StartEvaluator(b.BlockHeader, 0, 10000)
+ require.NoError(t, err)
+ }
+
+ // Prepare the transaction with the SP
+ round := basics.Round(512)
+ spRoundHdr, err := mockLedger.BlockHdr(round)
+ require.NoError(t, err)
+
+ votersRound := round.SubSaturate(basics.Round(proto.StateProofInterval))
+ votersRoundHdr, err := mockLedger.BlockHdr(votersRound)
+ require.NoError(t, err)
+
+ provenWeight, err := verify.GetProvenWeight(&votersRoundHdr, &spRoundHdr)
+ require.NoError(t, err)
+
+ lookback := votersRound.SubSaturate(basics.Round(proto.StateProofVotersLookback))
+ voters, err := mockLedger.VotersForStateProof(lookback)
+ require.NoError(t, err)
+ require.NotNil(t, voters)
+
+ // Get the message
+ msg, err := stateproof.GenerateStateProofMessage(mockLedger, uint64(votersRound), spRoundHdr)
+
+ // Get the SP
+ proof := generateProofForTesting(uint64(round), msg, provenWeight, voters.Participants, voters.Tree, allKeys, t)
+
+ // Set the transaction with the SP
+ var stxn transactions.SignedTxn
+ stxn.Txn.Type = protocol.StateProofTx
+ stxn.Txn.Sender = transactions.StateProofSender
+ stxn.Txn.FirstValid = 512
+ stxn.Txn.LastValid = 1024
+ stxn.Txn.GenesisHash = mockLedger.GenesisHash()
+ stxn.Txn.StateProofType = protocol.StateProofBasic
+ stxn.Txn.StateProof = *proof
+ require.NoError(t, err)
+ stxn.Txn.Message = msg
+
+ err = stxn.Txn.WellFormed(transactions.SpecialAddresses{}, proto)
+ require.NoError(t, err)
+
+ // Add it to the transaction pool and assemble the block
+ eval, err = mockLedger.StartEvaluator(b.BlockHeader, 0, 1000000)
+ require.NoError(t, err)
+
+ err = eval.Transaction(stxn, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ err = transactionPool.RememberOne(stxn)
+ require.NoError(t, err)
+ transactionPool.recomputeBlockEvaluator(nil, 0)
+ _, err = transactionPool.AssembleBlock(514, time.Time{})
+ require.NoError(t, err)
+
+ // parse the log messages and retreive the Metrics for SP in assmbe block
+ scanner := bufio.NewScanner(strings.NewReader(buf.String()))
+ lines := make([]string, 0)
+ for scanner.Scan() {
+ lines = append(lines, scanner.Text())
+ }
+ fmt.Println(lines[len(lines)-1])
+ parts := strings.Split(lines[len(lines)-1], "StateProofNextRound:")
+
+ // Verify the Metrics is correct
+ var nextRound, pWeight, signedWeight, numReveals, posToReveal, txnSize uint64
+ var str1 string
+ fmt.Sscanf(parts[1], "%d, ProvenWeight:%d, SignedWeight:%d, NumReveals:%d, NumPosToReveal:%d, TxnSize:%d\"%s",
+ &nextRound, &pWeight, &signedWeight, &numReveals, &posToReveal, &txnSize, &str1)
+ require.Equal(t, uint64(768), nextRound)
+ require.Equal(t, provenWeight, pWeight)
+ require.Equal(t, proof.SignedWeight, signedWeight)
+ require.Less(t, numOfAccounts/2, int(numReveals))
+ require.Greater(t, numOfAccounts, int(numReveals))
+ require.Equal(t, len(proof.PositionsToReveal), int(posToReveal))
+ stxn.Txn.GenesisHash = crypto.Digest{}
+ require.Equal(t, stxn.GetEncodedLength(), int(txnSize))
+}
+
+// Given the round number, partArray and partTree from the previous period block, the keys and the totalWeight
+// return a stateProof which can be submitted in a transaction to the transaction pool and assembled into a new block.
+func generateProofForTesting(
+ round uint64,
+ msg stateproofmsg.Message,
+ provenWeight uint64,
+ partArray basics.ParticipantsArray,
+ partTree *merklearray.Tree,
+ allKeys []*merklesignature.Secrets,
+ t *testing.T) *cryptostateproof.StateProof {
+
+ data := msg.Hash()
+
+ // Sign with the participation keys
+ sigs := make(map[merklesignature.Verifier]merklesignature.Signature)
+ for _, keys := range allKeys {
+ signerInRound := keys.GetSigner(round)
+ sig, err := signerInRound.SignBytes(data[:])
+ require.NoError(t, err)
+ sigs[*keys.GetVerifier()] = sig
+ }
+
+ // Prepare the builder
+ stateProofStrengthTargetForTests := config.Consensus[protocol.ConsensusCurrentVersion].StateProofStrengthTarget
+ b, err := cryptostateproof.MakeBuilder(data, round, provenWeight,
+ partArray, partTree, stateProofStrengthTargetForTests)
+ require.NoError(t, err)
+
+ // Add the signatures
+ for i := range partArray {
+ p, err := b.Present(uint64(i))
+ require.False(t, p)
+ require.NoError(t, err)
+ s := sigs[partArray[i].PK]
+ err = b.IsValid(uint64(i), &s, true)
+ require.NoError(t, err)
+ b.Add(uint64(i), s)
+
+ // sanity check that the builder add the signature
+ isPresent, err := b.Present(uint64(i))
+ require.NoError(t, err)
+ require.True(t, isPresent)
+ }
+
+ // Build the SP
+ proof, err := b.Build()
+ require.NoError(t, err)
+
+ return proof
+}
diff --git a/data/stateproofmsg/message.go b/data/stateproofmsg/message.go
new file mode 100644
index 000000000..aea195475
--- /dev/null
+++ b/data/stateproofmsg/message.go
@@ -0,0 +1,50 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package stateproofmsg
+
+import (
+ "github.com/algorand/go-algorand/crypto"
+ sp "github.com/algorand/go-algorand/crypto/stateproof"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+// Message represents the message that the state proofs are attesting to. This message can be
+// used by lightweight client and gives it the ability to verify proofs on the Algorand's state.
+// In addition to that proof, this message also contains fields that
+// are needed in order to verify the next state proofs (VotersCommitment and LnProvenWeight).
+type Message struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+ // BlockHeadersCommitment contains a commitment on all light block headers within a state proof interval.
+ BlockHeadersCommitment []byte `codec:"b,allocbound=crypto.Sha256Size"`
+ VotersCommitment []byte `codec:"v,allocbound=crypto.SumhashDigestSize"`
+ LnProvenWeight uint64 `codec:"P"`
+ FirstAttestedRound uint64 `codec:"f"`
+ LastAttestedRound uint64 `codec:"l"`
+}
+
+// ToBeHashed returns the bytes of the message.
+func (m Message) ToBeHashed() (protocol.HashID, []byte) {
+ return protocol.StateProofMessage, protocol.Encode(&m)
+}
+
+// Hash returns a hashed representation fitting the state proof messages.
+func (m *Message) Hash() sp.MessageHash {
+ digest := crypto.GenericHashObj(crypto.HashFactory{HashType: sp.MessageHashType}.NewHash(), m)
+ result := sp.MessageHash{}
+ copy(result[:], digest)
+ return result
+}
diff --git a/data/stateproofmsg/msgp_gen.go b/data/stateproofmsg/msgp_gen.go
new file mode 100644
index 000000000..d8dfb3948
--- /dev/null
+++ b/data/stateproofmsg/msgp_gen.go
@@ -0,0 +1,257 @@
+package stateproofmsg
+
+// Code generated by github.com/algorand/msgp DO NOT EDIT.
+
+import (
+ "github.com/algorand/msgp/msgp"
+
+ "github.com/algorand/go-algorand/crypto"
+)
+
+// The following msgp objects are implemented in this file:
+// Message
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+
+// MarshalMsg implements msgp.Marshaler
+func (z *Message) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(5)
+ var zb0001Mask uint8 /* 6 bits */
+ if (*z).LnProvenWeight == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x1
+ }
+ if len((*z).BlockHeadersCommitment) == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ if (*z).FirstAttestedRound == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x8
+ }
+ if (*z).LastAttestedRound == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x10
+ }
+ if len((*z).VotersCommitment) == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x20
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x1) == 0 { // if not empty
+ // string "P"
+ o = append(o, 0xa1, 0x50)
+ o = msgp.AppendUint64(o, (*z).LnProvenWeight)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "b"
+ o = append(o, 0xa1, 0x62)
+ o = msgp.AppendBytes(o, (*z).BlockHeadersCommitment)
+ }
+ if (zb0001Mask & 0x8) == 0 { // if not empty
+ // string "f"
+ o = append(o, 0xa1, 0x66)
+ o = msgp.AppendUint64(o, (*z).FirstAttestedRound)
+ }
+ if (zb0001Mask & 0x10) == 0 { // if not empty
+ // string "l"
+ o = append(o, 0xa1, 0x6c)
+ o = msgp.AppendUint64(o, (*z).LastAttestedRound)
+ }
+ if (zb0001Mask & 0x20) == 0 { // if not empty
+ // string "v"
+ o = append(o, 0xa1, 0x76)
+ o = msgp.AppendBytes(o, (*z).VotersCommitment)
+ }
+ }
+ return
+}
+
+func (_ *Message) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*Message)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *Message) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ var zb0003 int
+ zb0003, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "BlockHeadersCommitment")
+ return
+ }
+ if zb0003 > crypto.Sha256Size {
+ err = msgp.ErrOverflow(uint64(zb0003), uint64(crypto.Sha256Size))
+ return
+ }
+ (*z).BlockHeadersCommitment, bts, err = msgp.ReadBytesBytes(bts, (*z).BlockHeadersCommitment)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "BlockHeadersCommitment")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ var zb0004 int
+ zb0004, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VotersCommitment")
+ return
+ }
+ if zb0004 > crypto.SumhashDigestSize {
+ err = msgp.ErrOverflow(uint64(zb0004), uint64(crypto.SumhashDigestSize))
+ return
+ }
+ (*z).VotersCommitment, bts, err = msgp.ReadBytesBytes(bts, (*z).VotersCommitment)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VotersCommitment")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).LnProvenWeight, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "LnProvenWeight")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).FirstAttestedRound, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "FirstAttestedRound")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).LastAttestedRound, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "LastAttestedRound")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = Message{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "b":
+ var zb0005 int
+ zb0005, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "BlockHeadersCommitment")
+ return
+ }
+ if zb0005 > crypto.Sha256Size {
+ err = msgp.ErrOverflow(uint64(zb0005), uint64(crypto.Sha256Size))
+ return
+ }
+ (*z).BlockHeadersCommitment, bts, err = msgp.ReadBytesBytes(bts, (*z).BlockHeadersCommitment)
+ if err != nil {
+ err = msgp.WrapError(err, "BlockHeadersCommitment")
+ return
+ }
+ case "v":
+ var zb0006 int
+ zb0006, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VotersCommitment")
+ return
+ }
+ if zb0006 > crypto.SumhashDigestSize {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(crypto.SumhashDigestSize))
+ return
+ }
+ (*z).VotersCommitment, bts, err = msgp.ReadBytesBytes(bts, (*z).VotersCommitment)
+ if err != nil {
+ err = msgp.WrapError(err, "VotersCommitment")
+ return
+ }
+ case "P":
+ (*z).LnProvenWeight, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LnProvenWeight")
+ return
+ }
+ case "f":
+ (*z).FirstAttestedRound, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "FirstAttestedRound")
+ return
+ }
+ case "l":
+ (*z).LastAttestedRound, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LastAttestedRound")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *Message) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*Message)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *Message) Msgsize() (s int) {
+ s = 1 + 2 + msgp.BytesPrefixSize + len((*z).BlockHeadersCommitment) + 2 + msgp.BytesPrefixSize + len((*z).VotersCommitment) + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *Message) MsgIsZero() bool {
+ return (len((*z).BlockHeadersCommitment) == 0) && (len((*z).VotersCommitment) == 0) && ((*z).LnProvenWeight == 0) && ((*z).FirstAttestedRound == 0) && ((*z).LastAttestedRound == 0)
+}
diff --git a/data/stateproofmsg/msgp_gen_test.go b/data/stateproofmsg/msgp_gen_test.go
new file mode 100644
index 000000000..c8ce88e55
--- /dev/null
+++ b/data/stateproofmsg/msgp_gen_test.go
@@ -0,0 +1,75 @@
+//go:build !skip_msgp_testing
+// +build !skip_msgp_testing
+
+package stateproofmsg
+
+// Code generated by github.com/algorand/msgp DO NOT EDIT.
+
+import (
+ "testing"
+
+ "github.com/algorand/msgp/msgp"
+
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestMarshalUnmarshalMessage(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := Message{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingMessage(t *testing.T) {
+ protocol.RunEncodingTest(t, &Message{})
+}
+
+func BenchmarkMarshalMsgMessage(b *testing.B) {
+ v := Message{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgMessage(b *testing.B) {
+ v := Message{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalMessage(b *testing.B) {
+ v := Message{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/data/transactions/keyreg.go b/data/transactions/keyreg.go
index 16f7a1f32..401617ef7 100644
--- a/data/transactions/keyreg.go
+++ b/data/transactions/keyreg.go
@@ -28,7 +28,7 @@ type KeyregTxnFields struct {
VotePK crypto.OneTimeSignatureVerifier `codec:"votekey"`
SelectionPK crypto.VRFVerifier `codec:"selkey"`
- StateProofPK merklesignature.Verifier `codec:"sprfkey"`
+ StateProofPK merklesignature.Commitment `codec:"sprfkey"`
VoteFirst basics.Round `codec:"votefst"`
VoteLast basics.Round `codec:"votelst"`
VoteKeyDilution uint64 `codec:"votekd"`
diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md
index 597b47763..0494971b3 100644
--- a/data/transactions/logic/README.md
+++ b/data/transactions/logic/README.md
@@ -47,9 +47,7 @@ programs, AVM code is versioned. When new opcodes are introduced, or
behavior is changed, a new version is introduced. Programs carrying
old versions are executed with their original semantics. In the AVM
bytecode, the version is an incrementing integer, currently 6, and
-denoted vX throughout this document. User friendly version numbers
-that correspond to programmer expectations, such as `AVM 1.0` map to
-these integers. AVM 0.9 is v4. AVM 1.0 is v5. AVM 1.1 is v6.
+denoted vX throughout this document.
## Execution Modes
@@ -276,9 +274,7 @@ return stack matches the name of the input value.
| `ecdsa_verify v` | for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1} |
| `ecdsa_pk_recover v` | for (data A, recovery id B, signature C, D) recover a public key |
| `ecdsa_pk_decompress v` | decompress pubkey A into components X, Y |
-| `bn256_add` | for (curve points A and B) return the curve point A + B |
-| `bn256_scalar_mul` | for (curve point A, scalar K) return the curve point KA |
-| `bn256_pairing` | for (points in G1 group G1s, points in G2 group G2s), return whether they are paired => {0 or 1} |
+| `vrf_verify s` | Verify the proof B of message A against pubkey C. Returns vrf output and verification flag. |
| `+` | A plus B. Fail on overflow. |
| `-` | A minus B. Fail if B > A. |
| `/` | A divided by B (truncated division). Fail if B == 0. |
@@ -323,14 +319,14 @@ return stack matches the name of the input value.
| `substring s e` | A range of bytes from A starting at S up to but not including E. If E < S, or either is larger than the array length, the program fails |
| `substring3` | A range of bytes from A starting at B up to but not including C. If C < B, or either is larger than the array length, the program fails |
| `extract s l` | A range of bytes from A starting at S up to but not including S+L. If L is 0, then extract to the end of the string. If S or S+L is larger than the array length, the program fails |
-| `extract3` | A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails |
+| `extract3` | A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails<br />`extract3` can be called using `extract` with no immediates. |
| `extract_uint16` | A uint16 formed from a range of big-endian bytes from A starting at B up to but not including B+2. If B+2 is larger than the array length, the program fails |
| `extract_uint32` | A uint32 formed from a range of big-endian bytes from A starting at B up to but not including B+4. If B+4 is larger than the array length, the program fails |
| `extract_uint64` | A uint64 formed from a range of big-endian bytes from A starting at B up to but not including B+8. If B+8 is larger than the array length, the program fails |
-| `replace2 s` | Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A) |
-| `replace3` | Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A) |
+| `replace2 s` | Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A)<br />`replace2` can be called using `replace` with 1 immediate. |
+| `replace3` | Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A)<br />`replace3` can be called using `replace` with no immediates. |
| `base64_decode e` | decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E |
-| `json_ref r` | return key B's value from a [valid](jsonspec.md) utf-8 encoded json object A |
+| `json_ref r` | key B's value, of type R, from a [valid](jsonspec.md) utf-8 encoded json object A |
The following opcodes take byte-array values that are interpreted as
big-endian unsigned integers. For mathematical operators, the
@@ -403,12 +399,12 @@ Some of these have immediate data in the byte or bytes after the opcode.
| `args` | Ath LogicSig argument |
| `txn f` | field F of current transaction |
| `gtxn t f` | field F of the Tth transaction in the current group |
-| `txna f i` | Ith value of the array field F of the current transaction |
+| `txna f i` | Ith value of the array field F of the current transaction<br />`txna` can be called using `txn` with 2 immediates. |
| `txnas f` | Ath value of the array field F of the current transaction |
-| `gtxna t f i` | Ith value of the array field F from the Tth transaction in the current group |
+| `gtxna t f i` | Ith value of the array field F from the Tth transaction in the current group<br />`gtxna` can be called using `gtxn` with 3 immediates. |
| `gtxnas t f` | Ath value of the array field F from the Tth transaction in the current group |
| `gtxns f` | field F of the Ath transaction in the current group |
-| `gtxnsa f i` | Ith value of the array field F from the Ath transaction in the current group |
+| `gtxnsa f i` | Ith value of the array field F from the Ath transaction in the current group<br />`gtxnsa` can be called using `gtxns` with 2 immediates. |
| `gtxnsas f` | Bth value of the array field F from the Ath transaction in the current group |
| `global f` | global field F |
| `load i` | Ith scratch space value. All scratch spaces are 0 at program start. |
@@ -421,14 +417,14 @@ Some of these have immediate data in the byte or bytes after the opcode.
| `gaid t` | ID of the asset or application created in the Tth transaction of the current group |
| `gaids` | ID of the asset or application created in the Ath transaction of the current group |
-**Transaction Fields**
-
+#### Transaction Fields
+##### Scalar Fields
| Index | Name | Type | In | Notes |
| - | ------ | -- | - | --------- |
| 0 | Sender | []byte | | 32 byte address |
| 1 | Fee | uint64 | | microalgos |
| 2 | FirstValid | uint64 | | round number |
-| 3 | FirstValidTime | uint64 | | Causes program to fail; reserved for future use |
+| 3 | FirstValidTime | uint64 | v7 | UNIX timestamp of block before txn.FirstValid. Fails if negative |
| 4 | LastValid | uint64 | | round number |
| 5 | Note | []byte | | Any data up to 1024 bytes |
| 6 | Lease | []byte | | 32 byte lease value |
@@ -441,19 +437,17 @@ Some of these have immediate data in the byte or bytes after the opcode.
| 13 | VoteLast | uint64 | | The last round that the participation key is valid. |
| 14 | VoteKeyDilution | uint64 | | Dilution for the 2-level participation key |
| 15 | Type | []byte | | Transaction type as bytes |
-| 16 | TypeEnum | uint64 | | See table below |
+| 16 | TypeEnum | uint64 | | Transaction type as integer |
| 17 | XferAsset | uint64 | | Asset ID |
| 18 | AssetAmount | uint64 | | value in Asset's units |
-| 19 | AssetSender | []byte | | 32 byte address. Moves asset from AssetSender if Sender is the Clawback address of the asset. |
+| 19 | AssetSender | []byte | | 32 byte address. Source of assets if Sender is the Asset's Clawback address. |
| 20 | AssetReceiver | []byte | | 32 byte address |
| 21 | AssetCloseTo | []byte | | 32 byte address |
| 22 | GroupIndex | uint64 | | Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1 |
| 23 | TxID | []byte | | The computed ID for this transaction. 32 bytes. |
| 24 | ApplicationID | uint64 | v2 | ApplicationID from ApplicationCall transaction |
| 25 | OnCompletion | uint64 | v2 | ApplicationCall transaction on completion action |
-| 26 | ApplicationArgs | []byte | v2 | Arguments passed to the application in the ApplicationCall transaction |
| 27 | NumAppArgs | uint64 | v2 | Number of ApplicationArgs |
-| 28 | Accounts | []byte | v2 | Accounts listed in the ApplicationCall transaction |
| 29 | NumAccounts | uint64 | v2 | Number of Accounts |
| 30 | ApprovalProgram | []byte | v2 | Approval program |
| 31 | ClearStateProgram | []byte | v2 | Clear state program |
@@ -473,9 +467,7 @@ Some of these have immediate data in the byte or bytes after the opcode.
| 45 | FreezeAsset | uint64 | v2 | Asset ID being frozen or un-frozen |
| 46 | FreezeAssetAccount | []byte | v2 | 32 byte address of the account whose asset slot is being frozen or un-frozen |
| 47 | FreezeAssetFrozen | uint64 | v2 | The new frozen value, 0 or 1 |
-| 48 | Assets | uint64 | v3 | Foreign Assets listed in the ApplicationCall transaction |
| 49 | NumAssets | uint64 | v3 | Number of Assets |
-| 50 | Applications | uint64 | v3 | Foreign Apps listed in the ApplicationCall transaction |
| 51 | NumApplications | uint64 | v3 | Number of Applications |
| 52 | GlobalNumUint | uint64 | v3 | Number of global state integers in ApplicationCall |
| 53 | GlobalNumByteSlice | uint64 | v3 | Number of global state byteslices in ApplicationCall |
@@ -483,12 +475,24 @@ Some of these have immediate data in the byte or bytes after the opcode.
| 55 | LocalNumByteSlice | uint64 | v3 | Number of local state byteslices in ApplicationCall |
| 56 | ExtraProgramPages | uint64 | v4 | Number of additional pages for each of the application's approval and clear state programs. An ExtraProgramPages of 1 means 2048 more total bytes, or 1024 for each program. |
| 57 | Nonparticipation | uint64 | v5 | Marks an account nonparticipating for rewards |
-| 58 | Logs | []byte | v5 | Log messages emitted by an application call (only with `itxn` in v5). Application mode only |
| 59 | NumLogs | uint64 | v5 | Number of Logs (only with `itxn` in v5). Application mode only |
| 60 | CreatedAssetID | uint64 | v5 | Asset ID allocated by the creation of an ASA (only with `itxn` in v5). Application mode only |
| 61 | CreatedApplicationID | uint64 | v5 | ApplicationID allocated by the creation of an application (only with `itxn` in v5). Application mode only |
| 62 | LastLog | []byte | v6 | The last message emitted. Empty bytes if none were emitted. Application mode only |
-| 63 | StateProofPK | []byte | v6 | 64 byte state proof public key commitment |
+| 63 | StateProofPK | []byte | v6 | 64 byte state proof public key |
+| 65 | NumApprovalProgramPages | uint64 | v7 | Number of Approval Program pages |
+| 67 | NumClearStateProgramPages | uint64 | v7 | Number of ClearState Program pages |
+
+##### Array Fields
+| Index | Name | Type | In | Notes |
+| - | ------ | -- | - | --------- |
+| 26 | ApplicationArgs | []byte | v2 | Arguments passed to the application in the ApplicationCall transaction |
+| 28 | Accounts | []byte | v2 | Accounts listed in the ApplicationCall transaction |
+| 48 | Assets | uint64 | v3 | Foreign Assets listed in the ApplicationCall transaction |
+| 50 | Applications | uint64 | v3 | Foreign Apps listed in the ApplicationCall transaction |
+| 58 | Logs | []byte | v5 | Log messages emitted by an application call (only with `itxn` in v5). Application mode only |
+| 64 | ApprovalProgramPages | []byte | v7 | Approval Program as an array of pages |
+| 66 | ClearStateProgramPages | []byte | v7 | ClearState Program as an array of pages |
Additional details in the [opcodes document](TEAL_opcodes.md#txn) on the `txn` op.
@@ -611,6 +615,7 @@ Account fields used in the `acct_params_get` opcode.
| `app_params_get f` | X is field F from app A. Y is 1 if A exists, else 0 |
| `acct_params_get f` | X is field F from account A. Y is 1 if A owns positive algos, else 0 |
| `log` | write A to log state of the current application |
+| `block f` | field F of block A. Fail unless A falls between txn.LastValid-1002 and the current round (exclusive) |
### Inner Transactions
@@ -676,7 +681,7 @@ The assembler parses line by line. Ops that only take stack arguments
appear on a line by themselves. Immediate arguments follow the opcode
on the same line, separated by whitespace.
-The first line may contain a special version pragma `#pragma version X`, which directs the assembler to generate AVM bytecode targeting a certain version. For instance, `#pragma version 2` produces bytecode targeting TEAL v2. By default, the assembler targets TEAL v1.
+The first line may contain a special version pragma `#pragma version X`, which directs the assembler to generate bytecode targeting a certain version. For instance, `#pragma version 2` produces bytecode targeting v2. By default, the assembler targets v1.
Subsequent lines may contain other pragma declarations (i.e., `#pragma <some-specification>`), pertaining to checks that the assembler should perform before agreeing to emit the program bytes, specific optimizations, etc. Those declarations are optional and cannot alter the semantics as described in this document.
@@ -727,25 +732,26 @@ A compiled program starts with a varuint declaring the version of the compiled c
For version 1, subsequent bytes after the varuint are program opcode bytes. Future versions could put other metadata following the version identifier.
-It is important to prevent newly-introduced transaction fields from
-breaking assumptions made by older versions of the AVM. If one of the
-transactions in a group will execute a program whose version predates
-a given field, that field must not be set anywhere in the transaction
-group, or the group will be rejected. For example, executing a version
-1 program on a transaction with RekeyTo set to a nonzero address will
-cause the program to fail, regardless of the other contents of the
-program itself.
+It is important to prevent newly-introduced transaction types and
+fields from breaking assumptions made by programs written before they
+existed. If one of the transactions in a group will execute a program
+whose version predates a transaction type or field that can violate
+expectations, that transaction type or field must not be used anywhere
+in the transaction group.
+
+Concretely, the above requirement is translated as follows: A v1
+program included in a transaction group that includes a
+ApplicationCall transaction or a non-zero RekeyTo field will fail
+regardless of the program itself.
This requirement is enforced as follows:
* For every transaction, compute the earliest version that supports
- all the fields and values in this transaction. For example, a
- transaction with a nonzero RekeyTo field will be (at least) v2.
-
-* Compute the largest version number across all the transactions in a group (of size 1 or more), call it `maxVerNo`. If any transaction in this group has a program with a version smaller than `maxVerNo`, then that TEAL program will fail.
+ all the fields and values in this transaction.
+
+* Compute the largest version number across all the transactions in a group (of size 1 or more), call it `maxVerNo`. If any transaction in this group has a program with a version smaller than `maxVerNo`, then that program will fail.
-In addition, applications must be version 6 or greater to be eligible
-for being called in an inner transaction.
+In addition, applications must be v4 or greater to be called in an inner transaction.
## Varuint
diff --git a/data/transactions/logic/README_in.md b/data/transactions/logic/README_in.md
index 8464ec2dc..c323b51de 100644
--- a/data/transactions/logic/README_in.md
+++ b/data/transactions/logic/README_in.md
@@ -47,9 +47,7 @@ programs, AVM code is versioned. When new opcodes are introduced, or
behavior is changed, a new version is introduced. Programs carrying
old versions are executed with their original semantics. In the AVM
bytecode, the version is an incrementing integer, currently 6, and
-denoted vX throughout this document. User friendly version numbers
-that correspond to programmer expectations, such as `AVM 1.0` map to
-these integers. AVM 0.9 is v4. AVM 1.0 is v5. AVM 1.1 is v6.
+denoted vX throughout this document.
## Execution Modes
@@ -277,9 +275,11 @@ Some of these have immediate data in the byte or bytes after the opcode.
@@ Loading_Values.md @@
-**Transaction Fields**
-
+#### Transaction Fields
+##### Scalar Fields
@@ txn_fields.md @@
+##### Array Fields
+@@ txna_fields.md @@
Additional details in the [opcodes document](TEAL_opcodes.md#txn) on the `txn` op.
@@ -370,7 +370,7 @@ The assembler parses line by line. Ops that only take stack arguments
appear on a line by themselves. Immediate arguments follow the opcode
on the same line, separated by whitespace.
-The first line may contain a special version pragma `#pragma version X`, which directs the assembler to generate AVM bytecode targeting a certain version. For instance, `#pragma version 2` produces bytecode targeting TEAL v2. By default, the assembler targets TEAL v1.
+The first line may contain a special version pragma `#pragma version X`, which directs the assembler to generate bytecode targeting a certain version. For instance, `#pragma version 2` produces bytecode targeting v2. By default, the assembler targets v1.
Subsequent lines may contain other pragma declarations (i.e., `#pragma <some-specification>`), pertaining to checks that the assembler should perform before agreeing to emit the program bytes, specific optimizations, etc. Those declarations are optional and cannot alter the semantics as described in this document.
@@ -421,25 +421,26 @@ A compiled program starts with a varuint declaring the version of the compiled c
For version 1, subsequent bytes after the varuint are program opcode bytes. Future versions could put other metadata following the version identifier.
-It is important to prevent newly-introduced transaction fields from
-breaking assumptions made by older versions of the AVM. If one of the
-transactions in a group will execute a program whose version predates
-a given field, that field must not be set anywhere in the transaction
-group, or the group will be rejected. For example, executing a version
-1 program on a transaction with RekeyTo set to a nonzero address will
-cause the program to fail, regardless of the other contents of the
-program itself.
+It is important to prevent newly-introduced transaction types and
+fields from breaking assumptions made by programs written before they
+existed. If one of the transactions in a group will execute a program
+whose version predates a transaction type or field that can violate
+expectations, that transaction type or field must not be used anywhere
+in the transaction group.
+
+Concretely, the above requirement is translated as follows: A v1
+program included in a transaction group that includes a
+ApplicationCall transaction or a non-zero RekeyTo field will fail
+regardless of the program itself.
This requirement is enforced as follows:
* For every transaction, compute the earliest version that supports
- all the fields and values in this transaction. For example, a
- transaction with a nonzero RekeyTo field will be (at least) v2.
-
-* Compute the largest version number across all the transactions in a group (of size 1 or more), call it `maxVerNo`. If any transaction in this group has a program with a version smaller than `maxVerNo`, then that TEAL program will fail.
+ all the fields and values in this transaction.
+
+* Compute the largest version number across all the transactions in a group (of size 1 or more), call it `maxVerNo`. If any transaction in this group has a program with a version smaller than `maxVerNo`, then that program will fail.
-In addition, applications must be version 6 or greater to be eligible
-for being called in an inner transaction.
+In addition, applications must be v4 or greater to be called in an inner transaction.
## Varuint
diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md
index cd7a9a952..5fbd310d2 100644
--- a/data/transactions/logic/TEAL_opcodes.md
+++ b/data/transactions/logic/TEAL_opcodes.md
@@ -364,7 +364,7 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
| 0 | Sender | []byte | | 32 byte address |
| 1 | Fee | uint64 | | microalgos |
| 2 | FirstValid | uint64 | | round number |
-| 3 | FirstValidTime | uint64 | | Causes program to fail; reserved for future use |
+| 3 | FirstValidTime | uint64 | v7 | UNIX timestamp of block before txn.FirstValid. Fails if negative |
| 4 | LastValid | uint64 | | round number |
| 5 | Note | []byte | | Any data up to 1024 bytes |
| 6 | Lease | []byte | | 32 byte lease value |
@@ -377,19 +377,17 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
| 13 | VoteLast | uint64 | | The last round that the participation key is valid. |
| 14 | VoteKeyDilution | uint64 | | Dilution for the 2-level participation key |
| 15 | Type | []byte | | Transaction type as bytes |
-| 16 | TypeEnum | uint64 | | See table below |
+| 16 | TypeEnum | uint64 | | Transaction type as integer |
| 17 | XferAsset | uint64 | | Asset ID |
| 18 | AssetAmount | uint64 | | value in Asset's units |
-| 19 | AssetSender | []byte | | 32 byte address. Moves asset from AssetSender if Sender is the Clawback address of the asset. |
+| 19 | AssetSender | []byte | | 32 byte address. Source of assets if Sender is the Asset's Clawback address. |
| 20 | AssetReceiver | []byte | | 32 byte address |
| 21 | AssetCloseTo | []byte | | 32 byte address |
| 22 | GroupIndex | uint64 | | Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1 |
| 23 | TxID | []byte | | The computed ID for this transaction. 32 bytes. |
| 24 | ApplicationID | uint64 | v2 | ApplicationID from ApplicationCall transaction |
| 25 | OnCompletion | uint64 | v2 | ApplicationCall transaction on completion action |
-| 26 | ApplicationArgs | []byte | v2 | Arguments passed to the application in the ApplicationCall transaction |
| 27 | NumAppArgs | uint64 | v2 | Number of ApplicationArgs |
-| 28 | Accounts | []byte | v2 | Accounts listed in the ApplicationCall transaction |
| 29 | NumAccounts | uint64 | v2 | Number of Accounts |
| 30 | ApprovalProgram | []byte | v2 | Approval program |
| 31 | ClearStateProgram | []byte | v2 | Clear state program |
@@ -409,9 +407,7 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
| 45 | FreezeAsset | uint64 | v2 | Asset ID being frozen or un-frozen |
| 46 | FreezeAssetAccount | []byte | v2 | 32 byte address of the account whose asset slot is being frozen or un-frozen |
| 47 | FreezeAssetFrozen | uint64 | v2 | The new frozen value, 0 or 1 |
-| 48 | Assets | uint64 | v3 | Foreign Assets listed in the ApplicationCall transaction |
| 49 | NumAssets | uint64 | v3 | Number of Assets |
-| 50 | Applications | uint64 | v3 | Foreign Apps listed in the ApplicationCall transaction |
| 51 | NumApplications | uint64 | v3 | Number of Applications |
| 52 | GlobalNumUint | uint64 | v3 | Number of global state integers in ApplicationCall |
| 53 | GlobalNumByteSlice | uint64 | v3 | Number of global state byteslices in ApplicationCall |
@@ -419,15 +415,14 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
| 55 | LocalNumByteSlice | uint64 | v3 | Number of local state byteslices in ApplicationCall |
| 56 | ExtraProgramPages | uint64 | v4 | Number of additional pages for each of the application's approval and clear state programs. An ExtraProgramPages of 1 means 2048 more total bytes, or 1024 for each program. |
| 57 | Nonparticipation | uint64 | v5 | Marks an account nonparticipating for rewards |
-| 58 | Logs | []byte | v5 | Log messages emitted by an application call (only with `itxn` in v5). Application mode only |
| 59 | NumLogs | uint64 | v5 | Number of Logs (only with `itxn` in v5). Application mode only |
| 60 | CreatedAssetID | uint64 | v5 | Asset ID allocated by the creation of an ASA (only with `itxn` in v5). Application mode only |
| 61 | CreatedApplicationID | uint64 | v5 | ApplicationID allocated by the creation of an application (only with `itxn` in v5). Application mode only |
| 62 | LastLog | []byte | v6 | The last message emitted. Empty bytes if none were emitted. Application mode only |
-| 63 | StateProofPK | []byte | v6 | 64 byte state proof public key commitment |
-
+| 63 | StateProofPK | []byte | v6 | 64 byte state proof public key |
+| 65 | NumApprovalProgramPages | uint64 | v7 | Number of Approval Program pages |
+| 67 | NumClearStateProgramPages | uint64 | v7 | Number of ClearState Program pages |
-FirstValidTime causes the program to fail. The field is reserved for future use.
## global f
@@ -480,14 +475,27 @@ for notes on transaction fields available, see `txn`. If this transaction is _i_
- Opcode: 0x36 {uint8 transaction field index} {uint8 transaction field array index}
- Stack: ... &rarr; ..., any
-- Ith value of the array field F of the current transaction
+- Ith value of the array field F of the current transaction<br />`txna` can be called using `txn` with 2 immediates.
- Availability: v2
+`txna` Fields (see [transaction reference](https://developer.algorand.org/docs/reference/transactions/)):
+
+| Index | Name | Type | In | Notes |
+| - | ------ | -- | - | --------- |
+| 26 | ApplicationArgs | []byte | v2 | Arguments passed to the application in the ApplicationCall transaction |
+| 28 | Accounts | []byte | v2 | Accounts listed in the ApplicationCall transaction |
+| 48 | Assets | uint64 | v3 | Foreign Assets listed in the ApplicationCall transaction |
+| 50 | Applications | uint64 | v3 | Foreign Apps listed in the ApplicationCall transaction |
+| 58 | Logs | []byte | v5 | Log messages emitted by an application call (only with `itxn` in v5). Application mode only |
+| 64 | ApprovalProgramPages | []byte | v7 | Approval Program as an array of pages |
+| 66 | ClearStateProgramPages | []byte | v7 | ClearState Program as an array of pages |
+
+
## gtxna t f i
- Opcode: 0x37 {uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}
- Stack: ... &rarr; ..., any
-- Ith value of the array field F from the Tth transaction in the current group
+- Ith value of the array field F from the Tth transaction in the current group<br />`gtxna` can be called using `gtxn` with 3 immediates.
- Availability: v2
## gtxns f
@@ -503,7 +511,7 @@ for notes on transaction fields available, see `txn`. If top of stack is _i_, `g
- Opcode: 0x39 {uint8 transaction field index} {uint8 transaction field array index}
- Stack: ..., A: uint64 &rarr; ..., any
-- Ith value of the array field F from the Ath transaction in the current group
+- Ith value of the array field F from the Ath transaction in the current group<br />`gtxnsa` can be called using `gtxns` with 2 immediates.
- Availability: v3
## gload t i
@@ -722,7 +730,7 @@ When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on
- Opcode: 0x58
- Stack: ..., A: []byte, B: uint64, C: uint64 &rarr; ..., []byte
-- A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails
+- A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails<br />`extract3` can be called using `extract` with no immediates.
- Availability: v5
## extract_uint16
@@ -750,14 +758,14 @@ When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on
- Opcode: 0x5c {uint8 start position}
- Stack: ..., A: []byte, B: []byte &rarr; ..., []byte
-- Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A)
+- Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A)<br />`replace2` can be called using `replace` with 1 immediate.
- Availability: v7
## replace3
- Opcode: 0x5d
- Stack: ..., A: []byte, B: uint64, C: []byte &rarr; ..., []byte
-- Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A)
+- Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A)<br />`replace3` can be called using `replace` with no immediates.
- Availability: v7
## base64_decode e
@@ -776,13 +784,15 @@ When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on
| 1 | StdEncoding | |
-Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See <a href="https://rfc-editor.org/rfc/rfc4648.html#section-4">RFC 4648</a> (sections 4 and 5). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\n` and `\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\r`, or `\n`.
+*Warning*: Usage should be restricted to very rare use cases. In almost all cases, smart contracts should directly handle non-encoded byte-strings. This opcode should only be used in cases where base64 is the only available option, e.g. interoperability with a third-party that only signs base64 strings.
+
+ Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See [RFC 4648 sections 4 and 5](https://rfc-editor.org/rfc/rfc4648.html#section-4). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\n` and `\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\r`, or `\n`.
## json_ref r
- Opcode: 0x5f {string return type}
- Stack: ..., A: []byte, B: []byte &rarr; ..., any
-- return key B's value from a [valid](jsonspec.md) utf-8 encoded json object A
+- key B's value, of type R, from a [valid](jsonspec.md) utf-8 encoded json object A
- **Cost**: 25 + 2 per 7 bytes of A
- Availability: v7
@@ -795,7 +805,9 @@ Decodes A using the base64 encoding E. Specify the encoding with an immediate ar
| 2 | JSONObject | []byte | |
-specify the return type with an immediate arg either as JSONUint64 or JSONString or JSONObject.
+*Warning*: Usage should be restricted to very rare use cases, as JSON decoding is expensive and quite limited. In addition, JSON objects are large and not optimized for size.
+
+Almost all smart contracts should use simpler and smaller methods (such as the [ABI](https://arc.algorand.foundation/ARCs/arc-0004). This opcode should only be used in cases where JSON is only available option, e.g. when a third-party only signs JSON.
## balance
@@ -1112,36 +1124,6 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
- **Cost**: 130
- Availability: v7
-## bn256_add
-
-- Opcode: 0x99
-- Stack: ..., A: []byte, B: []byte &rarr; ..., []byte
-- for (curve points A and B) return the curve point A + B
-- **Cost**: 70
-- Availability: v7
-
-A, B are curve points in G1 group. Each point consists of (X, Y) where X and Y are 256 bit integers, big-endian encoded. The encoded point is 64 bytes from concatenation of 32 byte X and 32 byte Y.
-
-## bn256_scalar_mul
-
-- Opcode: 0x9a
-- Stack: ..., A: []byte, B: []byte &rarr; ..., []byte
-- for (curve point A, scalar K) return the curve point KA
-- **Cost**: 970
-- Availability: v7
-
-A is a curve point in G1 Group and encoded as described in `bn256_add`. Scalar K is a big-endian encoded big integer that has no padding zeros.
-
-## bn256_pairing
-
-- Opcode: 0x9b
-- Stack: ..., A: []byte, B: []byte &rarr; ..., uint64
-- for (points in G1 group G1s, points in G2 group G2s), return whether they are paired => {0 or 1}
-- **Cost**: 8700
-- Availability: v7
-
-G1s are encoded by the concatenation of encoded G1 points, as described in `bn256_add`. G2s are encoded by the concatenation of encoded G2 points. Each G2 is in form (XA0+i*XA1, YA0+i*YA1) and encoded by big-endian field element XA0, XA1, YA0 and YA1 in sequence.
-
## b+
- Opcode: 0xa0
@@ -1397,3 +1379,35 @@ G1s are encoded by the concatenation of encoded G1 points, as described in `bn25
- Ath value of the array field F from the Tth transaction in the last inner group submitted
- Availability: v6
- Mode: Application
+
+## vrf_verify s
+
+- Opcode: 0xd0 {uint8 parameters index}
+- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., X: []byte, Y: uint64
+- Verify the proof B of message A against pubkey C. Returns vrf output and verification flag.
+- **Cost**: 5700
+- Availability: v7
+
+`vrf_verify` Standards:
+
+| Index | Name | Notes |
+| - | ------ | --------- |
+| 0 | VrfAlgorand | |
+
+
+`VrfAlgorand` is the VRF used in Algorand. It is ECVRF-ED25519-SHA512-Elligator2, specified in the IETF internet draft [draft-irtf-cfrg-vrf-03](https://datatracker.ietf.org/doc/draft-irtf-cfrg-vrf/03/).
+
+## block f
+
+- Opcode: 0xd1 {uint8 block field}
+- Stack: ..., A: uint64 &rarr; ..., any
+- field F of block A. Fail unless A falls between txn.LastValid-1002 and the current round (exclusive)
+- Availability: v7
+
+`block` Fields:
+
+| Index | Name | Type | Notes |
+| - | ------ | -- | --------- |
+| 0 | BlkSeed | []byte | |
+| 1 | BlkTimestamp | uint64 | |
+
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index 6a05596b2..e175a1703 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -247,6 +247,9 @@ type OpStream struct {
OffsetToLine map[int]int
HasStatefulOps bool
+
+ // Need new copy for each opstream
+ versionedPseudoOps map[string]map[int]OpSpec
}
// newOpStream constructs OpStream instances ready to invoke assemble. A new
@@ -857,43 +860,6 @@ func simpleImm(value string, label string) (byte, error) {
return byte(res), err
}
-// asmTxn2 delegates to asmTxn or asmTxna depending on number of operands
-func asmTxn2(ops *OpStream, spec *OpSpec, args []string) error {
- switch len(args) {
- case 1:
- txn := OpsByName[1]["txn"] // v1 txn opcode does not have array names
- return asmDefault(ops, &txn, args)
- case 2:
- txna := OpsByName[ops.Version]["txna"]
- return asmDefault(ops, &txna, args)
- default:
- return ops.errorf("%s expects 1 or 2 immediate arguments", spec.Name)
- }
-}
-
-func asmGtxn2(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) == 2 {
- gtxn := OpsByName[1]["gtxn"] // v1 gtxn opcode does not have array names
- return asmDefault(ops, &gtxn, args)
- }
- if len(args) == 3 {
- gtxna := OpsByName[ops.Version]["gtxna"]
- return asmDefault(ops, &gtxna, args)
- }
- return ops.errorf("%s expects 2 or 3 immediate arguments", spec.Name)
-}
-
-func asmGtxns(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) == 1 {
- return asmDefault(ops, spec, args)
- }
- if len(args) == 2 {
- gtxnsa := OpsByName[ops.Version]["gtxnsa"]
- return asmDefault(ops, &gtxnsa, args)
- }
- return ops.errorf("%s expects 1 or 2 immediate arguments", spec.Name)
-}
-
func asmItxn(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 1 {
return asmDefault(ops, spec, args)
@@ -929,7 +895,7 @@ func asmItxnField(ops *OpStream, spec *OpSpec, args []string) error {
return ops.errorf("%s %#v is not allowed.", spec.Name, args[0])
}
if fs.itxVersion > ops.Version {
- return ops.errorf("%s %s field was introduced in TEAL v%d. Missed #pragma version?", spec.Name, args[0], fs.itxVersion)
+ return ops.errorf("%s %s field was introduced in v%d. Missed #pragma version?", spec.Name, args[0], fs.itxVersion)
}
ops.pending.WriteByte(spec.Opcode)
ops.pending.WriteByte(fs.Field())
@@ -949,19 +915,52 @@ func asmDefault(ops *OpStream, spec *OpSpec, args []string) error {
}
ops.pending.WriteByte(spec.Opcode)
for i, imm := range spec.OpDetails.Immediates {
+ var correctImmediates []string
+ var numImmediatesWithField []int
+ pseudos, isPseudoName := ops.versionedPseudoOps[spec.Name]
switch imm.kind {
case immByte:
if imm.Group != nil {
fs, ok := imm.Group.SpecByName(args[i])
if !ok {
+ _, err := simpleImm(args[i], "")
+ if err == nil {
+ // User supplied a uint, so we see if any of the other immediates take uints
+ for j, otherImm := range spec.OpDetails.Immediates {
+ if otherImm.kind == immByte && otherImm.Group == nil {
+ correctImmediates = append(correctImmediates, strconv.Itoa(j+1))
+ }
+ }
+ if len(correctImmediates) > 0 {
+ errMsg := spec.Name
+ if isPseudoName {
+ errMsg += " with " + joinIntsOnOr("immediate", len(args))
+ }
+ return ops.errorf("%s can only use %#v as immediate %s", errMsg, args[i], strings.Join(correctImmediates, " or "))
+ }
+ }
+ if isPseudoName {
+ for numImms, ps := range pseudos {
+ for _, psImm := range ps.OpDetails.Immediates {
+ if psImm.kind == immByte && psImm.Group != nil {
+ if _, ok := psImm.Group.SpecByName(args[i]); ok {
+ numImmediatesWithField = append(numImmediatesWithField, numImms)
+ }
+ }
+ }
+ }
+ if len(numImmediatesWithField) > 0 {
+ return ops.errorf("%#v field of %s can only be used with %s", args[i], spec.Name, joinIntsOnOr("immediate", numImmediatesWithField...))
+ }
+ }
return ops.errorf("%s unknown field: %#v", spec.Name, args[i])
}
- // refine the typestack now, so it is maintain even if there's a version error
+ // refine the typestack now, so it is maintained even if there's a version error
if fs.Type().Typed() {
ops.returns(spec, fs.Type())
}
if fs.Version() > ops.Version {
- return ops.errorf("%s %s field was introduced in TEAL v%d. Missed #pragma version?",
+ return ops.errorf("%s %s field was introduced in v%d. Missed #pragma version?",
spec.Name, args[i], fs.Version())
}
ops.pending.WriteByte(fs.Field())
@@ -969,6 +968,23 @@ func asmDefault(ops *OpStream, spec *OpSpec, args []string) error {
// simple immediate that must be a number from 0-255
val, err := simpleImm(args[i], imm.Name)
if err != nil {
+ if strings.Contains(err.Error(), "unable to parse") {
+ // Perhaps the field works in a different order
+ for j, otherImm := range spec.OpDetails.Immediates {
+ if otherImm.kind == immByte && otherImm.Group != nil {
+ if _, match := otherImm.Group.SpecByName(args[i]); match {
+ correctImmediates = append(correctImmediates, strconv.Itoa(j+1))
+ }
+ }
+ }
+ if len(correctImmediates) > 0 {
+ errMsg := spec.Name
+ if isPseudoName {
+ errMsg += " with " + joinIntsOnOr("immediate", len(args))
+ }
+ return ops.errorf("%s can only use %#v as immediate %s", errMsg, args[i], strings.Join(correctImmediates, " or "))
+ }
+ }
return ops.errorf("%s %w", spec.Name, err)
}
ops.pending.WriteByte(val)
@@ -1184,17 +1200,178 @@ func typeLoads(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return nil, StackTypes{scratchType}
}
-// keywords or "pseudo-ops" handle parsing and assembling special asm language
-// constructs like 'addr' We use an OpSpec here, but it's somewhat degenerate,
-// since they don't have opcodes or eval functions. But it does need a lot of
-// OpSpec, in order to support assembly - Mode, typing info, etc.
-var keywords = map[string]OpSpec{
- "int": {0, "int", nil, proto(":i"), 1, assembler(asmInt)},
- "byte": {0, "byte", nil, proto(":b"), 1, assembler(asmByte)},
+func joinIntsOnOr(singularTerminator string, list ...int) string {
+ if len(list) == 1 {
+ switch list[0] {
+ case 0:
+ return "no " + singularTerminator + "s"
+ case 1:
+ return "1 " + singularTerminator
+ default:
+ return fmt.Sprintf("%d %ss", list[0], singularTerminator)
+ }
+ }
+ sort.Ints(list)
+ errMsg := ""
+ for i, val := range list {
+ if i+1 < len(list) {
+ errMsg += fmt.Sprintf("%d or ", val)
+ } else {
+ errMsg += fmt.Sprintf("%d ", val)
+ }
+ }
+ return errMsg + singularTerminator + "s"
+}
+
+func pseudoImmediatesError(ops *OpStream, name string, specs map[int]OpSpec) {
+ immediateCounts := make([]int, len(specs))
+ i := 0
+ for numImms := range specs {
+ immediateCounts[i] = numImms
+ i++
+ }
+ ops.error(name + " expects " + joinIntsOnOr("immediate argument", immediateCounts...))
+}
+
+// getSpec finds the OpSpec we need during assembly based on its name, our current version, and the immediates passed in
+// Note getSpec handles both normal OpSpecs and those supplied by versionedPseudoOps
+// The returned string is the spec's name, annotated if it was a pseudoOp with no immediates to help disambiguate typetracking errors
+func getSpec(ops *OpStream, name string, args []string) (OpSpec, string, bool) {
+ pseudoSpecs, ok := ops.versionedPseudoOps[name]
+ if ok {
+ pseudo, ok := pseudoSpecs[len(args)]
+ if !ok {
+ // Could be that pseudoOp wants to handle immediates itself so check -1 key
+ pseudo, ok = pseudoSpecs[anyImmediates]
+ if !ok {
+ // Number of immediates supplied did not match any of the pseudoOps of the given name, so we try to construct a mock spec that can be used to track types
+ pseudoImmediatesError(ops, name, pseudoSpecs)
+ proto, version, ok := mergeProtos(pseudoSpecs)
+ if !ok {
+ return OpSpec{}, "", false
+ }
+ pseudo = OpSpec{Name: name, Proto: proto, Version: version, OpDetails: OpDetails{asm: func(*OpStream, *OpSpec, []string) error { return nil }}}
+ }
+ }
+ pseudo.Name = name
+ if pseudo.Version > ops.Version {
+ ops.errorf("%s opcode with %s was introduced in v%d", pseudo.Name, joinIntsOnOr("immediate", len(args)), pseudo.Version)
+ }
+ if len(args) == 0 {
+ return pseudo, pseudo.Name + " without immediates", true
+ }
+ return pseudo, pseudo.Name, true
+ }
+ spec, ok := OpsByName[ops.Version][name]
+ if !ok {
+ spec, ok = OpsByName[AssemblerMaxVersion][name]
+ if ok {
+ ops.errorf("%s opcode was introduced in v%d", name, spec.Version)
+ } else {
+ ops.errorf("unknown opcode: %s", name)
+ }
+ }
+ return spec, spec.Name, ok
+}
+
+// pseudoOps allows us to provide convenient ops that mirror existing ops without taking up another opcode. Using "txn" in version 2 and on, for example, determines whether to actually assemble txn or to use txna instead based on the number of immediates.
+// Immediates key of -1 means asmfunc handles number of immediates
+// These will then get transferred over into a per-opstream versioned table during assembly
+const anyImmediates = -1
+
+var pseudoOps = map[string]map[int]OpSpec{
+ "int": {anyImmediates: OpSpec{Name: "int", Proto: proto(":i"), OpDetails: assembler(asmInt)}},
+ "byte": {anyImmediates: OpSpec{Name: "byte", Proto: proto(":b"), OpDetails: assembler(asmByte)}},
// parse basics.Address, actually just another []byte constant
- "addr": {0, "addr", nil, proto(":b"), 1, assembler(asmAddr)},
+ "addr": {anyImmediates: OpSpec{Name: "addr", Proto: proto(":b"), OpDetails: assembler(asmAddr)}},
// take a signature, hash it, and take first 4 bytes, actually just another []byte constant
- "method": {0, "method", nil, proto(":b"), 1, assembler(asmMethod)},
+ "method": {anyImmediates: OpSpec{Name: "method", Proto: proto(":b"), OpDetails: assembler(asmMethod)}},
+ "txn": {1: OpSpec{Name: "txn"}, 2: OpSpec{Name: "txna"}},
+ "gtxn": {2: OpSpec{Name: "gtxn"}, 3: OpSpec{Name: "gtxna"}},
+ "gtxns": {1: OpSpec{Name: "gtxns"}, 2: OpSpec{Name: "gtxnsa"}},
+ "extract": {0: OpSpec{Name: "extract3"}, 2: OpSpec{Name: "extract"}},
+ "replace": {0: OpSpec{Name: "replace3"}, 1: OpSpec{Name: "replace2"}},
+}
+
+func addPseudoDocTags() {
+ for name, specs := range pseudoOps {
+ for i, spec := range specs {
+ if spec.Name == name || i == anyImmediates {
+ continue
+ }
+ msg := fmt.Sprintf("`%s` can be called using `%s` with %s.", spec.Name, name, joinIntsOnOr("immediate", i))
+ desc, ok := opDocByName[spec.Name]
+ if ok {
+ opDocByName[spec.Name] = desc + "<br />" + msg
+ } else {
+ opDocByName[spec.Name] = msg
+ }
+ }
+ }
+}
+
+func init() {
+ addPseudoDocTags()
+}
+
+// Differentiates between specs in pseudoOps that can be assembled on their own and those that need to grab a different spec
+func isFullSpec(spec OpSpec) bool {
+ return spec.asm != nil
+}
+
+// mergeProtos allows us to support typetracking of pseudo-ops which are given an improper number of immediates
+//by creating a new proto that is a combination of all the pseudo-op's possibilities
+func mergeProtos(specs map[int]OpSpec) (Proto, uint64, bool) {
+ var args StackTypes
+ var returns StackTypes
+ var minVersion uint64
+ i := 0
+ for _, spec := range specs {
+ if i == 0 {
+ args = spec.Arg.Types
+ returns = spec.Return.Types
+ minVersion = spec.Version
+ } else {
+ if spec.Version < minVersion {
+ minVersion = spec.Version
+ }
+ if len(args) != len(spec.Arg.Types) || len(returns) != len(spec.Return.Types) {
+ return Proto{}, 0, false
+ }
+ for j := range args {
+ if args[j] != spec.Arg.Types[j] {
+ args[j] = StackAny
+ }
+ }
+ for j := range returns {
+ if returns[j] != spec.Return.Types[j] {
+ returns[j] = StackAny
+ }
+ }
+ }
+ i++
+ }
+ return Proto{typedList{args, ""}, typedList{returns, ""}}, minVersion, true
+}
+
+func prepareVersionedPseudoTable(version uint64) map[string]map[int]OpSpec {
+ m := make(map[string]map[int]OpSpec)
+ for name, specs := range pseudoOps {
+ m[name] = make(map[int]OpSpec)
+ for numImmediates, spec := range specs {
+ if isFullSpec(spec) {
+ m[name][numImmediates] = spec
+ continue
+ }
+ newSpec, ok := OpsByName[version][spec.Name]
+ if ok {
+ m[name][numImmediates] = newSpec
+ } else {
+ m[name][numImmediates] = OpsByName[AssemblerMaxVersion][spec.Name]
+ }
+ }
+ }
+ return m
}
type lineError struct {
@@ -1387,8 +1564,10 @@ func (ops *OpStream) assemble(text string) error {
if ops.Version == assemblerNoVersion {
ops.Version = AssemblerDefaultVersion
}
+ if ops.versionedPseudoOps == nil {
+ ops.versionedPseudoOps = prepareVersionedPseudoTable(ops.Version)
+ }
opstring := fields[0]
-
if opstring[len(opstring)-1] == ':' {
ops.createLabel(opstring[:len(opstring)-1])
fields = fields[1:]
@@ -1398,26 +1577,7 @@ func (ops *OpStream) assemble(text string) error {
}
opstring = fields[0]
}
-
- spec, ok := OpsByName[ops.Version][opstring]
- if !ok {
- spec, ok = keywords[opstring]
- if spec.Version > 1 && spec.Version > ops.Version {
- ok = false
- }
- }
- if !ok {
- // If the problem is only the version, it's useful to lookup the
- // opcode from latest version, so we proceed with assembly well
- // enough to report follow-on errors. Of course, we still have to
- // bail out on the assembly as a whole.
- spec, ok = OpsByName[AssemblerMaxVersion][opstring]
- if ok {
- ops.errorf("%s opcode was introduced in TEAL v%d", opstring, spec.Version)
- } else {
- spec, ok = keywords[opstring]
- }
- }
+ spec, expandedName, ok := getSpec(ops, opstring, fields[1:])
if ok {
ops.trace("%3d: %s\t", ops.sourceLine, opstring)
ops.recordSourceLine()
@@ -1425,8 +1585,8 @@ func (ops *OpStream) assemble(text string) error {
ops.HasStatefulOps = true
}
args, returns := spec.Arg.Types, spec.Return.Types
- if spec.OpDetails.refine != nil {
- nargs, nreturns := spec.OpDetails.refine(&ops.known, fields[1:])
+ if spec.refine != nil {
+ nargs, nreturns := spec.refine(&ops.known, fields[1:])
if nargs != nil {
args = nargs
}
@@ -1434,7 +1594,7 @@ func (ops *OpStream) assemble(text string) error {
returns = nreturns
}
}
- ops.trackStack(args, returns, fields)
+ ops.trackStack(args, returns, append([]string{expandedName}, fields[1:]...))
spec.asm(ops, &spec, fields[1:])
if spec.deadens() { // An unconditional branch deadens the following code
ops.known.deaden()
@@ -1445,12 +1605,10 @@ func (ops *OpStream) assemble(text string) error {
}
ops.trace("\n")
continue
- } else {
- ops.errorf("unknown opcode: %s", opstring)
}
}
- // backward compatibility: do not allow jumps behind last instruction in TEAL v1
+ // backward compatibility: do not allow jumps behind last instruction in v1
if ops.Version <= 1 {
for label, dest := range ops.labels {
if dest == ops.pending.Len() {
@@ -1506,7 +1664,7 @@ func (ops *OpStream) pragma(line string) error {
// We initialize Version with assemblerNoVersion as a marker for
// non-specified version because version 0 is valid
- // version for TEAL v1.
+ // version for v1.
if ops.Version == assemblerNoVersion {
ops.Version = ver
} else if ops.Version != ver {
@@ -1553,7 +1711,7 @@ func (ops *OpStream) resolveLabels() {
// all branch instructions (currently) are opcode byte and 2 offset bytes, and the destination is relative to the next pc as if the branch was a no-op
naturalPc := lr.position + 3
if ops.Version < backBranchEnabledVersion && dest < naturalPc {
- ops.errorf("label %#v is a back reference, back jump support was introduced in TEAL v4", lr.label)
+ ops.errorf("label %#v is a back reference, back jump support was introduced in v4", lr.label)
continue
}
jump := dest - naturalPc
@@ -2111,7 +2269,7 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
}
if strings.HasPrefix(spec.Name, "bytec_") {
b := spec.Name[len(spec.Name)-1] - byte('0')
- if int(b) < len(dis.intc) {
+ if int(b) < len(dis.bytec) {
out += fmt.Sprintf(" // %s", guessByteFormat(dis.bytec[b]))
}
}
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index 478eb7c21..24d9dffd0 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -363,6 +363,12 @@ pushint 1
gitxnas 0 Logs
`
+const randomnessNonsense = `
+pushint 0xffff
+block BlkTimestamp
+vrf_verify VrfAlgorand
+`
+
const v7Nonsense = v6Nonsense + `
base64_decode URLEncoding
json_ref JSONUint64
@@ -378,17 +384,25 @@ pushbytes 0x012345
dup
dup
ed25519verify_bare
+` + randomnessNonsense + `
pushbytes 0x4321
pushbytes 0x77
replace2 2
pushbytes 0x88
pushint 1
replace3
-` + pairingNonsense
+`
+
+const v8Nonsense = v7Nonsense + pairingNonsense
const v6Compiled = "2004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b53a03b6b7043cb8033a0c2349c42a9631007300810881088120978101c53a8101c6003a"
-const v7Compiled = v6Compiled + "5e005f018120af060180070123456789abcd49490501988003012345494984800243218001775c0280018881015d" + pairingCompiled
+const randomnessCompiled = "81ffff03d101d000"
+
+const v7Compiled = v6Compiled + "5e005f018120af060180070123456789abcd49490501988003012345494984" +
+ randomnessCompiled + "800243218001775c0280018881015d"
+
+const v8Compiled = v7Compiled + pairingCompiled
var nonsense = map[uint64]string{
1: v1Nonsense,
@@ -398,6 +412,7 @@ var nonsense = map[uint64]string{
5: v5Nonsense,
6: v6Nonsense,
7: v7Nonsense,
+ 8: v8Nonsense,
}
var compiled = map[uint64]string{
@@ -408,6 +423,7 @@ var compiled = map[uint64]string{
5: "052004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b53a03",
6: "06" + v6Compiled,
7: "07" + v7Compiled,
+ 8: "08" + v8Compiled,
}
func pseudoOp(opcode string) bool {
@@ -433,7 +449,7 @@ func TestAssemble(t *testing.T) {
// This doesn't have to be a sensible program to run, it just has to compile.
t.Parallel()
- require.Equal(t, LogicVersion, len(nonsense))
+ require.LessOrEqual(t, LogicVersion, len(nonsense)) // Allow nonsense for future versions
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
for _, spec := range OpSpecs {
@@ -456,7 +472,7 @@ func TestAssemble(t *testing.T) {
}
}
-var experiments = []uint64{fidoVersion, pairingVersion}
+var experiments = []uint64{pairingVersion}
// TestExperimental forces a conscious choice to promote "experimental" opcode
// groups. This will fail when we increment vFuture's LogicSigVersion. If we had
@@ -465,7 +481,8 @@ var experiments = []uint64{fidoVersion, pairingVersion}
func TestExperimental(t *testing.T) {
futureV := config.Consensus[protocol.ConsensusFuture].LogicSigVersion
for _, v := range experiments {
- require.Equal(t, futureV, v)
+ // Allows less, so we can push something out, even before vFuture has been updated.
+ require.LessOrEqual(t, futureV, v)
}
}
@@ -502,20 +519,47 @@ func testMatch(t testing.TB, actual, expected string) bool {
}
}
-func assemblyTrace(text string, ver uint64) string {
+func assembleWithTrace(text string, ver uint64) (*OpStream, error) {
ops := newOpStream(ver)
ops.Trace = &strings.Builder{}
- ops.assemble(text)
- return ops.Trace.String()
+ err := ops.assemble(text)
+ return &ops, err
+}
+
+func lines(s string, num int) (bool, string) {
+ if num < 1 {
+ return true, ""
+ }
+ found := 0
+ for i := 0; i < len(s); i++ {
+ if s[i] == '\n' {
+ found++
+ if found == num {
+ return true, s[0 : i+1]
+ }
+ }
+ }
+ return false, s
+}
+
+func summarize(trace *strings.Builder) string {
+ truncated, msg := lines(trace.String(), 50)
+ if !truncated {
+ return msg
+ }
+ return msg + "(trace truncated)\n"
}
func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpStream {
t.Helper()
program := strings.ReplaceAll(source, ";", "\n")
- ops, err := AssembleStringWithVersion(program, ver)
+ ops, err := assembleWithTrace(program, ver)
if len(expected) == 0 {
if len(ops.Errors) > 0 || err != nil || ops == nil || ops.Program == nil {
- t.Log(assemblyTrace(program, ver))
+ t.Log(summarize(ops.Trace))
+ }
+ if len(ops.Errors) > 10 {
+ ops.Errors = ops.Errors[:10] // Truncate to reasonable
}
require.Empty(t, ops.Errors)
require.NoError(t, err)
@@ -553,7 +597,7 @@ func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpSt
}
}
if fail {
- t.Log(assemblyTrace(program, ver))
+ t.Log(summarize(ops.Trace))
t.FailNow()
}
} else {
@@ -570,7 +614,7 @@ func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpSt
require.NotNil(t, found, "Error %s was not found on line %d", exp.s, exp.l)
msg := found.Unwrap().Error()
if !testMatch(t, msg, exp.s) {
- t.Log(assemblyTrace(program, ver))
+ t.Log(summarize(ops.Trace))
t.FailNow()
}
}
@@ -604,25 +648,47 @@ func TestAssembleTxna(t *testing.T) {
testLine(t, "gtxna 0 ApplicationArgs 256", AssemblerMaxVersion, "gtxna i beyond 255: 256")
testLine(t, "gtxna 256 Accounts 0", AssemblerMaxVersion, "gtxna t beyond 255: 256")
testLine(t, "gtxna 0 Sender 256", AssemblerMaxVersion, "gtxna unknown field: \"Sender\"")
- testLine(t, "txn Accounts 0", 1, "txn expects 1 immediate argument")
+ testLine(t, "gtxna ApplicationArgs 0 255", AssemblerMaxVersion, "gtxna can only use \"ApplicationArgs\" as immediate 2")
+ testLine(t, "gtxna 0 255 ApplicationArgs", AssemblerMaxVersion, "gtxna can only use \"255\" as immediate 1 or 3")
+
+ testLine(t, "txn Accounts 256", AssemblerMaxVersion, "txn i beyond 255: 256")
+ testLine(t, "txn ApplicationArgs 256", AssemblerMaxVersion, "txn i beyond 255: 256")
+ testLine(t, "txn 255 ApplicationArgs", AssemblerMaxVersion, "txn with 2 immediates can only use \"255\" as immediate 2")
+ testLine(t, "txn Sender 256", AssemblerMaxVersion, "\"Sender\" field of txn can only be used with 1 immediate")
+ testLine(t, "gtxn 0 Accounts 256", AssemblerMaxVersion, "gtxn i beyond 255: 256")
+ testLine(t, "gtxn 0 ApplicationArgs 256", AssemblerMaxVersion, "gtxn i beyond 255: 256")
+ testLine(t, "gtxn 256 Accounts 0", AssemblerMaxVersion, "gtxn t beyond 255: 256")
+ testLine(t, "gtxn 0 Sender 256", AssemblerMaxVersion, "\"Sender\" field of gtxn can only be used with 2 immediates")
+ testLine(t, "gtxn ApplicationArgs 0 255", AssemblerMaxVersion, "gtxn with 3 immediates can only use \"ApplicationArgs\" as immediate 2")
+ testLine(t, "gtxn 0 255 ApplicationArgs", AssemblerMaxVersion, "gtxn with 3 immediates can only use \"255\" as immediate 1 or 3")
+
+ testLine(t, "txn Accounts 0", 1, "txn opcode with 2 immediates was introduced in v2")
testLine(t, "txn Accounts 0 1", 2, "txn expects 1 or 2 immediate arguments")
testLine(t, "txna Accounts 0 1", AssemblerMaxVersion, "txna expects 2 immediate arguments")
+ testLine(t, "txn Accounts 0 1", AssemblerMaxVersion, "txn expects 1 or 2 immediate arguments")
testLine(t, "txnas Accounts 1", AssemblerMaxVersion, "txnas expects 1 immediate argument")
testLine(t, "txna Accounts a", AssemblerMaxVersion, "txna unable to parse...")
- testLine(t, "gtxn 0 Sender 0", 1, "gtxn expects 2 immediate arguments")
+ testLine(t, "txn Accounts a", AssemblerMaxVersion, "txn unable to parse...")
+ testLine(t, "gtxn 0 Sender 0", 1, "gtxn opcode with 3 immediates was introduced in v2")
testLine(t, "gtxn 0 Sender 1 2", 2, "gtxn expects 2 or 3 immediate arguments")
testLine(t, "gtxna 0 Accounts 1 2", AssemblerMaxVersion, "gtxna expects 3 immediate arguments")
testLine(t, "gtxna a Accounts 0", AssemblerMaxVersion, "gtxna unable to parse...")
testLine(t, "gtxna 0 Accounts a", AssemblerMaxVersion, "gtxna unable to parse...")
+
+ testLine(t, "gtxn 0 Accounts 1 2", AssemblerMaxVersion, "gtxn expects 2 or 3 immediate arguments")
+ testLine(t, "gtxn a Accounts 0", AssemblerMaxVersion, "gtxn unable to parse...")
+ testLine(t, "gtxn 0 Accounts a", AssemblerMaxVersion, "gtxn unable to parse...")
+
testLine(t, "gtxnas Accounts 1 2", AssemblerMaxVersion, "gtxnas expects 2 immediate arguments")
testLine(t, "txn ABC", 2, "txn unknown field: \"ABC\"")
testLine(t, "gtxn 0 ABC", 2, "gtxn unknown field: \"ABC\"")
testLine(t, "gtxn a ABC", 2, "gtxn unable to parse...")
- testLine(t, "txn Accounts", 1, "txn unknown field: \"Accounts\"")
- testLine(t, "txn Accounts", AssemblerMaxVersion, "txn unknown field: \"Accounts\"")
+ // For now not going to additionally report version issue until version is only problem
+ testLine(t, "txn Accounts", 1, "\"Accounts\" field of txn can only be used with 2 immediates")
+ testLine(t, "txn Accounts", AssemblerMaxVersion, "\"Accounts\" field of txn can only be used with 2 immediates")
testLine(t, "txn Accounts 0", AssemblerMaxVersion, "")
- testLine(t, "gtxn 0 Accounts", AssemblerMaxVersion, "gtxn unknown field: \"Accounts\"...")
- testLine(t, "gtxn 0 Accounts", 1, "gtxn unknown field: \"Accounts\"")
+ testLine(t, "gtxn 0 Accounts", AssemblerMaxVersion, "\"Accounts\" field of gtxn can only be used with 3 immediates")
+ testLine(t, "gtxn 0 Accounts", 1, "\"Accounts\" field of gtxn can only be used with 3 immediates")
testLine(t, "gtxn 0 Accounts 1", AssemblerMaxVersion, "")
}
@@ -1464,6 +1530,10 @@ itxn NumLogs
itxn CreatedAssetID
itxn CreatedApplicationID
itxn LastLog
+txn NumApprovalProgramPages
+txna ApprovalProgramPages 0
+txn NumClearStateProgramPages
+txna ClearStateProgramPages 0
`, AssemblerMaxVersion)
for _, globalField := range GlobalFieldNames {
if !strings.Contains(text, globalField) {
@@ -1495,8 +1565,11 @@ func TestAssembleDisassembleCycle(t *testing.T) {
// optimizations in later versions that change the bytecode
// emitted. But currently it is, so we test it for now to
// catch any suprises.
- require.Equal(t, LogicVersion, len(nonsense))
+ require.LessOrEqual(t, LogicVersion, len(nonsense)) // Allow nonsense for future versions
for v, source := range nonsense {
+ if v > LogicVersion {
+ continue // We allow them to be set, but can't test assembly beyond LogicVersion
+ }
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, source, v)
t2, err := Disassemble(ops.Program)
@@ -1682,7 +1755,7 @@ func TestAssembleVersions(t *testing.T) {
testLine(t, "txna Accounts 0", AssemblerMaxVersion, "")
testLine(t, "txna Accounts 0", 2, "")
- testLine(t, "txna Accounts 0", 1, "txna opcode was introduced in TEAL v2")
+ testLine(t, "txna Accounts 0", 1, "txna opcode was introduced in v2")
}
func TestAssembleBalance(t *testing.T) {
@@ -1873,7 +1946,7 @@ func TestDisassembleLastLabel(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- // starting from TEAL v2 branching to the last line are legal
+ // starting from v2 branching to the last line are legal
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
source := fmt.Sprintf(`#pragma version %d
@@ -2215,7 +2288,7 @@ int 1
require.NoError(t, err)
require.Equal(t, ops2.Program, ops.Program)
- // check if no version it defaults to TEAL v1
+ // check if no version it defaults to v1
text = `byte "test"
len
`
@@ -2475,8 +2548,8 @@ func TestBadInnerFields(t *testing.T) {
testProg(t, "itxn_begin; int 1000; itxn_field FirstValidTime", 5, Expect{3, "...is not allowed."})
testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 5, Expect{3, "...is not allowed."})
testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 5, Expect{4, "...is not allowed."})
- testProg(t, "itxn_begin; byte 0x7263; itxn_field Note", 5, Expect{3, "...Note field was introduced in TEAL v6..."})
- testProg(t, "itxn_begin; byte 0x7263; itxn_field VotePK", 5, Expect{3, "...VotePK field was introduced in TEAL v6..."})
+ testProg(t, "itxn_begin; byte 0x7263; itxn_field Note", 5, Expect{3, "...Note field was introduced in v6..."})
+ testProg(t, "itxn_begin; byte 0x7263; itxn_field VotePK", 5, Expect{3, "...VotePK field was introduced in v6..."})
testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 5, Expect{4, "...is not allowed."})
testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 6, Expect{3, "...is not allowed."})
@@ -2572,3 +2645,73 @@ done:
concat
`, LogicVersion, Expect{5, "concat arg 1 wanted type []byte..."})
}
+
+func TestMergeProtos(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ iVi := OpSpec{Proto: proto("i:i")}
+ bVb := OpSpec{Proto: proto("b:b")}
+ aaVa := OpSpec{Proto: proto("aa:a")}
+ aVaa := OpSpec{Proto: proto("a:aa")}
+ p, _, _ := mergeProtos(map[int]OpSpec{0: iVi, 1: bVb})
+ require.Equal(t, proto("a:a"), p)
+ _, _, ok := mergeProtos(map[int]OpSpec{0: aaVa, 1: iVi})
+ require.False(t, ok)
+ _, _, ok = mergeProtos(map[int]OpSpec{0: aVaa, 1: iVi})
+ require.False(t, ok)
+ medley := OpSpec{Proto: proto("aibibabai:aibibabai")}
+ medley2 := OpSpec{Proto: proto("biabbaiia:biabbaiia")}
+ p, _, _ = mergeProtos(map[int]OpSpec{0: medley, 1: medley2})
+ require.Equal(t, proto("aiaabaaaa:aiaabaaaa"), p)
+ v1 := OpSpec{Version: 1, Proto: proto(":")}
+ v2 := OpSpec{Version: 2, Proto: proto(":")}
+ _, v, _ := mergeProtos(map[int]OpSpec{0: v2, 1: v1})
+ require.Equal(t, uint64(1), v)
+}
+
+// Extra tests for features of getSpec that are currently not tested elsewhere
+func TestGetSpec(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ ops, _ := AssembleStringWithVersion("int 1", AssemblerMaxVersion)
+ ops.versionedPseudoOps["dummyPseudo"] = make(map[int]OpSpec)
+ ops.versionedPseudoOps["dummyPseudo"][1] = OpSpec{Name: "b:", Version: AssemblerMaxVersion, Proto: proto("b:")}
+ ops.versionedPseudoOps["dummyPseudo"][2] = OpSpec{Name: ":", Version: AssemblerMaxVersion}
+ _, _, ok := getSpec(ops, "dummyPseudo", []string{})
+ require.False(t, ok)
+ _, _, ok = getSpec(ops, "nonsense", []string{})
+ require.False(t, ok)
+ require.Equal(t, 2, len(ops.Errors))
+ require.Equal(t, "unknown opcode: nonsense", ops.Errors[1].Err.Error())
+}
+
+func TestAddPseudoDocTags(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // Not parallel because it modifies pseudoOps and opDocByName which are global maps
+ // t.Parallel()
+ defer func() {
+ delete(pseudoOps, "tests")
+ delete(opDocByName, "multiple")
+ delete(opDocByName, "single")
+ delete(opDocByName, "none")
+ delete(opDocByName, "any")
+ }()
+
+ pseudoOps["tests"] = map[int]OpSpec{2: OpSpec{Name: "multiple"}, 1: OpSpec{Name: "single"}, 0: OpSpec{Name: "none"}, anyImmediates: OpSpec{Name: "any"}}
+ addPseudoDocTags()
+ require.Equal(t, "`multiple` can be called using `tests` with 2 immediates.", opDocByName["multiple"])
+ require.Equal(t, "`single` can be called using `tests` with 1 immediate.", opDocByName["single"])
+ require.Equal(t, "`none` can be called using `tests` with no immediates.", opDocByName["none"])
+ require.Equal(t, "", opDocByName["any"])
+}
+func TestReplacePseudo(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ replaceVersion := 7
+ for v := uint64(replaceVersion); v <= AssemblerMaxVersion; v++ {
+ testProg(t, "byte 0x0000; byte 0x1234; replace 0", v)
+ testProg(t, "byte 0x0000; int 0; byte 0x1234; replace", v)
+ testProg(t, "byte 0x0000; byte 0x1234; replace", v, Expect{3, "replace without immediates expects 3 stack arguments but stack height is 2"})
+ testProg(t, "byte 0x0000; int 0; byte 0x1234; replace 0", v, Expect{4, "replace 0 arg 0 wanted type []byte got uint64"})
+ }
+}
diff --git a/data/transactions/logic/backwardCompat_test.go b/data/transactions/logic/backwardCompat_test.go
index e2503e887..086741dcd 100644
--- a/data/transactions/logic/backwardCompat_test.go
+++ b/data/transactions/logic/backwardCompat_test.go
@@ -28,9 +28,9 @@ import (
"github.com/stretchr/testify/require"
)
-// This test ensures a program compiled with by pre-TEAL v2 go-algorand
-// that includes all the opcodes from TEAL v1 runs in TEAL v2 runModeSignature well
-var sourceTEALv1 = `byte 0x41 // A
+// This test ensures a program compiled with by pre-AVM v2 go-algorand
+// that includes all the opcodes from AVM v1 runs in AVM v2 runModeSignature well
+var sourceV1 = `byte 0x41 // A
sha256
byte 0x559aead08264d5795d3909718cdd05abd49572e84fe55590eef31a88a08fdffd
==
@@ -246,7 +246,7 @@ dup
==
`
-var programTEALv1 = "01200500010220ffffffffffffffffff012608014120559aead08264d5795d3909718cdd05abd49572e84fe55590eef31a88a08fdffd0142201f675bff07515f5df96737194ea945c36c41e7b4fcef307b7cd4d0e602a6911101432034b99f8dde1ba273c0a28cf5b2e4dbe497f8cb2453de0c8ba6d578c9431a62cb0100200000000000000000000000000000000000000000000000000000000000000000280129122a022b1210270403270512102d2e2f041022082209230a230b240c220d230e230f231022112312231314301525121617182319231a221b21041c1d12222312242512102104231210482829122a2b121027042706121048310031071331013102121022310413103105310613103108311613103109310a1210310b310f1310310c310d1210310e31101310311131121310311331141210311531171210483300003300071333000133000212102233000413103300053300061310330008330016131033000933000a121033000b33000f131033000c33000d121033000e3300101310330011330012131033001333001412103300153300171210483200320112320232041310320327071210350034001040000100234912"
+var programV1 = "01200500010220ffffffffffffffffff012608014120559aead08264d5795d3909718cdd05abd49572e84fe55590eef31a88a08fdffd0142201f675bff07515f5df96737194ea945c36c41e7b4fcef307b7cd4d0e602a6911101432034b99f8dde1ba273c0a28cf5b2e4dbe497f8cb2453de0c8ba6d578c9431a62cb0100200000000000000000000000000000000000000000000000000000000000000000280129122a022b1210270403270512102d2e2f041022082209230a230b240c220d230e230f231022112312231314301525121617182319231a221b21041c1d12222312242512102104231210482829122a2b121027042706121048310031071331013102121022310413103105310613103108311613103109310a1210310b310f1310310c310d1210310e31101310311131121310311331141210311531171210483300003300071333000133000212102233000413103300053300061310330008330016131033000933000a121033000b33000f131033000c33000d121033000e3300101310330011330012131033001333001412103300153300171210483200320112320232041310320327071210350034001040000100234912"
func TestBackwardCompatTEALv1(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -260,15 +260,15 @@ func TestBackwardCompatTEALv1(t *testing.T) {
require.NoError(t, err)
pk := basics.Address(c.SignatureVerifier)
- program, err := hex.DecodeString(programTEALv1)
+ program, err := hex.DecodeString(programV1)
require.NoError(t, err)
// ensure old program is the same as a new one when assembling without version
- ops, err := AssembleString(sourceTEALv1)
+ ops, err := AssembleString(sourceV1)
require.NoError(t, err)
require.Equal(t, program, ops.Program)
- // ensure the old program is the same as a new one except TEAL version byte
- opsV2, err := AssembleStringWithVersion(sourceTEALv1, 2)
+ // ensure the old program is the same as a new one except AVM version byte
+ opsV2, err := AssembleStringWithVersion(sourceV1, 2)
require.NoError(t, err)
require.Equal(t, program[1:], opsV2.Program[1:])
@@ -278,7 +278,7 @@ func TestBackwardCompatTEALv1(t *testing.T) {
})
ep, tx, _ := makeSampleEnvWithVersion(1)
- // RekeyTo disallowed on TEAL v0/v1
+ // RekeyTo disallowed on AVM v0/v1
tx.RekeyTo = basics.Address{}
ep.TxnGroup[0].Lsig.Logic = program
@@ -327,7 +327,7 @@ func TestBackwardCompatTEALv1(t *testing.T) {
// Cost remains the same, because v0 does not get dynamic treatment
ep.Proto.LogicSigMaxCost = 2139
- ep.MinTealVersion = new(uint64) // Was higher because sample txn has a rekey
+ ep.MinAvmVersion = new(uint64) // Was higher because sample txn has a rekey
testLogicBytes(t, program, ep, "static cost", "")
ep.Proto.LogicSigMaxCost = 2140
@@ -343,7 +343,7 @@ func TestBackwardCompatTEALv1(t *testing.T) {
testLogicBytes(t, program, ep)
}
-// ensure v2 fields error on pre TEAL v2 logicsig version
+// ensure v2 fields error on pre v2 logicsig version
// ensure v2 fields error in v1 program
func TestBackwardCompatGlobalFields(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -368,7 +368,7 @@ func TestBackwardCompatGlobalFields(t *testing.T) {
ops := testProg(t, text, AssemblerMaxVersion)
ep, _, _ := makeSampleEnvWithVersion(1)
- ep.TxnGroup[0].Txn.RekeyTo = basics.Address{} // avoid min teal version issues
+ ep.TxnGroup[0].Txn.RekeyTo = basics.Address{} // avoid min version issues
ep.TxnGroup[0].Lsig.Logic = ops.Program
_, err := EvalSignature(0, ep)
require.Error(t, err)
@@ -408,13 +408,13 @@ func TestBackwardCompatTxnFields(t *testing.T) {
for _, fs := range fields {
field := fs.field.String()
- for _, command := range tests {
+ for i, command := range tests {
text := fmt.Sprintf(command, field)
asmError := "...was introduced in ..."
if fs.array {
parts := strings.Split(text, " ")
op := parts[0]
- asmError = fmt.Sprintf("%s unknown field: %#v", op, field)
+ asmError = fmt.Sprintf("%#v field of %s can only be used with %d immediates", field, op, i+2)
}
// check assembler fails in versions before introduction
testLine(t, text, assemblerNoVersion, asmError)
@@ -434,7 +434,7 @@ func TestBackwardCompatTxnFields(t *testing.T) {
ep, tx, _ := makeSampleEnvWithVersion(1)
// We'll reject too early if we have a nonzero RekeyTo, because that
// field must be zero for every txn in the group if this is an old
- // TEAL version
+ // AVM version
tx.RekeyTo = basics.Address{}
ep.TxnGroup[0].Lsig.Logic = ops.Program
@@ -461,8 +461,8 @@ func TestBackwardCompatTxnFields(t *testing.T) {
func TestBackwardCompatAssemble(t *testing.T) {
partitiontest.PartitionTest(t)
- // TEAL v1 does not allow branching to the last line
- // TEAL v2 makes such programs legal
+ // v1 does not allow branching to the last line
+ // v2 makes such programs legal
t.Parallel()
source := "int 1; int 1; bnz done; done:"
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index 16dabf605..1a43995c2 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -117,7 +117,7 @@ var opDocByName = map[string]string{
"gaid": "ID of the asset or application created in the Tth transaction of the current group",
"gaids": "ID of the asset or application created in the Ath transaction of the current group",
- "json_ref": "return key B's value from a [valid](jsonspec.md) utf-8 encoded json object A",
+ "json_ref": "key B's value, of type R, from a [valid](jsonspec.md) utf-8 encoded json object A",
"bnz": "branch to TARGET if value A is not zero",
"bz": "branch to TARGET if value A is zero",
@@ -190,6 +190,9 @@ var opDocByName = map[string]string{
"itxn_next": "begin preparation of a new inner transaction in the same transaction group",
"itxn_field": "set field F of the current inner transaction to A",
"itxn_submit": "execute the current inner transaction group. Fail if executing this group would exceed the inner transaction limit, or if any transaction in the group fails.",
+
+ "vrf_verify": "Verify the proof B of message A against pubkey C. Returns vrf output and verification flag.",
+ "block": "field F of block A. Fail unless A falls between txn.LastValid-1002 and the current round (exclusive)",
}
// OpDoc returns a description of the op
@@ -255,6 +258,9 @@ var opcodeImmediateNotes = map[string]string{
"base64_decode": "{uint8 encoding index}",
"json_ref": "{string return type}",
+
+ "vrf_verify": "{uint8 parameters index}",
+ "block": "{uint8 block field}",
}
// OpImmediateNote returns a short string about immediate data which follows the op byte
@@ -264,6 +270,7 @@ func OpImmediateNote(opName string) string {
// further documentation on the function of the opcode
var opDocExtras = map[string]string{
+ "vrf_verify": "`VrfAlgorand` is the VRF used in Algorand. It is ECVRF-ED25519-SHA512-Elligator2, specified in the IETF internet draft [draft-irtf-cfrg-vrf-03](https://datatracker.ietf.org/doc/draft-irtf-cfrg-vrf/03/).",
"ed25519verify": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.",
"ecdsa_verify": "The 32 byte Y-component of a public key is the last element on the stack, preceded by X-component of a pubkey, preceded by S and R components of a signature, preceded by the data that is fifth element on the stack. All values are big-endian encoded. The signed data must be 32 bytes long, and signatures in lower-S form are only accepted.",
"ecdsa_pk_decompress": "The 33 byte public key in a compressed form to be decompressed into X and Y (top) components. All values are big-endian encoded.",
@@ -284,7 +291,6 @@ var opDocExtras = map[string]string{
"bitlen": "bitlen interprets arrays as big-endian integers, unlike setbit/getbit",
"divw": "The notation A,B indicates that A and B are interpreted as a uint128 value, with A as the high uint64 and B the low.",
"divmodw": "The notation J,K indicates that two uint64 values J and K are interpreted as a uint128 value, with J as the high uint64 and K the low.",
- "txn": "FirstValidTime causes the program to fail. The field is reserved for future use.",
"gtxn": "for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`.",
"gtxns": "for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. gtxns exists so that _i_ can be calculated, often based on the index of the current transaction.",
"gload": "`gload` fails unless the requested transaction is an ApplicationCall and T < GroupIndex.",
@@ -315,8 +321,8 @@ var opDocExtras = map[string]string{
"itxn_next": "`itxn_next` initializes the transaction exactly as `itxn_begin` does",
"itxn_field": "`itxn_field` fails if A is of the wrong type for F, including a byte array of the wrong size for use as an address when F is an address field. `itxn_field` also fails if A is an account, asset, or app that is not _available_, or an attempt is made extend an array field beyond the limit imposed by consensus parameters. (Addresses set into asset params of acfg transactions need not be _available_.)",
"itxn_submit": "`itxn_submit` resets the current transaction so that it can not be resubmitted. A new `itxn_begin` is required to prepare another inner transaction.",
- "base64_decode": "Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See <a href=\"https://rfc-editor.org/rfc/rfc4648.html#section-4\">RFC 4648</a> (sections 4 and 5). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\\n` and `\\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\\r`, or `\\n`.",
- "json_ref": "specify the return type with an immediate arg either as JSONUint64 or JSONString or JSONObject.",
+ "base64_decode": "*Warning*: Usage should be restricted to very rare use cases. In almost all cases, smart contracts should directly handle non-encoded byte-strings. This opcode should only be used in cases where base64 is the only available option, e.g. interoperability with a third-party that only signs base64 strings.\n\n Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See [RFC 4648 sections 4 and 5](https://rfc-editor.org/rfc/rfc4648.html#section-4). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\\n` and `\\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\\r`, or `\\n`.",
+ "json_ref": "*Warning*: Usage should be restricted to very rare use cases, as JSON decoding is expensive and quite limited. In addition, JSON objects are large and not optimized for size.\n\nAlmost all smart contracts should use simpler and smaller methods (such as the [ABI](https://arc.algorand.foundation/ARCs/arc-0004). This opcode should only be used in cases where JSON is only available option, e.g. when a third-party only signs JSON.",
}
// OpDocExtra returns extra documentation text about an op
@@ -328,13 +334,13 @@ func OpDocExtra(opName string) string {
// here is the order args opcodes are presented, so place related
// opcodes consecutively, even if their opcode values are not.
var OpGroups = map[string][]string{
- "Arithmetic": {"sha256", "keccak256", "sha512_256", "sha3_256", "ed25519verify", "ed25519verify_bare", "ecdsa_verify", "ecdsa_pk_recover", "ecdsa_pk_decompress", "bn256_add", "bn256_scalar_mul", "bn256_pairing", "+", "-", "/", "*", "<", ">", "<=", ">=", "&&", "||", "shl", "shr", "sqrt", "bitlen", "exp", "==", "!=", "!", "len", "itob", "btoi", "%", "|", "&", "^", "~", "mulw", "addw", "divw", "divmodw", "expw", "getbit", "setbit", "getbyte", "setbyte", "concat"},
+ "Arithmetic": {"sha256", "keccak256", "sha512_256", "sha3_256", "ed25519verify", "ed25519verify_bare", "ecdsa_verify", "ecdsa_pk_recover", "ecdsa_pk_decompress", "vrf_verify", "bn256_add", "bn256_scalar_mul", "bn256_pairing", "+", "-", "/", "*", "<", ">", "<=", ">=", "&&", "||", "shl", "shr", "sqrt", "bitlen", "exp", "==", "!=", "!", "len", "itob", "btoi", "%", "|", "&", "^", "~", "mulw", "addw", "divw", "divmodw", "expw", "getbit", "setbit", "getbyte", "setbyte", "concat"},
"Byte Array Manipulation": {"substring", "substring3", "extract", "extract3", "extract_uint16", "extract_uint32", "extract_uint64", "replace2", "replace3", "base64_decode", "json_ref"},
"Byte Array Arithmetic": {"b+", "b-", "b/", "b*", "b<", "b>", "b<=", "b>=", "b==", "b!=", "b%", "bsqrt"},
"Byte Array Logic": {"b|", "b&", "b^", "b~"},
"Loading Values": {"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "bzero", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "args", "txn", "gtxn", "txna", "txnas", "gtxna", "gtxnas", "gtxns", "gtxnsa", "gtxnsas", "global", "load", "loads", "store", "stores", "gload", "gloads", "gloadss", "gaid", "gaids"},
"Flow Control": {"err", "bnz", "bz", "b", "return", "pop", "dup", "dup2", "dig", "cover", "uncover", "swap", "select", "assert", "callsub", "retsub"},
- "State Access": {"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get", "app_params_get", "acct_params_get", "log"},
+ "State Access": {"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get", "app_params_get", "acct_params_get", "log", "block"},
"Inner Transactions": {"itxn_begin", "itxn_next", "itxn_field", "itxn_submit", "itxn", "itxna", "itxnas", "gitxn", "gitxna", "gitxnas"},
}
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index dae291b6e..b648ff778 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -40,6 +40,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/secp256k1"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
@@ -70,7 +71,7 @@ const maxLogCalls = 32
// you count the top-level app call.
const maxAppCallDepth = 8
-// maxStackDepth should not change unless controlled by a teal version change
+// maxStackDepth should not change unless controlled by an AVM version change
const maxStackDepth = 1000
// stackValue is the type for the operand stack.
@@ -171,13 +172,13 @@ func stackValueFromTealValue(tv basics.TealValue) (sv stackValue, err error) {
return
}
-// ComputeMinTealVersion calculates the minimum safe TEAL version that may be
+// ComputeMinAvmVersion calculates the minimum safe AVM version that may be
// used by a transaction in this group. It is important to prevent
// newly-introduced transaction fields from breaking assumptions made by older
-// versions of TEAL. If one of the transactions in a group will execute a TEAL
+// versions of the AVM. If one of the transactions in a group will execute a TEAL
// program whose version predates a given field, that field must not be set
// anywhere in the transaction group, or the group will be rejected.
-func ComputeMinTealVersion(group []transactions.SignedTxnWithAD) uint64 {
+func ComputeMinAvmVersion(group []transactions.SignedTxnWithAD) uint64 {
var minVersion uint64
for _, txn := range group {
if !txn.Txn.RekeyTo.IsZero() {
@@ -194,12 +195,30 @@ func ComputeMinTealVersion(group []transactions.SignedTxnWithAD) uint64 {
return minVersion
}
+// LedgerForSignature represents the parts of Ledger that LogicSigs can see. It
+// only exposes things that consensus has already agreed upon, so it is
+// "stateless" for signature purposes.
+type LedgerForSignature interface {
+ BlockHdrCached(basics.Round) (bookkeeping.BlockHeader, error)
+}
+
+// NoHeaderLedger is intended for debugging situations in which it is reasonable
+// to preclude the use of `block` and `txn LastValidTime`
+type NoHeaderLedger struct {
+}
+
+// BlockHdrCached always errors
+func (NoHeaderLedger) BlockHdrCached(basics.Round) (bookkeeping.BlockHeader, error) {
+ return bookkeeping.BlockHeader{}, fmt.Errorf("no block header access")
+}
+
// LedgerForLogic represents ledger API for Stateful TEAL program
type LedgerForLogic interface {
AccountData(addr basics.Address) (ledgercore.AccountData, error)
Authorizer(addr basics.Address) (basics.Address, error)
Round() basics.Round
LatestTimestamp() int64
+ BlockHdrCached(basics.Round) (bookkeeping.BlockHeader, error)
AssetHolding(addr basics.Address, assetIdx basics.AssetIndex) (basics.AssetHolding, error)
AssetParams(aidx basics.AssetIndex) (basics.AssetParams, basics.Address, error)
@@ -237,15 +256,16 @@ type EvalParams struct {
logger logging.Logger
- Ledger LedgerForLogic
+ SigLedger LedgerForSignature
+ Ledger LedgerForLogic
// optional debugger
Debugger DebuggerHook
- // MinTealVersion is the minimum allowed TEAL version of this program.
+ // MinAvmVersion is the minimum allowed AVM version of this program.
// The program must reject if its version is less than this version. If
- // MinTealVersion is nil, we will compute it ourselves
- MinTealVersion *uint64
+ // MinAvmVersion is nil, we will compute it ourselves
+ MinAvmVersion *uint64
// Amount "overpaid" by the transactions of the group. Often 0. When
// positive, it can be spent by inner transactions. Shared across a group's
@@ -305,7 +325,7 @@ func NewEvalParams(txgroup []transactions.SignedTxnWithAD, proto *config.Consens
}
}
- minTealVersion := ComputeMinTealVersion(txgroup)
+ minAvmVersion := ComputeMinAvmVersion(txgroup)
var pooledApplicationBudget *int
var pooledAllowedInners *int
@@ -327,7 +347,7 @@ func NewEvalParams(txgroup []transactions.SignedTxnWithAD, proto *config.Consens
Proto: proto,
Specials: specials,
pastScratch: make([]*scratchSpace, len(txgroup)),
- MinTealVersion: &minTealVersion,
+ MinAvmVersion: &minAvmVersion,
FeeCredit: &credit,
PooledApplicationBudget: pooledApplicationBudget,
pooledAllowedInners: pooledAllowedInners,
@@ -344,7 +364,7 @@ func feeCredit(txgroup []transactions.SignedTxnWithAD, minFee uint64) uint64 {
minFeeCount := uint64(0)
feesPaid := uint64(0)
for _, stxn := range txgroup {
- if stxn.Txn.Type != protocol.CompactCertTx {
+ if stxn.Txn.Type != protocol.StateProofTx {
minFeeCount++
}
feesPaid = basics.AddSaturate(feesPaid, stxn.Txn.Fee.Raw)
@@ -357,12 +377,12 @@ func feeCredit(txgroup []transactions.SignedTxnWithAD, minFee uint64) uint64 {
// NewInnerEvalParams creates an EvalParams to be used while evaluating an inner group txgroup
func NewInnerEvalParams(txg []transactions.SignedTxnWithAD, caller *EvalContext) *EvalParams {
- minTealVersion := ComputeMinTealVersion(txg)
+ minAvmVersion := ComputeMinAvmVersion(txg)
// Can't happen currently, since earliest inner callable version is higher
// than any minimum imposed otherwise. But is correct to inherit a stronger
// restriction from above, in case of future restriction.
- if minTealVersion < *caller.MinTealVersion {
- minTealVersion = *caller.MinTealVersion
+ if minAvmVersion < *caller.MinAvmVersion {
+ minAvmVersion = *caller.MinAvmVersion
}
// Unlike NewEvalParams, do not add fee credit here. opTxSubmit has already done so.
@@ -380,11 +400,12 @@ func NewInnerEvalParams(txg []transactions.SignedTxnWithAD, caller *EvalContext)
Trace: caller.Trace,
TxnGroup: txg,
pastScratch: make([]*scratchSpace, len(txg)),
- MinTealVersion: &minTealVersion,
+ MinAvmVersion: &minAvmVersion,
FeeCredit: caller.FeeCredit,
Specials: caller.Specials,
PooledApplicationBudget: caller.PooledApplicationBudget,
pooledAllowedInners: caller.pooledAllowedInners,
+ SigLedger: caller.SigLedger,
Ledger: caller.Ledger,
created: caller.created,
appAddrCache: caller.appAddrCache,
@@ -608,6 +629,9 @@ func EvalContract(program []byte, gi int, aid basics.AppIndex, params *EvalParam
if params.Ledger == nil {
return false, nil, errors.New("no ledger in contract eval")
}
+ if params.SigLedger == nil {
+ params.SigLedger = params.Ledger
+ }
if aid == 0 {
return false, nil, errors.New("0 appId in contract eval")
}
@@ -650,6 +674,9 @@ func EvalApp(program []byte, gi int, aid basics.AppIndex, params *EvalParams) (b
// EvalSignature evaluates the logicsig of the ith transaction in params.
// A program passes successfully if it finishes with one int element on the stack that is non-zero.
func EvalSignature(gi int, params *EvalParams) (pass bool, err error) {
+ if params.SigLedger == nil {
+ return false, errors.New("no sig ledger in signature eval")
+ }
cx := EvalContext{
EvalParams: params,
runModeFlags: modeSig,
@@ -829,12 +856,12 @@ func versionCheck(program []byte, params *EvalParams) (uint64, int, error) {
return 0, 0, fmt.Errorf("program version %d greater than protocol supported version %d", version, params.Proto.LogicSigVersion)
}
- if params.MinTealVersion == nil {
- minVersion := ComputeMinTealVersion(params.TxnGroup)
- params.MinTealVersion = &minVersion
+ if params.MinAvmVersion == nil {
+ minVersion := ComputeMinAvmVersion(params.TxnGroup)
+ params.MinAvmVersion = &minVersion
}
- if version < *params.MinTealVersion {
- return 0, 0, fmt.Errorf("program version must be >= %d for this transaction group, but have version %d", *params.MinTealVersion, version)
+ if version < *params.MinAvmVersion {
+ return 0, 0, fmt.Errorf("program version must be >= %d for this transaction group, but have version %d", *params.MinAvmVersion, version)
}
return version, vlen, nil
}
@@ -860,6 +887,10 @@ func boolToUint(x bool) uint64 {
return 0
}
+func boolToSV(x bool) stackValue {
+ return stackValue{Uint: boolToUint(x)}
+}
+
func (cx *EvalContext) remainingBudget() int {
if cx.runModeFlags == modeSig {
return int(cx.Proto.LogicSigMaxCost) - cx.cost
@@ -895,7 +926,7 @@ func (cx *EvalContext) step() error {
opcode := cx.program[cx.pc]
spec := &opsByOpcode[cx.version][opcode]
- // this check also ensures TEAL versioning: v2 opcodes are not in opsByOpcode[1] array
+ // this check also ensures versioning: v2 opcodes are not in opsByOpcode[1] array
if spec.op == nil {
return fmt.Errorf("%3d illegal opcode 0x%02x", cx.pc, opcode)
}
@@ -1302,7 +1333,7 @@ func opLt(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
cond := cx.stack[prev].Uint < cx.stack[last].Uint
- cx.stack[prev].Uint = boolToUint(cond)
+ cx.stack[prev] = boolToSV(cond)
cx.stack = cx.stack[:last]
return nil
}
@@ -1328,7 +1359,7 @@ func opAnd(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
cond := (cx.stack[prev].Uint != 0) && (cx.stack[last].Uint != 0)
- cx.stack[prev].Uint = boolToUint(cond)
+ cx.stack[prev] = boolToSV(cond)
cx.stack = cx.stack[:last]
return nil
}
@@ -1337,7 +1368,7 @@ func opOr(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
cond := (cx.stack[prev].Uint != 0) || (cx.stack[last].Uint != 0)
- cx.stack[prev].Uint = boolToUint(cond)
+ cx.stack[prev] = boolToSV(cond)
cx.stack = cx.stack[:last]
return nil
}
@@ -1356,8 +1387,7 @@ func opEq(cx *EvalContext) error {
} else {
cond = cx.stack[prev].Uint == cx.stack[last].Uint
}
- cx.stack[prev].Uint = boolToUint(cond)
- cx.stack[prev].Bytes = nil
+ cx.stack[prev] = boolToSV(cond)
cx.stack = cx.stack[:last]
return nil
}
@@ -1372,8 +1402,7 @@ func opNeq(cx *EvalContext) error {
func opNot(cx *EvalContext) error {
last := len(cx.stack) - 1
- cond := cx.stack[last].Uint == 0
- cx.stack[last].Uint = boolToUint(cond)
+ cx.stack[last] = boolToSV(cx.stack[last].Uint == 0)
return nil
}
@@ -1674,8 +1703,7 @@ func opBytesLt(cx *EvalContext) error {
rhs := new(big.Int).SetBytes(cx.stack[last].Bytes)
lhs := new(big.Int).SetBytes(cx.stack[prev].Bytes)
- cx.stack[prev].Bytes = nil
- cx.stack[prev].Uint = boolToUint(lhs.Cmp(rhs) < 0)
+ cx.stack[prev] = boolToSV(lhs.Cmp(rhs) < 0)
cx.stack = cx.stack[:last]
return nil
}
@@ -1711,8 +1739,7 @@ func opBytesEq(cx *EvalContext) error {
rhs := new(big.Int).SetBytes(cx.stack[last].Bytes)
lhs := new(big.Int).SetBytes(cx.stack[prev].Bytes)
- cx.stack[prev].Bytes = nil
- cx.stack[prev].Uint = boolToUint(lhs.Cmp(rhs) == 0)
+ cx.stack[prev] = boolToSV(lhs.Cmp(rhs) == 0)
cx.stack = cx.stack[:last]
return nil
}
@@ -2297,6 +2324,19 @@ func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *t
sv.Uint = txn.Fee.Raw
case FirstValid:
sv.Uint = uint64(txn.FirstValid)
+ case FirstValidTime:
+ rnd, err := cx.availableRound(uint64(txn.FirstValid) - 1)
+ if err != nil {
+ return sv, err
+ }
+ hdr, err := cx.SigLedger.BlockHdrCached(rnd)
+ if err != nil {
+ return sv, err
+ }
+ if hdr.TimeStamp < 0 {
+ return sv, fmt.Errorf("block(%d) timestamp %d < 0", txn.FirstValid-1, hdr.TimeStamp)
+ }
+ sv.Uint = uint64(hdr.TimeStamp)
case LastValid:
sv.Uint = uint64(txn.LastValid)
case Note:
@@ -2403,6 +2443,32 @@ func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *t
sv.Bytes = nilToEmpty(txn.ApprovalProgram)
case ClearStateProgram:
sv.Bytes = nilToEmpty(txn.ClearStateProgram)
+ case NumApprovalProgramPages:
+ sv.Uint = uint64(divCeil(len(txn.ApprovalProgram), maxStringSize))
+ case ApprovalProgramPages:
+ pageCount := divCeil(len(txn.ApprovalProgram), maxStringSize)
+ if arrayFieldIdx >= uint64(pageCount) {
+ return sv, fmt.Errorf("invalid ApprovalProgramPages index %d", arrayFieldIdx)
+ }
+ first := arrayFieldIdx * maxStringSize
+ last := first + maxStringSize
+ if last > uint64(len(txn.ApprovalProgram)) {
+ last = uint64(len(txn.ApprovalProgram))
+ }
+ sv.Bytes = txn.ApprovalProgram[first:last]
+ case NumClearStateProgramPages:
+ sv.Uint = uint64(divCeil(len(txn.ClearStateProgram), maxStringSize))
+ case ClearStateProgramPages:
+ pageCount := divCeil(len(txn.ClearStateProgram), maxStringSize)
+ if arrayFieldIdx >= uint64(pageCount) {
+ return sv, fmt.Errorf("invalid ClearStateProgramPages index %d", arrayFieldIdx)
+ }
+ first := arrayFieldIdx * maxStringSize
+ last := first + maxStringSize
+ if last > uint64(len(txn.ClearStateProgram)) {
+ last = uint64(len(txn.ClearStateProgram))
+ }
+ sv.Bytes = txn.ClearStateProgram[first:last]
case RekeyTo:
sv.Bytes = txn.RekeyTo[:]
case ConfigAsset:
@@ -2469,13 +2535,13 @@ func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *t
func (cx *EvalContext) fetchField(field TxnField, expectArray bool) (*txnFieldSpec, error) {
fs, ok := txnFieldSpecByField(field)
if !ok || fs.version > cx.version {
- return nil, fmt.Errorf("invalid txn field %d", field)
+ return nil, fmt.Errorf("invalid txn field %s", field)
}
if expectArray != fs.array {
if expectArray {
- return nil, fmt.Errorf("unsupported array field %d", field)
+ return nil, fmt.Errorf("unsupported array field %s", field)
}
- return nil, fmt.Errorf("invalid txn field %d", field)
+ return nil, fmt.Errorf("invalid txn field %s", field)
}
return &fs, nil
}
@@ -2973,8 +3039,7 @@ func opEd25519Verify(cx *EvalContext) error {
copy(sig[:], cx.stack[prev].Bytes)
msg := Msg{ProgramHash: cx.programHash(), Data: cx.stack[pprev].Bytes}
- cx.stack[pprev].Uint = boolToUint(sv.Verify(msg, sig, cx.Proto.EnableBatchVerification))
- cx.stack[pprev].Bytes = nil
+ cx.stack[pprev] = boolToSV(sv.Verify(msg, sig))
cx.stack = cx.stack[:prev]
return nil
}
@@ -2996,8 +3061,7 @@ func opEd25519VerifyBare(cx *EvalContext) error {
}
copy(sig[:], cx.stack[prev].Bytes)
- cx.stack[pprev].Uint = boolToUint(sv.VerifyBytes(cx.stack[pprev].Bytes, sig, cx.Proto.EnableBatchVerification))
- cx.stack[pprev].Bytes = nil
+ cx.stack[pprev] = boolToSV(sv.VerifyBytes(cx.stack[pprev].Bytes, sig))
cx.stack = cx.stack[:prev]
return nil
}
@@ -3067,8 +3131,7 @@ func opEcdsaVerify(cx *EvalContext) error {
result = ecdsa.Verify(&pubkey, msg, r, s)
}
- cx.stack[fifth].Uint = boolToUint(result)
- cx.stack[fifth].Bytes = nil
+ cx.stack[fifth] = boolToSV(result)
cx.stack = cx.stack[:fourth]
return nil
}
@@ -3703,9 +3766,7 @@ func opAppOptedIn(cx *EvalContext) error {
return err
}
- cx.stack[prev].Uint = boolToUint(optedIn)
- cx.stack[prev].Bytes = nil
-
+ cx.stack[prev] = boolToSV(optedIn)
cx.stack = cx.stack[:last]
return nil
}
@@ -4154,8 +4215,6 @@ func opAcctParamsGet(cx *EvalContext) error {
return err
}
- exist := boolToUint(account.MicroAlgos.Raw > 0)
-
var value stackValue
switch fs.field {
@@ -4167,7 +4226,7 @@ func opAcctParamsGet(cx *EvalContext) error {
value.Bytes = account.AuthAddr[:]
}
cx.stack[last] = value
- cx.stack = append(cx.stack, stackValue{Uint: exist})
+ cx.stack = append(cx.stack, boolToSV(account.MicroAlgos.Raw > 0))
return nil
}
@@ -4522,6 +4581,18 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
}
txn.ClearStateProgram = make([]byte, len(sv.Bytes))
copy(txn.ClearStateProgram, sv.Bytes)
+ case ApprovalProgramPages:
+ maxPossible := cx.Proto.MaxAppProgramLen * (1 + cx.Proto.MaxExtraAppProgramPages)
+ txn.ApprovalProgram = append(txn.ApprovalProgram, sv.Bytes...)
+ if len(txn.ApprovalProgram) > maxPossible {
+ return fmt.Errorf("%s may not exceed %d bytes", fs.field, maxPossible)
+ }
+ case ClearStateProgramPages:
+ maxPossible := cx.Proto.MaxAppProgramLen * (1 + cx.Proto.MaxExtraAppProgramPages)
+ txn.ClearStateProgram = append(txn.ClearStateProgram, sv.Bytes...)
+ if len(txn.ClearStateProgram) > maxPossible {
+ return fmt.Errorf("%s may not exceed %d bytes", fs.field, maxPossible)
+ }
case Assets:
var new basics.AssetIndex
new, err = cx.availableAsset(sv)
@@ -4746,6 +4817,106 @@ func opItxnSubmit(cx *EvalContext) error {
return nil
}
+type rawMessage []byte
+
+func (rm rawMessage) ToBeHashed() (protocol.HashID, []byte) {
+ return "", []byte(rm)
+}
+
+func opVrfVerify(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // PK
+ prev := last - 1 // proof
+ pprev := prev - 1 // data
+
+ data := rawMessage(cx.stack[pprev].Bytes)
+ proofbytes := cx.stack[prev].Bytes
+ var proof crypto.VrfProof
+ if len(proofbytes) != len(proof) {
+ return fmt.Errorf("vrf proof wrong size %d != %d", len(proofbytes), len(proof))
+ }
+ copy(proof[:], proofbytes[:])
+
+ pubkeybytes := cx.stack[last].Bytes
+ var pubkey crypto.VrfPubkey
+ if len(pubkeybytes) != len(pubkey) {
+ return fmt.Errorf("vrf pubkey wrong size %d != %d", len(pubkeybytes), len(pubkey))
+ }
+ copy(pubkey[:], pubkeybytes[:])
+
+ var verified bool
+ var output []byte
+ std := VrfStandard(cx.program[cx.pc+1])
+ ss, ok := vrfStandardSpecByField(std)
+ if !ok || ss.version > cx.version {
+ return fmt.Errorf("invalid VRF standard %s", std)
+ }
+ switch std {
+ case VrfAlgorand:
+ var out crypto.VrfOutput
+ verified, out = pubkey.Verify(proof, data)
+ output = out[:]
+ default:
+ return fmt.Errorf("unsupported vrf_verify standard %s", std)
+ }
+
+ cx.stack[pprev].Bytes = output[:]
+ cx.stack[prev] = boolToSV(verified)
+ cx.stack = cx.stack[:last] // pop 1 because we take 3 args and return 2
+ return nil
+}
+
+// availableRound checks to see if the requested round, `r`, is allowed to be
+// accessed. If it is, it's returned as a basics.Round. It is named by analogy
+// to the availableAsset and availableApp helpers.
+func (cx *EvalContext) availableRound(r uint64) (basics.Round, error) {
+ firstAvail := cx.txn.Txn.LastValid - basics.Round(cx.Proto.MaxTxnLife) - 1
+ if firstAvail > cx.txn.Txn.LastValid || firstAvail == 0 { // early in chain's life
+ firstAvail = 1
+ }
+ lastAvail := cx.txn.Txn.FirstValid - 1
+ if lastAvail > cx.txn.Txn.FirstValid { // txn had a 0 in FirstValid
+ lastAvail = 0 // So nothing will be available
+ }
+ round := basics.Round(r)
+ if firstAvail > round || round > lastAvail {
+ return 0, fmt.Errorf("round %d is not available. It's outside [%d-%d]", r, firstAvail, lastAvail)
+ }
+ return round, nil
+}
+
+func opBlock(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // round
+ round, err := cx.availableRound(cx.stack[last].Uint)
+ if err != nil {
+ return err
+ }
+ f := BlockField(cx.program[cx.pc+1])
+ fs, ok := blockFieldSpecByField(f)
+ if !ok || fs.version > cx.version {
+ return fmt.Errorf("invalid block field %s", f)
+ }
+
+ hdr, err := cx.SigLedger.BlockHdrCached(round)
+ if err != nil {
+ return err
+ }
+
+ switch fs.field {
+ case BlkSeed:
+ cx.stack[last].Bytes = hdr.Seed[:]
+ return nil
+ case BlkTimestamp:
+ cx.stack[last].Bytes = nil
+ if hdr.TimeStamp < 0 {
+ return fmt.Errorf("block(%d) timestamp %d < 0", round, hdr.TimeStamp)
+ }
+ cx.stack[last].Uint = uint64(hdr.TimeStamp)
+ return nil
+ default:
+ return fmt.Errorf("invalid block field %d", fs.field)
+ }
+}
+
// PcDetails return PC and disassembled instructions at PC up to 2 opcodes back
func (cx *EvalContext) PcDetails() (pc int, dis string) {
const maxNumAdditionalOpcodes = 2
diff --git a/data/transactions/logic/evalAppTxn_test.go b/data/transactions/logic/evalAppTxn_test.go
index f1ae3c40d..35d8f28e3 100644
--- a/data/transactions/logic/evalAppTxn_test.go
+++ b/data/transactions/logic/evalAppTxn_test.go
@@ -711,7 +711,7 @@ func TestKeyReg(t *testing.T) {
`
ep, tx, ledger := MakeSampleEnv()
ep.Proto.EnableStateProofKeyregCheck = true
- ep.Proto.MaxKeyregValidPeriod = ((1 << 16) * 256) - 1 // 2^16 StateProof keys times CompactCertRounds (interval)
+ ep.Proto.MaxKeyregValidPeriod = ((1 << 16) * 256) - 1 // 2^16 StateProof keys times StateProofInterval (interval)
ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
ledger.NewAccount(appAddr(888), ep.Proto.MinTxnFee)
TestApp(t, params+keyreg, ep)
@@ -729,7 +729,7 @@ func TestKeyReg(t *testing.T) {
`
ep, tx, ledger := MakeSampleEnv()
ep.Proto.EnableStateProofKeyregCheck = true
- ep.Proto.MaxKeyregValidPeriod = ((1 << 16) * 256) - 1 // 2^16 StateProof keys times CompactCertRounds (interval)
+ ep.Proto.MaxKeyregValidPeriod = ((1 << 16) * 256) - 1 // 2^16 StateProof keys times StateProofInterval (interval)
ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
ledger.NewAccount(appAddr(888), ep.Proto.MinTxnFee)
TestApp(t, params+keyreg, ep, "validity period for keyreg transaction is too long") // VoteLast is +1 over the limit
@@ -948,12 +948,64 @@ func TestApplCreation(t *testing.T) {
TestApp(t, p+"int 3; itxn_field ExtraProgramPages"+s, ep, "3 is larger than max=2")
}
+// TestBigApplCreation focues on testing the new fields that allow constructing big programs.
+func TestBigApplCreation(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, _, _ := MakeSampleEnv()
+
+ p := "itxn_begin;"
+ s := "; int 1"
+
+ // Recall that in test proto, max possible program size is 2700, because
+ // MaxAppProgramLen: 900
+ // MaxExtraAppProgramPages: 2
+
+ // First, test normal accummulation
+ for _, pgm := range []string{"Approval", "ClearState"} {
+ t.Run(pgm, func(t *testing.T) {
+ basic := "itxn_field " + pgm + "Program"
+ pages := "itxn_field " + pgm + "ProgramPages"
+ TestApp(t, p+`int 1000; bzero; `+pages+`
+ int 1000; bzero; `+pages+`
+ int 700; bzero; `+pages+`
+ `+s, ep)
+ TestApp(t, p+`int 1000; bzero; `+pages+`
+ int 1000; bzero; `+pages+`
+ int 701; bzero; `+pages+`
+ `+s, ep, "may not exceed 2700")
+
+ // Test the basic ApprovalProgram field resets
+ TestApp(t, p+`int 1000; bzero; `+pages+`
+ int 100; bzero; `+basic+`
+ int 1000; bzero; `+pages+`
+ int 701; bzero; `+pages+`
+ `+s, ep)
+ // Test that the 100 of the Approval program stayed around
+ TestApp(t, p+`int 1000; bzero; `+pages+`
+ int 100; bzero; `+basic+`
+ int 1000; bzero; `+pages+`
+ int 1000; bzero; `+pages+`
+ int 600; bzero; `+pages+`
+ `+s, ep)
+ TestApp(t, p+`int 1000; bzero; `+pages+`
+ int 100; bzero; `+basic+`
+ int 1000; bzero; `+pages+`
+ int 1000; bzero; `+pages+`
+ int 601; bzero; `+pages+`
+ `+s, ep, "may not exceed 2700")
+ })
+ }
+}
+
// TestApplSubmission tests for checking of illegal appl transaction in form
// only. Things where interactions between two different fields causes the
// error. These are not exhaustive, but certainly demonstrate that
// transactions.WellFormed is getting a crack at the txn.
func TestApplSubmission(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
ep, tx, ledger := MakeSampleEnv()
ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
@@ -1700,7 +1752,7 @@ int 1
for _, unified := range []bool{true, false} {
t.Run(fmt.Sprintf("unified=%t", unified), func(t *testing.T) {
- t.Parallel()
+ // t.Parallel() NO! unified variable is actually shared
ep, parentTx, ledger := MakeSampleEnv()
ep.Proto.UnifyInnerTxIDs = unified
@@ -2152,7 +2204,7 @@ func TestInnerTxIDCaching(t *testing.T) {
for _, unified := range []bool{true, false} {
t.Run(fmt.Sprintf("unified=%t", unified), func(t *testing.T) {
- t.Parallel()
+ // t.Parallel() NO! unified variable is actually shared
ep, parentTx, ledger := MakeSampleEnv()
ep.Proto.UnifyInnerTxIDs = unified
diff --git a/data/transactions/logic/evalCrypto_test.go b/data/transactions/logic/evalCrypto_test.go
index 84a88f6e4..b2c6bec0e 100644
--- a/data/transactions/logic/evalCrypto_test.go
+++ b/data/transactions/logic/evalCrypto_test.go
@@ -93,6 +93,84 @@ byte 0x98D2C31612EA500279B6753E5F6E780CA63EBA8274049664DAD66A2565ED1D2A
testAccepts(t, progText, 1)
}
+// This is patterned off vrf_test.go, but we don't create proofs here, we only
+// check that the output is correct, given the proof.
+func testVrfApp(pubkey, proof, data string, output string) string {
+ source := `
+byte 0x%s
+byte 0x%s
+byte 0x%s
+vrf_verify VrfAlgorand
+assert
+byte 0x%s
+==
+`
+ return fmt.Sprintf(source, data, proof, pubkey, output)
+}
+
+func TestVrfVerify(t *testing.T) {
+ ep, _, _ := makeSampleEnv()
+ testApp(t, notrack("int 1; int 2; int 3; vrf_verify VrfAlgorand"), ep, "arg 0 wanted")
+ testApp(t, notrack("byte 0x1122; int 2; int 3; vrf_verify VrfAlgorand"), ep, "arg 1 wanted")
+ testApp(t, notrack("byte 0x1122; byte 0x2233; int 3; vrf_verify VrfAlgorand"), ep, "arg 2 wanted")
+ testLogic(t, "byte 0x1122; byte 0x2233; byte 0x3344; vrf_verify VrfAlgorand", LogicVersion, ep, "vrf proof wrong size")
+ // 80 byte proof
+ testLogic(t, "byte 0x1122; int 80; bzero; byte 0x3344; vrf_verify VrfAlgorand", LogicVersion, ep, "vrf pubkey wrong size")
+ // 32 byte pubkey
+ testLogic(t, "byte 0x3344; int 80; bzero; int 32; bzero; vrf_verify VrfAlgorand", LogicVersion, ep, "stack len is 2")
+
+ // working app, but the verify itself fails
+ testLogic(t, "byte 0x3344; int 80; bzero; int 32; bzero; vrf_verify VrfAlgorand; !; assert; int 64; bzero; ==", LogicVersion, ep)
+
+ source := testVrfApp(
+ "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a", //pubkey
+ "b6b4699f87d56126c9117a7da55bd0085246f4c56dbc95d20172612e9d38e8d7ca65e573a126ed88d4e30a46f80a666854d675cf3ba81de0de043c3774f061560f55edc256a787afe701677c0f602900", // proof
+ "", // data
+ "5b49b554d05c0cd5a5325376b3387de59d924fd1e13ded44648ab33c21349a603f25b84ec5ed887995b33da5e3bfcb87cd2f64521c4c62cf825cffabbe5d31cc", // output
+ )
+ testLogic(t, source, LogicVersion, ep)
+
+ source = testVrfApp(
+ "3d4017c3e843895a92b70aa74d1b7ebc9c982ccf2ec4968cc0cd55f12af4660c", //pk
+ "ae5b66bdf04b4c010bfe32b2fc126ead2107b697634f6f7337b9bff8785ee111200095ece87dde4dbe87343f6df3b107d91798c8a7eb1245d3bb9c5aafb093358c13e6ae1111a55717e895fd15f99f07", // pi
+ "72", // alpha
+ "94f4487e1b2fec954309ef1289ecb2e15043a2461ecc7b2ae7d4470607ef82eb1cfa97d84991fe4a7bfdfd715606bc27e2967a6c557cfb5875879b671740b7d8", // beta
+ )
+ testLogic(t, source, LogicVersion, ep)
+}
+
+// BenchMarkVerify is useful to see relative speeds of various crypto verify functions
+func BenchmarkVerify(b *testing.B) {
+ benches := [][]string{
+ {"pop", "", "int 1234576; int 6712; pop; pop", "int 1"},
+ {"add", "", "int 1234576; int 6712; +; pop", "int 1"},
+ /*
+ {"ed25519verify_bare", "", `byte 0x
+ byte 0x
+ addr
+ ed25519verify_bare
+ assert`, "int 1"},*/
+ {"ecdsa_verify", "", `byte 0x71a5910445820f57989c027bdf9391c80097874d249e0f38bf90834fdec2877f
+byte 0x5eb27782eb1a5df8de9a5d51613ad5ca730840ddf4af919c6feb15cde14f9978
+byte 0x0cb3c0d636ed991ee030d09c295de3121eb166cb9e1552cf0ef0fb2358f35f0f
+byte 0x79de0699673571df1de8486718d06a3e7838f6831ec4ef3fb963788fbfb773b7
+byte 0xd76446a3393af3e2eefada16df80cc6a881a56f4cf41fa2ab4769c5708ce878d
+ecdsa_verify Secp256k1
+assert`, "int 1"},
+ {"vrf_verify", "", `byte 0x72
+byte 0xae5b66bdf04b4c010bfe32b2fc126ead2107b697634f6f7337b9bff8785ee111200095ece87dde4dbe87343f6df3b107d91798c8a7eb1245d3bb9c5aafb093358c13e6ae1111a55717e895fd15f99f07
+byte 0x3d4017c3e843895a92b70aa74d1b7ebc9c982ccf2ec4968cc0cd55f12af4660c
+vrf_verify VrfAlgorand
+assert // make sure we're testing success
+pop // output`, "int 1"},
+ }
+ for _, bench := range benches {
+ b.Run(bench[0], func(b *testing.B) {
+ benchmarkOperation(b, bench[1], bench[2], bench[3])
+ })
+ }
+}
+
func TestEd25519verify(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -371,10 +449,6 @@ ecdsa_verify Secp256k1`, hex.EncodeToString(r), hex.EncodeToString(s), hex.Encod
}
func TestEcdsaWithSecp256r1(t *testing.T) {
- if LogicVersion < fidoVersion {
- return
- }
-
partitiontest.PartitionTest(t)
t.Parallel()
@@ -809,7 +883,7 @@ func benchmarkBn256DataGenData(b *testing.B) (data []benchmarkBn256Data) {
func benchmarkBn256(b *testing.B, source string) {
data := benchmarkBn256DataGenData(b)
- ops, err := AssembleStringWithVersion(source, 7)
+ ops, err := AssembleStringWithVersion(source, pairingVersion)
require.NoError(b, err)
for i := 0; i < b.N; i++ {
data[i].programs = ops.Program
@@ -873,12 +947,15 @@ func BenchmarkBn256PairingRaw(b *testing.B) {
}
func BenchmarkBn256(b *testing.B) {
+ if pairingVersion > LogicVersion {
+ b.Skip()
+ }
b.Run("bn256 add", func(b *testing.B) {
benchmarkOperation(b, "byte 0x0ebc9fc712b13340c800793386a88385e40912a21bacad2cc7db17d36e54c802238449426931975cced7200f08681ab9a86a2e5c2336cf625451cf2413318e32", "dup; bn256_add", "pop; int 1")
})
b.Run("bn256 scalar mul", func(b *testing.B) {
- source := `#pragma version 7
+ source := `
arg 0
arg 1
bn256_scalar_mul
@@ -889,7 +966,7 @@ int 1
})
b.Run("bn256 pairing", func(b *testing.B) {
- source := `#pragma version 7
+ source := `
arg 2
arg 3
bn256_pairing
diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go
index 2a9f329c2..3e051fc6b 100644
--- a/data/transactions/logic/evalStateful_test.go
+++ b/data/transactions/logic/evalStateful_test.go
@@ -52,6 +52,7 @@ func makeSampleEnvWithVersion(version uint64) (*EvalParams, *transactions.Transa
ep := defaultEvalParamsWithVersion(nil, version)
ep.TxnGroup = transactions.WrapSignedTxnsWithAD(makeSampleTxnGroup(makeSampleTxn()))
ledger := MakeLedger(map[basics.Address]uint64{})
+ ep.SigLedger = ledger
ep.Ledger = ledger
return ep, &ep.TxnGroup[0].Txn, ledger
}
@@ -69,7 +70,7 @@ func TestEvalModes(t *testing.T) {
t.Parallel()
// ed25519verify* and err are tested separately below
- // check modeAny (TEAL v1 + txna/gtxna) are available in RunModeSignature
+ // check modeAny (v1 + txna/gtxna) are available in RunModeSignature
// check all opcodes available in runModeApplication
opcodesRunModeAny := `intcblock 0 1 1 1 1 5 100
bytecblock 0x414c474f 0x1337 0x2001 0xdeadbeef 0x70077007
@@ -2371,6 +2372,8 @@ func TestReturnTypes(t *testing.T) {
"base64_decode": `: byte "YWJjMTIzIT8kKiYoKSctPUB+"; base64_decode StdEncoding`,
"json_ref": `: byte "{\"k\": 7}"; byte "k"; json_ref JSONUint64`,
+
+ "block": "block BlkSeed",
}
/* Make sure the specialCmd tests the opcode in question */
@@ -2391,6 +2394,8 @@ func TestReturnTypes(t *testing.T) {
"ecdsa_pk_recover": true,
"ecdsa_pk_decompress": true,
+ "vrf_verify": true,
+
"bn256_add": true,
"bn256_scalar_mul": true,
"bn256_pairing": true,
@@ -2530,6 +2535,43 @@ func TestLatestTimestamp(t *testing.T) {
testApp(t, source, ep)
}
+func TestBlockSeed(t *testing.T) {
+ ep, txn, l := makeSampleEnv()
+
+ // makeSampleEnv creates txns with fv, lv that don't actually fit the round
+ // in l. Nothing in most tests cares. But the rule for `block` is related
+ // to lv and fv, so we set the fv,lv more realistically.
+ txn.FirstValid = l.round() - 10
+ txn.LastValid = l.round() + 10
+
+ // Keep in mind that proto.MaxTxnLife is 1500 in the test proto
+
+ // l.round() is 0xffffffff+5 = 4294967300 in test ledger
+
+ // These first two tests show that current-1 is not available now, though a
+ // resonable extension is to allow such access for apps (not sigs).
+ testApp(t, "int 4294967299; block BlkSeed; len; int 32; ==", ep,
+ "not available") // current - 1
+ testApp(t, "int 4294967300; block BlkSeed; len; int 32; ==", ep,
+ "not available") // can't get current round's blockseed
+
+ testApp(t, "int 4294967300; int 1500; -; block BlkSeed; len; int 32; ==", ep,
+ "not available") // 1500 back from current is more than 1500 back from lv
+ testApp(t, "int 4294967310; int 1500; -; block BlkSeed; len; int 32; ==", ep) // 1500 back from lv is legal
+ testApp(t, "int 4294967310; int 1501; -; block BlkSeed; len; int 32; ==", ep) // 1501 back from lv is legal
+ testApp(t, "int 4294967310; int 1502; -; block BlkSeed; len; int 32; ==", ep,
+ "not available") // 1501 back from lv is not
+
+ // A little silly, as it only tests the test ledger: ensure samenes and differentness
+ testApp(t, "int 0xfffffff0; block BlkSeed; int 0xfffffff0; block BlkSeed; ==", ep)
+ testApp(t, "int 0xfffffff0; block BlkSeed; int 0xfffffff1; block BlkSeed; !=", ep)
+
+ // `block` should also work in LogicSigs, to drive home the point, blot out
+ // the normal Ledger
+ ep.Ledger = nil
+ testLogic(t, "int 0xfffffff0; block BlkTimestamp", randomnessVersion, ep)
+}
+
func TestCurrentApplicationID(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index acc3d7808..ba7df73e9 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -40,7 +40,7 @@ import (
)
// Note that most of the tests use makeTestProto/defaultEvalParams as evaluator version so that
-// we check that TEAL v1 and v2 programs are compatible with the latest evaluator
+// we check that v1 and v2 programs are compatible with the latest evaluator
func makeTestProto() *config.ConsensusParams {
return makeTestProtoV(LogicVersion)
}
@@ -115,7 +115,7 @@ func benchmarkEvalParams(txn *transactions.SignedTxn) *EvalParams {
ep := defaultEvalParamsWithVersion(txn, LogicVersion)
ep.Trace = nil // Tracing would slow down benchmarks
clone := *ep.Proto
- bigBudget := 2 * 1000 * 1000 // Allow long run times
+ bigBudget := 1000 * 1000 * 1000 // Allow long run times
clone.LogicSigMaxCost = uint64(bigBudget)
clone.MaxAppProgramCost = bigBudget
ep.Proto = &clone
@@ -131,6 +131,7 @@ func defaultEvalParamsWithVersion(txn *transactions.SignedTxn, version uint64) *
Specials: &transactions.SpecialAddresses{},
Trace: &strings.Builder{},
FeeCredit: &zero,
+ SigLedger: MakeLedger(nil),
}
if txn != nil {
ep.TxnGroup[0].SignedTxn = *txn
@@ -185,16 +186,16 @@ func TestEmptyProgram(t *testing.T) {
testLogicBytes(t, nil, defaultEvalParams(nil), "invalid", "invalid program (empty)")
}
-// TestMinTealVersionParamEval tests eval/check reading the MinTealVersion from the param
-func TestMinTealVersionParamEvalCheckSignature(t *testing.T) {
+// TestMinAvmVersionParamEval tests eval/check reading the MinAvmVersion from the param
+func TestMinAvmVersionParamEvalCheckSignature(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
params := defaultEvalParams(nil)
version2 := uint64(rekeyingEnabledVersion)
- params.MinTealVersion = &version2
+ params.MinAvmVersion = &version2
program := make([]byte, binary.MaxVarintLen64)
- // set the teal program version to 1
+ // set the program version to 1
binary.PutUvarint(program, 1)
verErr := fmt.Sprintf("program version must be >= %d", appsEnabledVersion)
@@ -248,6 +249,64 @@ func TestTxnFieldToTealValue(t *testing.T) {
require.Equal(t, "", tealValue.Bytes)
}
+func TestTxnFirstValidTime(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, tx, ledger := makeSampleEnv()
+
+ // By default, test ledger uses an oddball round, ask it what round it's
+ // going to use and prep fv, lv accordingly.
+ current := ledger.Round()
+
+ // txn FirstValidTime is unusual. It's not really a field of a txn, but
+ // since it looks at the past of the blockchain, it is "stateless"
+
+ // Kill off ep.Ledger, to confirm it's not being used
+ ep.Ledger = nil
+
+ tx.FirstValid = current - 10
+ tx.LastValid = current + 10
+ testLogic(t, "txn FirstValidTime", 7, ep)
+
+ tx.FirstValid = current
+ testLogic(t, "txn FirstValidTime", 7, ep)
+
+ tx.FirstValid = current - basics.Round(ep.Proto.MaxTxnLife)
+ tx.LastValid = current
+ testLogic(t, "txn FirstValidTime", 7, ep)
+
+ // This test isn't really even possible because lifetime is too big. But
+ // nothing here checks that, so we can write this imposible test.
+ tx.FirstValid = current - basics.Round(ep.Proto.MaxTxnLife)
+ tx.LastValid = current + 1
+ testLogic(t, "txn FirstValidTime", 7, ep, "is not available")
+
+ // But also test behavior at the beginning of chain's life by setting the
+ // fake ledger round to a low number.
+ ledger.rnd = 10
+ tx.FirstValid = 2
+ tx.LastValid = 100
+ testLogic(t, "txn FirstValidTime; int 104; ==", 7, ep)
+
+ tx.FirstValid = 3
+ testLogic(t, "txn FirstValidTime; int 109; ==", 7, ep)
+
+ // This ensure 0 is not available, even though it "should" be allowed by the
+ // range check. round 0 doesn't exist!
+ tx.FirstValid = 1
+ testLogic(t, "txn FirstValidTime", 7, ep, "round 0 is not available")
+
+ // glassbox test - we know available range depends on LastValid - Lifetime - 1
+ tx.FirstValid = 1
+ tx.LastValid = tx.FirstValid + basics.Round(ep.Proto.MaxTxnLife)
+ testLogic(t, "txn FirstValidTime", 7, ep, "round 0 is not available")
+
+ // Same, for even weirder case of asking for a wraparound, high round
+ tx.FirstValid = 0 // I *guess* this is a legal txn early in chain's life
+ testLogic(t, "txn FirstValidTime; int 4; ==", 7, ep, "round 18446744073709551615 is not available")
+}
+
func TestWrongProtoVersion(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -855,7 +914,7 @@ func TestTxnBadField(t *testing.T) {
ops := testProg(t, source, AssemblerMaxVersion)
require.Equal(t, txnaOpcode, ops.Program[1])
ops.Program[1] = txnOpcode
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), fmt.Sprintf("invalid txn field %d", field))
+ testLogicBytes(t, ops.Program, defaultEvalParams(nil), "invalid txn field")
}
}
@@ -873,7 +932,7 @@ func TestGtxnBadField(t *testing.T) {
t.Parallel()
program := []byte{0x01, 0x33, 0x0, 127}
// TODO: Check should know the type stack was wrong
- testLogicBytes(t, program, defaultEvalParams(nil), "invalid txn field 127")
+ testLogicBytes(t, program, defaultEvalParams(nil), "invalid txn field TxnField(127)")
// test gtxn does not accept ApplicationArgs and Accounts
txnOpcode := OpsByName[LogicVersion]["txn"].Opcode
@@ -885,7 +944,7 @@ func TestGtxnBadField(t *testing.T) {
ops := testProg(t, source, AssemblerMaxVersion)
require.Equal(t, txnaOpcode, ops.Program[1])
ops.Program[1] = txnOpcode
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), fmt.Sprintf("invalid txn field %d", field))
+ testLogicBytes(t, ops.Program, defaultEvalParams(nil), "invalid txn field")
}
}
@@ -1005,6 +1064,10 @@ const globalV7TestProgram = globalV6TestProgram + `
// No new globals in v7
`
+const globalV8TestProgram = globalV7TestProgram + `
+// No new globals in v7
+`
+
func TestGlobal(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -1023,6 +1086,7 @@ func TestGlobal(t *testing.T) {
5: {GroupID, globalV5TestProgram},
6: {CallerApplicationAddress, globalV6TestProgram},
7: {CallerApplicationAddress, globalV7TestProgram},
+ 8: {CallerApplicationAddress, globalV8TestProgram},
}
// tests keys are versions so they must be in a range 1..AssemblerMaxVersion plus zero version
require.LessOrEqual(t, len(tests), AssemblerMaxVersion+1)
@@ -1468,7 +1532,38 @@ assert
int 1
`
-const testTxnProgramTextV7 = testTxnProgramTextV6
+const testTxnProgramTextV7 = testTxnProgramTextV6 + `
+assert
+
+txn NumApprovalProgramPages
+int 1
+==
+assert
+
+txna ApprovalProgramPages 0
+txn ApprovalProgram
+==
+assert
+
+txn NumClearStateProgramPages
+int 1
+==
+assert
+
+txna ClearStateProgramPages 0
+txn ClearStateProgram
+==
+assert
+
+txn FirstValidTime
+int 0
+>
+assert
+
+int 1
+`
+
+const testTxnProgramTextV8 = testTxnProgramTextV7
func makeSampleTxn() transactions.SignedTxn {
var txn transactions.SignedTxn
@@ -1572,6 +1667,7 @@ func TestTxn(t *testing.T) {
5: testTxnProgramTextV5,
6: testTxnProgramTextV6,
7: testTxnProgramTextV7,
+ 8: testTxnProgramTextV8,
}
for i, txnField := range TxnFieldNames {
@@ -1582,9 +1678,6 @@ func TestTxn(t *testing.T) {
continue
}
if !strings.Contains(tests[v], txnField) {
- if txnField == FirstValidTime.String() {
- continue
- }
// fields were introduced for itxn before they became available for txn
if v < txnEffectsVersion && fs.effects {
continue
@@ -1604,7 +1697,7 @@ func TestTxn(t *testing.T) {
txn.Txn.ClearStateProgram = clearOps.Program
txn.Lsig.Logic = ops.Program
txn.Txn.ExtraProgramPages = 2
- // RekeyTo not allowed in TEAL v1
+ // RekeyTo not allowed in v1
if v < rekeyingEnabledVersion {
txn.Txn.RekeyTo = basics.Address{}
}
@@ -1631,7 +1724,7 @@ func TestTxn(t *testing.T) {
if v < txnEffectsVersion {
testLogicFull(t, ops.Program, 3, ep)
} else {
- // Starting in txnEffectsVersion, there are fields we can't access all fields in Logic mode
+ // Starting in txnEffectsVersion, there are fields we can't access in Logic mode
testLogicFull(t, ops.Program, 3, ep, "not allowed in current mode")
// And the early tests use "arg" a lot - not allowed in stateful. So remove those tests.
lastArg := strings.Index(source, "arg 10\n==\n&&")
@@ -1864,7 +1957,7 @@ gtxn 0 Sender
for v, source := range tests {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
txn := makeSampleTxn()
- // RekeyTo not allowed in TEAL v1
+ // RekeyTo not allowed in v1
if v < rekeyingEnabledVersion {
txn.Txn.RekeyTo = basics.Address{}
}
@@ -1976,7 +2069,7 @@ txna ApplicationArgs 0
// modify txn field to unknown one
ops.Program[2] = 99
- testLogicBytes(t, ops.Program, ep, "invalid txn field 99")
+ testLogicBytes(t, ops.Program, ep, "invalid txn field TxnField(99)")
// modify txn array index
ops.Program[2] = saved
@@ -2023,7 +2116,7 @@ txna ApplicationArgs 0
// modify gtxn field to unknown one
ops.Program[3] = 99
- testLogicBytes(t, ops.Program, ep, "invalid txn field 99")
+ testLogicBytes(t, ops.Program, ep, "invalid txn field TxnField(99)")
// modify gtxn array index
ops.Program[3] = saved
@@ -2080,6 +2173,40 @@ global ZeroAddress
testLogicBytes(t, ops.Program, defaultEvalParams(&txn2))
}
+func TestTxnBigPrograms(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ source := `
+txna ApprovalProgramPages 0
+len
+int 4096
+==
+assert
+
+txna ApprovalProgramPages 1
+byte 0x01020304 // 4096 % 7 == 1, so the last four bytes start with 0x01
+==
+assert
+
+int 1
+`
+ var txn transactions.SignedTxn
+ txn.Txn.ApprovalProgram = make([]byte, 4100) // 4 bytes more than a page
+ for i := range txn.Txn.ApprovalProgram {
+ txn.Txn.ApprovalProgram[i] = byte(i % 7)
+ }
+ testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(&txn))
+
+ testLogic(t, `txna ApprovalProgramPages 2`, AssemblerMaxVersion, defaultEvalParams(&txn),
+ "invalid ApprovalProgramPages index")
+
+ // ClearStateProgram is not in the txn at all
+ testLogic(t, `txn NumClearStateProgramPages; !`, AssemblerMaxVersion, defaultEvalParams(&txn))
+ testLogic(t, `txna ClearStateProgramPages 0`, AssemblerMaxVersion, defaultEvalParams(&txn),
+ "invalid ClearStateProgramPages index")
+}
+
func TestTxnas(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -2265,7 +2392,7 @@ func TestExtractOp(t *testing.T) {
testAccepts(t, "byte 0x123456789abcaa; extract 0 6; byte 0x123456789abcaa; !=", 5)
testAccepts(t, "byte 0x123456789abc; int 5; int 1; extract3; byte 0xbc; ==", 5)
-
+ testAccepts(t, "byte 0x123456789abc; int 5; int 1; extract; byte 0xbc; ==", 5)
testAccepts(t, "byte 0x123456789abcdef0; int 1; extract_uint16; int 0x3456; ==", 5)
testAccepts(t, "byte 0x123456789abcdef0; int 1; extract_uint32; int 0x3456789a; ==", 5)
testAccepts(t, "byte 0x123456789abcdef0; int 0; extract_uint64; int 0x123456789abcdef0; ==", 5)
@@ -2273,6 +2400,7 @@ func TestExtractOp(t *testing.T) {
testAccepts(t, `byte "hello"; extract 5 0; byte ""; ==`, 5)
testAccepts(t, `byte "hello"; int 5; int 0; extract3; byte ""; ==`, 5)
+ testAccepts(t, `byte "hello"; int 5; int 0; extract; byte ""; ==`, 5)
}
func TestExtractFlop(t *testing.T) {
@@ -2281,11 +2409,17 @@ func TestExtractFlop(t *testing.T) {
// fails in compiler
testProg(t, `byte 0xf000000000000000
extract
- len`, 5, Expect{2, "extract expects 2 immediate arguments"})
+ len`, 5, Expect{2, "extract without immediates expects 3 stack arguments but stack height is 1"})
testProg(t, `byte 0xf000000000000000
extract 1
- len`, 5, Expect{2, "extract expects 2 immediate arguments"})
+ len`, 5, Expect{2, "extract expects 0 or 2 immediate arguments"})
+
+ testProg(t, `byte 0xf000000000000000
+ int 0
+ int 5
+ extract3 1 2
+ len`, 5, Expect{4, "extract3 expects 0 immediate arguments"})
// fails at runtime
err := testPanics(t, `byte 0xf000000000000000
@@ -2566,6 +2700,7 @@ int 1`,
Proto: makeTestProto(),
TxnGroup: txgroup,
pastScratch: make([]*scratchSpace, 2),
+ SigLedger: MakeLedger(nil),
}
switch failCase.runMode {
@@ -3373,6 +3508,7 @@ int 142791994204213819
`
func evalLoop(b *testing.B, runs int, program []byte) {
+ b.Helper()
b.ResetTimer()
for i := 0; i < runs; i++ {
var txn transactions.SignedTxn
@@ -3407,6 +3543,7 @@ func benchmarkBasicProgram(b *testing.B, source string) {
// during the "operation". They are presumed to be fast (15/ns), so
// the idea is that you can subtract that out from the reported speed
func benchmarkOperation(b *testing.B, prefix string, operation string, suffix string) {
+ b.Helper()
runs := 1 + b.N/2000
inst := strings.Count(operation, ";") + strings.Count(operation, "\n")
source := prefix + ";" + strings.Repeat(operation+";", 2000) + ";" + suffix
@@ -3833,7 +3970,7 @@ func TestApplicationsDisallowOldTeal(t *testing.T) {
testApp(t, source, ep)
}
-func TestAnyRekeyToOrApplicationRaisesMinTealVersion(t *testing.T) {
+func TestAnyRekeyToOrApplicationRaisesMinAvmVersion(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -3878,8 +4015,8 @@ func TestAnyRekeyToOrApplicationRaisesMinTealVersion(t *testing.T) {
ep := defaultEvalParams(nil)
ep.TxnGroup = transactions.WrapSignedTxnsWithAD(cse.group)
- // Computed MinTealVersion should be == validFromVersion
- calc := ComputeMinTealVersion(ep.TxnGroup)
+ // Computed MinAvmVersion should be == validFromVersion
+ calc := ComputeMinAvmVersion(ep.TxnGroup)
require.Equal(t, calc, cse.validFromVersion)
// Should fail for all versions < validFromVersion
@@ -4034,7 +4171,7 @@ func TestRekeyFailsOnOldVersion(t *testing.T) {
for v := uint64(0); v < rekeyingEnabledVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops := testProg(t, `int 1`, v)
+ ops := testProg(t, "int 1", v)
var txn transactions.SignedTxn
txn.Txn.RekeyTo = basics.Address{1, 2, 3, 4}
ep := defaultEvalParams(&txn)
@@ -5134,7 +5271,7 @@ func TestOpJSONRef(t *testing.T) {
if fidoVersion <= AssemblerMaxVersion {
for i := range expectedErrs {
if strings.Contains(expectedErrs[i].s, "json_ref") {
- expectedErrs[i].s = fmt.Sprintf("json_ref opcode was introduced in TEAL v%d", fidoVersion)
+ expectedErrs[i].s = fmt.Sprintf("json_ref opcode was introduced in v%d", fidoVersion)
}
}
}
@@ -5345,7 +5482,7 @@ func TestOpJSONRef(t *testing.T) {
if fidoVersion <= AssemblerMaxVersion {
for i := range expectedErrs {
if strings.Contains(expectedErrs[i].s, "json_ref") {
- expectedErrs[i].s = fmt.Sprintf("json_ref opcode was introduced in TEAL v%d", fidoVersion)
+ expectedErrs[i].s = fmt.Sprintf("json_ref opcode was introduced in v%d", fidoVersion)
}
}
}
diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go
index 0954c3229..cb1685d91 100644
--- a/data/transactions/logic/fields.go
+++ b/data/transactions/logic/fields.go
@@ -23,7 +23,7 @@ import (
"github.com/algorand/go-algorand/protocol"
)
-//go:generate stringer -type=TxnField,GlobalField,AssetParamsField,AppParamsField,AcctParamsField,AssetHoldingField,OnCompletionConstType,EcdsaCurve,Base64Encoding,JSONRefType -output=fields_string.go
+//go:generate stringer -type=TxnField,GlobalField,AssetParamsField,AppParamsField,AcctParamsField,AssetHoldingField,OnCompletionConstType,EcdsaCurve,Base64Encoding,JSONRefType,VrfStandard,BlockField -output=fields_string.go
// FieldSpec unifies the various specs for assembly, disassembly, and doc generation.
type FieldSpec interface {
@@ -69,7 +69,7 @@ const (
Fee
// FirstValid Transaction.FirstValid
FirstValid
- // FirstValidTime panic
+ // FirstValidTime timestamp of block(FirstValid-1)
FirstValidTime
// LastValid Transaction.LastValid
LastValid
@@ -201,6 +201,18 @@ const (
// StateProofPK Transaction.StateProofPK
StateProofPK
+ // ApprovalProgramPages [][]byte
+ ApprovalProgramPages
+
+ // NumApprovalProgramPages = len(ApprovalProgramPages) // 4096
+ NumApprovalProgramPages
+
+ // ClearStateProgramPages [][]byte
+ ClearStateProgramPages
+
+ // NumClearStateProgramPages = len(ClearStateProgramPages) // 4096
+ NumClearStateProgramPages
+
invalidTxnField // compile-time constant for number of fields
)
@@ -258,7 +270,7 @@ var txnFieldSpecs = [...]txnFieldSpec{
{Sender, StackBytes, false, 0, 5, false, "32 byte address"},
{Fee, StackUint64, false, 0, 5, false, "microalgos"},
{FirstValid, StackUint64, false, 0, 0, false, "round number"},
- {FirstValidTime, StackUint64, false, 0, 0, false, "Causes program to fail; reserved for future use"},
+ {FirstValidTime, StackUint64, false, randomnessVersion, 0, false, "UNIX timestamp of block before txn.FirstValid. Fails if negative"},
{LastValid, StackUint64, false, 0, 0, false, "round number"},
{Note, StackBytes, false, 0, 6, false, "Any data up to 1024 bytes"},
{Lease, StackBytes, false, 0, 0, false, "32 byte lease value"},
@@ -271,11 +283,11 @@ var txnFieldSpecs = [...]txnFieldSpec{
{VoteLast, StackUint64, false, 0, 6, false, "The last round that the participation key is valid."},
{VoteKeyDilution, StackUint64, false, 0, 6, false, "Dilution for the 2-level participation key"},
{Type, StackBytes, false, 0, 5, false, "Transaction type as bytes"},
- {TypeEnum, StackUint64, false, 0, 5, false, "See table below"},
+ {TypeEnum, StackUint64, false, 0, 5, false, "Transaction type as integer"},
{XferAsset, StackUint64, false, 0, 5, false, "Asset ID"},
{AssetAmount, StackUint64, false, 0, 5, false, "value in Asset's units"},
{AssetSender, StackBytes, false, 0, 5, false,
- "32 byte address. Moves asset from AssetSender if Sender is the Clawback address of the asset."},
+ "32 byte address. Source of assets if Sender is the Asset's Clawback address."},
{AssetReceiver, StackBytes, false, 0, 5, false, "32 byte address"},
{AssetCloseTo, StackBytes, false, 0, 5, false, "32 byte address"},
{GroupIndex, StackUint64, false, 0, 0, false,
@@ -332,19 +344,26 @@ var txnFieldSpecs = [...]txnFieldSpec{
{LastLog, StackBytes, false, 6, 0, true, "The last message emitted. Empty bytes if none were emitted"},
// Not an effect. Just added after the effects fields.
- {StateProofPK, StackBytes, false, 6, 6, false, "64 byte state proof public key commitment"},
+ {StateProofPK, StackBytes, false, 6, 6, false, "64 byte state proof public key"},
+
+ // Pseudo-fields to aid access to large programs (bigger than TEAL values)
+ // reading in a txn seems not *super* useful, but setting in `itxn` is critical to inner app factories
+ {ApprovalProgramPages, StackBytes, true, 7, 7, false, "Approval Program as an array of pages"},
+ {NumApprovalProgramPages, StackUint64, false, 7, 0, false, "Number of Approval Program pages"},
+ {ClearStateProgramPages, StackBytes, true, 7, 7, false, "ClearState Program as an array of pages"},
+ {NumClearStateProgramPages, StackUint64, false, 7, 0, false, "Number of ClearState Program pages"},
}
// TxnFields contains info on the arguments to the txn* family of opcodes
var TxnFields = FieldGroup{
- "txn", "Fields (see [transaction reference](https://developer.algorand.org/docs/reference/transactions/))",
+ "txn", "",
TxnFieldNames[:],
txnFieldSpecByName,
}
-// TxnScalarFields narows TxnFields to only have the names of scalar fetching opcodes
+// TxnScalarFields narrows TxnFields to only have the names of scalar fetching opcodes
var TxnScalarFields = FieldGroup{
- "txn", "",
+ "txn", "Fields (see [transaction reference](https://developer.algorand.org/docs/reference/transactions/))",
txnScalarFieldNames(),
txnFieldSpecByName,
}
@@ -367,7 +386,7 @@ func txnScalarFieldNames() []string {
// TxnArrayFields narows TxnFields to only have the names of array fetching opcodes
var TxnArrayFields = FieldGroup{
- "txna", "",
+ "txna", "Fields (see [transaction reference](https://developer.algorand.org/docs/reference/transactions/))",
txnaFieldNames(),
txnFieldSpecByName,
}
@@ -545,7 +564,7 @@ func (fs globalFieldSpec) Note() string {
}
var globalFieldSpecs = [...]globalFieldSpec{
- // version 0 is the same as TEAL v1 (initial TEAL release)
+ // version 0 is the same as v1 (initial release)
{MinTxnFee, StackUint64, modeAny, 0, "microalgos"},
{MinBalance, StackUint64, modeAny, 0, "microalgos"},
{MaxTxnLife, StackUint64, modeAny, 0, "rounds"},
@@ -787,6 +806,145 @@ var JSONRefTypes = FieldGroup{
jsonRefSpecByName,
}
+// VrfStandard is an enum for the `vrf_verify` opcode
+type VrfStandard int
+
+const (
+ // VrfAlgorand is the built-in VRF of the Algorand chain
+ VrfAlgorand VrfStandard = iota
+ invalidVrfStandard // compile-time constant for number of fields
+)
+
+var vrfStandardNames [invalidVrfStandard]string
+
+type vrfStandardSpec struct {
+ field VrfStandard
+ version uint64
+}
+
+var vrfStandardSpecs = [...]vrfStandardSpec{
+ {VrfAlgorand, randomnessVersion},
+}
+
+func vrfStandardSpecByField(r VrfStandard) (vrfStandardSpec, bool) {
+ if int(r) >= len(vrfStandardSpecs) {
+ return vrfStandardSpec{}, false
+ }
+ return vrfStandardSpecs[r], true
+}
+
+var vrfStandardSpecByName = make(vrfStandardSpecMap, len(vrfStandardNames))
+
+type vrfStandardSpecMap map[string]vrfStandardSpec
+
+func (s vrfStandardSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
+}
+
+func (fs vrfStandardSpec) Field() byte {
+ return byte(fs.field)
+}
+
+func (fs vrfStandardSpec) Type() StackType {
+ return StackNone // Will not show, since all are the same
+}
+
+func (fs vrfStandardSpec) OpVersion() uint64 {
+ return randomnessVersion
+}
+
+func (fs vrfStandardSpec) Version() uint64 {
+ return fs.version
+}
+
+func (fs vrfStandardSpec) Note() string {
+ note := "" // no doc list?
+ return note
+}
+
+func (s vrfStandardSpecMap) SpecByName(name string) FieldSpec {
+ return s[name]
+}
+
+// VrfStandards describes the json_ref immediate
+var VrfStandards = FieldGroup{
+ "vrf_verify", "Standards",
+ vrfStandardNames[:],
+ vrfStandardSpecByName,
+}
+
+// BlockField is an enum for the `block` opcode
+type BlockField int
+
+const (
+ // BlkSeed is the Block's vrf seed
+ BlkSeed BlockField = iota
+ // BlkTimestamp is the Block's timestamp, seconds from epoch
+ BlkTimestamp
+ invalidBlockField // compile-time constant for number of fields
+)
+
+var blockFieldNames [invalidBlockField]string
+
+type blockFieldSpec struct {
+ field BlockField
+ ftype StackType
+ version uint64
+}
+
+var blockFieldSpecs = [...]blockFieldSpec{
+ {BlkSeed, StackBytes, randomnessVersion},
+ {BlkTimestamp, StackUint64, randomnessVersion},
+}
+
+func blockFieldSpecByField(r BlockField) (blockFieldSpec, bool) {
+ if int(r) >= len(blockFieldSpecs) {
+ return blockFieldSpec{}, false
+ }
+ return blockFieldSpecs[r], true
+}
+
+var blockFieldSpecByName = make(blockFieldSpecMap, len(blockFieldNames))
+
+type blockFieldSpecMap map[string]blockFieldSpec
+
+func (s blockFieldSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
+}
+
+func (fs blockFieldSpec) Field() byte {
+ return byte(fs.field)
+}
+
+func (fs blockFieldSpec) Type() StackType {
+ return fs.ftype
+}
+
+func (fs blockFieldSpec) OpVersion() uint64 {
+ return randomnessVersion
+}
+
+func (fs blockFieldSpec) Version() uint64 {
+ return fs.version
+}
+
+func (fs blockFieldSpec) Note() string {
+ return ""
+}
+
+func (s blockFieldSpecMap) SpecByName(name string) FieldSpec {
+ return s[name]
+}
+
+// BlockFields describes the json_ref immediate
+var BlockFields = FieldGroup{
+ "block", "Fields",
+ blockFieldNames[:],
+ blockFieldSpecByName,
+}
+
// AssetHoldingField is an enum for `asset_holding_get` opcode
type AssetHoldingField int
@@ -1040,7 +1198,7 @@ var AppParamsFields = FieldGroup{
type AcctParamsField int
const (
- // AcctBalance is the blance, with pending rewards
+ // AcctBalance is the balance, with pending rewards
AcctBalance AcctParamsField = iota
// AcctMinBalance is algos needed for this accounts apps and assets
AcctMinBalance
@@ -1146,6 +1304,20 @@ func init() {
jsonRefSpecByName[s.field.String()] = s
}
+ equal(len(vrfStandardSpecs), len(vrfStandardNames))
+ for i, s := range vrfStandardSpecs {
+ equal(int(s.field), i)
+ vrfStandardNames[i] = s.field.String()
+ vrfStandardSpecByName[s.field.String()] = s
+ }
+
+ equal(len(blockFieldSpecs), len(blockFieldNames))
+ for i, s := range blockFieldSpecs {
+ equal(int(s.field), i)
+ blockFieldNames[i] = s.field.String()
+ blockFieldSpecByName[s.field.String()] = s
+ }
+
equal(len(assetHoldingFieldSpecs), len(assetHoldingFieldNames))
for i, s := range assetHoldingFieldSpecs {
equal(int(s.field), i)
diff --git a/data/transactions/logic/fields_string.go b/data/transactions/logic/fields_string.go
index cd2298463..6c90a7a67 100644
--- a/data/transactions/logic/fields_string.go
+++ b/data/transactions/logic/fields_string.go
@@ -1,4 +1,4 @@
-// Code generated by "stringer -type=TxnField,GlobalField,AssetParamsField,AppParamsField,AcctParamsField,AssetHoldingField,OnCompletionConstType,EcdsaCurve,Base64Encoding,JSONRefType -output=fields_string.go"; DO NOT EDIT.
+// Code generated by "stringer -type=TxnField,GlobalField,AssetParamsField,AppParamsField,AcctParamsField,AssetHoldingField,OnCompletionConstType,EcdsaCurve,Base64Encoding,JSONRefType,VrfStandard,BlockField -output=fields_string.go"; DO NOT EDIT.
package logic
@@ -72,12 +72,16 @@ func _() {
_ = x[CreatedApplicationID-61]
_ = x[LastLog-62]
_ = x[StateProofPK-63]
- _ = x[invalidTxnField-64]
+ _ = x[ApprovalProgramPages-64]
+ _ = x[NumApprovalProgramPages-65]
+ _ = x[ClearStateProgramPages-66]
+ _ = x[NumClearStateProgramPages-67]
+ _ = x[invalidTxnField-68]
}
-const _TxnField_name = "SenderFeeFirstValidFirstValidTimeLastValidNoteLeaseReceiverAmountCloseRemainderToVotePKSelectionPKVoteFirstVoteLastVoteKeyDilutionTypeTypeEnumXferAssetAssetAmountAssetSenderAssetReceiverAssetCloseToGroupIndexTxIDApplicationIDOnCompletionApplicationArgsNumAppArgsAccountsNumAccountsApprovalProgramClearStateProgramRekeyToConfigAssetConfigAssetTotalConfigAssetDecimalsConfigAssetDefaultFrozenConfigAssetUnitNameConfigAssetNameConfigAssetURLConfigAssetMetadataHashConfigAssetManagerConfigAssetReserveConfigAssetFreezeConfigAssetClawbackFreezeAssetFreezeAssetAccountFreezeAssetFrozenAssetsNumAssetsApplicationsNumApplicationsGlobalNumUintGlobalNumByteSliceLocalNumUintLocalNumByteSliceExtraProgramPagesNonparticipationLogsNumLogsCreatedAssetIDCreatedApplicationIDLastLogStateProofPKinvalidTxnField"
+const _TxnField_name = "SenderFeeFirstValidFirstValidTimeLastValidNoteLeaseReceiverAmountCloseRemainderToVotePKSelectionPKVoteFirstVoteLastVoteKeyDilutionTypeTypeEnumXferAssetAssetAmountAssetSenderAssetReceiverAssetCloseToGroupIndexTxIDApplicationIDOnCompletionApplicationArgsNumAppArgsAccountsNumAccountsApprovalProgramClearStateProgramRekeyToConfigAssetConfigAssetTotalConfigAssetDecimalsConfigAssetDefaultFrozenConfigAssetUnitNameConfigAssetNameConfigAssetURLConfigAssetMetadataHashConfigAssetManagerConfigAssetReserveConfigAssetFreezeConfigAssetClawbackFreezeAssetFreezeAssetAccountFreezeAssetFrozenAssetsNumAssetsApplicationsNumApplicationsGlobalNumUintGlobalNumByteSliceLocalNumUintLocalNumByteSliceExtraProgramPagesNonparticipationLogsNumLogsCreatedAssetIDCreatedApplicationIDLastLogStateProofPKApprovalProgramPagesNumApprovalProgramPagesClearStateProgramPagesNumClearStateProgramPagesinvalidTxnField"
-var _TxnField_index = [...]uint16{0, 6, 9, 19, 33, 42, 46, 51, 59, 65, 81, 87, 98, 107, 115, 130, 134, 142, 151, 162, 173, 186, 198, 208, 212, 225, 237, 252, 262, 270, 281, 296, 313, 320, 331, 347, 366, 390, 409, 424, 438, 461, 479, 497, 514, 533, 544, 562, 579, 585, 594, 606, 621, 634, 652, 664, 681, 698, 714, 718, 725, 739, 759, 766, 778, 793}
+var _TxnField_index = [...]uint16{0, 6, 9, 19, 33, 42, 46, 51, 59, 65, 81, 87, 98, 107, 115, 130, 134, 142, 151, 162, 173, 186, 198, 208, 212, 225, 237, 252, 262, 270, 281, 296, 313, 320, 331, 347, 366, 390, 409, 424, 438, 461, 479, 497, 514, 533, 544, 562, 579, 585, 594, 606, 621, 634, 652, 664, 681, 698, 714, 718, 725, 739, 759, 766, 778, 798, 821, 843, 868, 883}
func (i TxnField) String() string {
if i < 0 || i >= TxnField(len(_TxnField_index)-1) {
@@ -291,3 +295,40 @@ func (i JSONRefType) String() string {
}
return _JSONRefType_name[_JSONRefType_index[i]:_JSONRefType_index[i+1]]
}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[VrfAlgorand-0]
+ _ = x[invalidVrfStandard-1]
+}
+
+const _VrfStandard_name = "VrfAlgorandinvalidVrfStandard"
+
+var _VrfStandard_index = [...]uint8{0, 11, 29}
+
+func (i VrfStandard) String() string {
+ if i < 0 || i >= VrfStandard(len(_VrfStandard_index)-1) {
+ return "VrfStandard(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _VrfStandard_name[_VrfStandard_index[i]:_VrfStandard_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[BlkSeed-0]
+ _ = x[BlkTimestamp-1]
+ _ = x[invalidBlockField-2]
+}
+
+const _BlockField_name = "BlkSeedBlkTimestampinvalidBlockField"
+
+var _BlockField_index = [...]uint8{0, 7, 19, 36}
+
+func (i BlockField) String() string {
+ if i < 0 || i >= BlockField(len(_BlockField_index)-1) {
+ return "BlockField(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _BlockField_name[_BlockField_index[i]:_BlockField_index[i+1]]
+}
diff --git a/data/transactions/logic/fields_test.go b/data/transactions/logic/fields_test.go
index 4a8128c87..0a2928813 100644
--- a/data/transactions/logic/fields_test.go
+++ b/data/transactions/logic/fields_test.go
@@ -78,7 +78,7 @@ func TestGlobalFieldsVersions(t *testing.T) {
}
}
-// ensure v2+ fields error in programs of previous TEAL version, similarly to global fields test
+// ensure v2+ fields error in programs of previous version, similarly to global fields test
func TestTxnFieldVersions(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -105,7 +105,7 @@ func TestTxnFieldVersions(t *testing.T) {
txn := makeSampleTxn()
// We'll reject too early if we have a nonzero RekeyTo, because that
// field must be zero for every txn in the group if this is an old
- // TEAL version
+ // AVM version
txn.Txn.RekeyTo = basics.Address{}
txgroup := makeSampleTxnGroup(txn)
asmDefaultError := "...was introduced in ..."
diff --git a/data/transactions/logic/langspec.json b/data/transactions/logic/langspec.json
index 13df5e0e6..4e29b9a88 100644
--- a/data/transactions/logic/langspec.json
+++ b/data/transactions/logic/langspec.json
@@ -1,6 +1,6 @@
{
"EvalMaxVersion": 7,
- "LogicSigVersion": 6,
+ "LogicSigVersion": 7,
"Ops": [
{
"Opcode": 0,
@@ -608,11 +608,14 @@
"CreatedAssetID",
"CreatedApplicationID",
"LastLog",
- "StateProofPK"
+ "StateProofPK",
+ "ApprovalProgramPages",
+ "NumApprovalProgramPages",
+ "ClearStateProgramPages",
+ "NumClearStateProgramPages"
],
- "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBB",
+ "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBBBUBU",
"Doc": "field F of current transaction",
- "DocExtra": "FirstValidTime causes the program to fail. The field is reserved for future use.",
"ImmediateNote": "{uint8 transaction field index}",
"Groups": [
"Loading Values"
@@ -716,9 +719,13 @@
"CreatedAssetID",
"CreatedApplicationID",
"LastLog",
- "StateProofPK"
+ "StateProofPK",
+ "ApprovalProgramPages",
+ "NumApprovalProgramPages",
+ "ClearStateProgramPages",
+ "NumClearStateProgramPages"
],
- "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBB",
+ "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBBBUBU",
"Doc": "field F of the Tth transaction in the current group",
"DocExtra": "for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`.",
"ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index}",
@@ -758,10 +765,12 @@
"Accounts",
"Assets",
"Applications",
- "Logs"
+ "Logs",
+ "ApprovalProgramPages",
+ "ClearStateProgramPages"
],
- "ArgEnumTypes": "BBUUB",
- "Doc": "Ith value of the array field F of the current transaction",
+ "ArgEnumTypes": "BBUUBBB",
+ "Doc": "Ith value of the array field F of the current transaction\n`txna` can be called using `txn` with 2 immediates.",
"ImmediateNote": "{uint8 transaction field index} {uint8 transaction field array index}",
"Groups": [
"Loading Values"
@@ -777,10 +786,12 @@
"Accounts",
"Assets",
"Applications",
- "Logs"
+ "Logs",
+ "ApprovalProgramPages",
+ "ClearStateProgramPages"
],
- "ArgEnumTypes": "BBUUB",
- "Doc": "Ith value of the array field F from the Tth transaction in the current group",
+ "ArgEnumTypes": "BBUUBBB",
+ "Doc": "Ith value of the array field F from the Tth transaction in the current group\n`gtxna` can be called using `gtxn` with 3 immediates.",
"ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}",
"Groups": [
"Loading Values"
@@ -856,9 +867,13 @@
"CreatedAssetID",
"CreatedApplicationID",
"LastLog",
- "StateProofPK"
+ "StateProofPK",
+ "ApprovalProgramPages",
+ "NumApprovalProgramPages",
+ "ClearStateProgramPages",
+ "NumClearStateProgramPages"
],
- "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBB",
+ "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBBBUBU",
"Doc": "field F of the Ath transaction in the current group",
"DocExtra": "for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. gtxns exists so that _i_ can be calculated, often based on the index of the current transaction.",
"ImmediateNote": "{uint8 transaction field index}",
@@ -877,10 +892,12 @@
"Accounts",
"Assets",
"Applications",
- "Logs"
+ "Logs",
+ "ApprovalProgramPages",
+ "ClearStateProgramPages"
],
- "ArgEnumTypes": "BBUUB",
- "Doc": "Ith value of the array field F from the Ath transaction in the current group",
+ "ArgEnumTypes": "BBUUBBB",
+ "Doc": "Ith value of the array field F from the Ath transaction in the current group\n`gtxnsa` can be called using `gtxns` with 2 immediates.",
"ImmediateNote": "{uint8 transaction field index} {uint8 transaction field array index}",
"Groups": [
"Loading Values"
@@ -1200,7 +1217,7 @@
"Args": "BUU",
"Returns": "B",
"Size": 1,
- "Doc": "A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails",
+ "Doc": "A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails\n`extract3` can be called using `extract` with no immediates.",
"Groups": [
"Byte Array Manipulation"
]
@@ -1244,7 +1261,7 @@
"Args": "BB",
"Returns": "B",
"Size": 2,
- "Doc": "Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A)",
+ "Doc": "Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A)\n`replace2` can be called using `replace` with 1 immediate.",
"ImmediateNote": "{uint8 start position}",
"Groups": [
"Byte Array Manipulation"
@@ -1256,7 +1273,7 @@
"Args": "BUB",
"Returns": "B",
"Size": 1,
- "Doc": "Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A)",
+ "Doc": "Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A)\n`replace3` can be called using `replace` with no immediates.",
"Groups": [
"Byte Array Manipulation"
]
@@ -1268,7 +1285,7 @@
"Returns": "B",
"Size": 2,
"Doc": "decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E",
- "DocExtra": "Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See \u003ca href=\"https://rfc-editor.org/rfc/rfc4648.html#section-4\"\u003eRFC 4648\u003c/a\u003e (sections 4 and 5). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\\n` and `\\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\\r`, or `\\n`.",
+ "DocExtra": "*Warning*: Usage should be restricted to very rare use cases. In almost all cases, smart contracts should directly handle non-encoded byte-strings.\tThis opcode should only be used in cases where base64 is the only available option, e.g. interoperability with a third-party that only signs base64 strings.\n\n Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See [RFC 4648 sections 4 and 5](https://rfc-editor.org/rfc/rfc4648.html#section-4). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\\n` and `\\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\\r`, or `\\n`.",
"ImmediateNote": "{uint8 encoding index}",
"Groups": [
"Byte Array Manipulation"
@@ -1280,8 +1297,8 @@
"Args": "BB",
"Returns": ".",
"Size": 2,
- "Doc": "return key B's value from a [valid](jsonspec.md) utf-8 encoded json object A",
- "DocExtra": "specify the return type with an immediate arg either as JSONUint64 or JSONString or JSONObject.",
+ "Doc": "key B's value, of type R, from a [valid](jsonspec.md) utf-8 encoded json object A",
+ "DocExtra": "*Warning*: Usage should be restricted to very rare use cases, as JSON decoding is expensive and quite limited. In addition, JSON objects are large and not optimized for size.\n\nAlmost all smart contracts should use simpler and smaller methods (such as the [ABI](https://arc.algorand.foundation/ARCs/arc-0004). This opcode should only be used in cases where JSON is only available option, e.g. when a third-party only signs JSON.",
"ImmediateNote": "{string return type}",
"Groups": [
"Byte Array Manipulation"
@@ -1661,42 +1678,6 @@
]
},
{
- "Opcode": 153,
- "Name": "bn256_add",
- "Args": "BB",
- "Returns": "B",
- "Size": 1,
- "Doc": "for (curve points A and B) return the curve point A + B",
- "DocExtra": "A, B are curve points in G1 group. Each point consists of (X, Y) where X and Y are 256 bit integers, big-endian encoded. The encoded point is 64 bytes from concatenation of 32 byte X and 32 byte Y.",
- "Groups": [
- "Arithmetic"
- ]
- },
- {
- "Opcode": 154,
- "Name": "bn256_scalar_mul",
- "Args": "BB",
- "Returns": "B",
- "Size": 1,
- "Doc": "for (curve point A, scalar K) return the curve point KA",
- "DocExtra": "A is a curve point in G1 Group and encoded as described in `bn256_add`. Scalar K is a big-endian encoded big integer that has no padding zeros.",
- "Groups": [
- "Arithmetic"
- ]
- },
- {
- "Opcode": 155,
- "Name": "bn256_pairing",
- "Args": "BB",
- "Returns": "U",
- "Size": 1,
- "Doc": "for (points in G1 group G1s, points in G2 group G2s), return whether they are paired =\u003e {0 or 1}",
- "DocExtra": "G1s are encoded by the concatenation of encoded G1 points, as described in `bn256_add`. G2s are encoded by the concatenation of encoded G2 points. Each G2 is in form (XA0+i*XA1, YA0+i*YA1) and encoded by big-endian field element XA0, XA1, YA0 and YA1 in sequence.",
- "Groups": [
- "Arithmetic"
- ]
- },
- {
"Opcode": 160,
"Name": "b+",
"Args": "BB",
@@ -1947,9 +1928,11 @@
"LocalNumByteSlice",
"ExtraProgramPages",
"Nonparticipation",
- "StateProofPK"
+ "StateProofPK",
+ "ApprovalProgramPages",
+ "ClearStateProgramPages"
],
- "ArgEnumTypes": "BUBBUBBBUUUBUUUBBBUUBBBBBUUUUBBBBBBBBUBUUUUUUUUUB",
+ "ArgEnumTypes": "BUBBUBBBUUUBUUUBBBUUBBBBBUUUUBBBBBBBBUBUUUUUUUUUBBB",
"Doc": "set field F of the current inner transaction to A",
"DocExtra": "`itxn_field` fails if A is of the wrong type for F, including a byte array of the wrong size for use as an address when F is an address field. `itxn_field` also fails if A is an account, asset, or app that is not _available_, or an attempt is made extend an array field beyond the limit imposed by consensus parameters. (Addresses set into asset params of acfg transactions need not be _available_.)",
"ImmediateNote": "{uint8 transaction field index}",
@@ -2036,9 +2019,13 @@
"CreatedAssetID",
"CreatedApplicationID",
"LastLog",
- "StateProofPK"
+ "StateProofPK",
+ "ApprovalProgramPages",
+ "NumApprovalProgramPages",
+ "ClearStateProgramPages",
+ "NumClearStateProgramPages"
],
- "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBB",
+ "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBBBUBU",
"Doc": "field F of the last inner transaction",
"ImmediateNote": "{uint8 transaction field index}",
"Groups": [
@@ -2055,9 +2042,11 @@
"Accounts",
"Assets",
"Applications",
- "Logs"
+ "Logs",
+ "ApprovalProgramPages",
+ "ClearStateProgramPages"
],
- "ArgEnumTypes": "BBUUB",
+ "ArgEnumTypes": "BBUUBBB",
"Doc": "Ith value of the array field F of the last inner transaction",
"ImmediateNote": "{uint8 transaction field index} {uint8 transaction field array index}",
"Groups": [
@@ -2143,9 +2132,13 @@
"CreatedAssetID",
"CreatedApplicationID",
"LastLog",
- "StateProofPK"
+ "StateProofPK",
+ "ApprovalProgramPages",
+ "NumApprovalProgramPages",
+ "ClearStateProgramPages",
+ "NumClearStateProgramPages"
],
- "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBB",
+ "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBBBUBU",
"Doc": "field F of the Tth transaction in the last inner group submitted",
"ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index}",
"Groups": [
@@ -2162,9 +2155,11 @@
"Accounts",
"Assets",
"Applications",
- "Logs"
+ "Logs",
+ "ApprovalProgramPages",
+ "ClearStateProgramPages"
],
- "ArgEnumTypes": "BBUUB",
+ "ArgEnumTypes": "BBUUBBB",
"Doc": "Ith value of the array field F from the Tth transaction in the last inner group submitted",
"ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}",
"Groups": [
@@ -2182,9 +2177,11 @@
"Accounts",
"Assets",
"Applications",
- "Logs"
+ "Logs",
+ "ApprovalProgramPages",
+ "ClearStateProgramPages"
],
- "ArgEnumTypes": "BBUUB",
+ "ArgEnumTypes": "BBUUBBB",
"Doc": "Ath value of the array field F of the current transaction",
"ImmediateNote": "{uint8 transaction field index}",
"Groups": [
@@ -2202,9 +2199,11 @@
"Accounts",
"Assets",
"Applications",
- "Logs"
+ "Logs",
+ "ApprovalProgramPages",
+ "ClearStateProgramPages"
],
- "ArgEnumTypes": "BBUUB",
+ "ArgEnumTypes": "BBUUBBB",
"Doc": "Ath value of the array field F from the Tth transaction in the current group",
"ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index}",
"Groups": [
@@ -2222,9 +2221,11 @@
"Accounts",
"Assets",
"Applications",
- "Logs"
+ "Logs",
+ "ApprovalProgramPages",
+ "ClearStateProgramPages"
],
- "ArgEnumTypes": "BBUUB",
+ "ArgEnumTypes": "BBUUBBB",
"Doc": "Bth value of the array field F from the Ath transaction in the current group",
"ImmediateNote": "{uint8 transaction field index}",
"Groups": [
@@ -2276,6 +2277,31 @@
"Groups": [
"Inner Transactions"
]
+ },
+ {
+ "Opcode": 208,
+ "Name": "vrf_verify",
+ "Args": "BBB",
+ "Returns": "BU",
+ "Size": 2,
+ "Doc": "Verify the proof B of message A against pubkey C. Returns vrf output and verification flag.",
+ "DocExtra": "`VrfAlgorand` is the VRF used in Algorand. It is ECVRF-ED25519-SHA512-Elligator2, specified in the IETF internet draft [draft-irtf-cfrg-vrf-03](https://datatracker.ietf.org/doc/draft-irtf-cfrg-vrf/03/).",
+ "ImmediateNote": "{uint8 parameters index}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 209,
+ "Name": "block",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 2,
+ "Doc": "field F of block A. Fail unless A falls between txn.LastValid-1002 and the current round (exclusive)",
+ "ImmediateNote": "{uint8 block field}",
+ "Groups": [
+ "State Access"
+ ]
}
]
}
diff --git a/data/transactions/logic/ledger_test.go b/data/transactions/logic/ledger_test.go
index 5df6102e1..9f427b0e9 100644
--- a/data/transactions/logic/ledger_test.go
+++ b/data/transactions/logic/ledger_test.go
@@ -19,9 +19,12 @@ package logic
import (
"errors"
"fmt"
+ "math"
"math/rand"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/committee"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
@@ -197,6 +200,25 @@ func (l *Ledger) LatestTimestamp() int64 {
return int64(rand.Uint32() + 1)
}
+// BlockHdrCached returns the block header for the given round, if it is available
+func (l *Ledger) BlockHdrCached(round basics.Round) (bookkeeping.BlockHeader, error) {
+ hdr := bookkeeping.BlockHeader{}
+ // Return a fake seed that is different for each round
+ seed := committee.Seed{}
+ seed[0] = byte(round)
+ seed[1] = byte(round >> 8)
+ seed[2] = byte(round >> 16)
+ seed[3] = byte(round >> 24)
+ seed[4] = byte(round >> 32)
+ seed[5] = byte(round >> 40)
+ seed[6] = byte(round >> 48)
+ seed[7] = byte(round >> 56)
+ hdr.Seed = seed
+ hdr.TimeStamp = 100 + (9 * int64(round) / 2)
+ return hdr, nil
+ // perhaps should add an error when requesting old round for better testing
+}
+
// AccountData returns a version of the account that is good enough for
// satisfying AVM needs. (balance, calc minbalance, and authaddr)
func (l *Ledger) AccountData(addr basics.Address) (ledgercore.AccountData, error) {
@@ -802,7 +824,8 @@ func (l *Ledger) DelKey(addr basics.Address, aidx basics.AppIndex, global bool,
func (l *Ledger) round() basics.Round {
if l.rnd == basics.Round(0) {
- l.rnd = basics.Round(rand.Uint32() + 1)
+ // Something big enough to shake out bugs from width
+ l.rnd = basics.Round(uint64(math.MaxUint32) + 5)
}
return l.rnd
}
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index af554fb22..dc5627422 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -24,7 +24,7 @@ import (
)
// LogicVersion defines default assembler and max eval versions
-const LogicVersion = 7
+const LogicVersion = 8
// rekeyingEnabledVersion is the version of TEAL where RekeyTo functionality
// was enabled. This is important to remember so that old TEAL accounts cannot
@@ -32,7 +32,7 @@ const LogicVersion = 7
const rekeyingEnabledVersion = 2
// appsEnabledVersion is the version of TEAL where ApplicationCall
-// functionality was enabled. We use this to disallow v0 and v1 TEAL programs
+// functionality was enabled. We use this to disallow v0 and v1 programs
// from being used with applications. Do not edit!
const appsEnabledVersion = 2
@@ -62,11 +62,13 @@ const createdResourcesVersion = 6
// field.
const appAddressAvailableVersion = 7
+const fidoVersion = 7 // base64, json, secp256r1
+const randomnessVersion = 7 // vrf_verify, block
+
// EXPERIMENTAL. These should be revisited whenever a new LogicSigVersion is
// moved from vFuture to a new consensus version. If they remain unready, bump
// their version, and fixup TestAssemble() in assembler_test.go.
-const fidoVersion = 7 // base64, json, secp256r1
-const pairingVersion = 7 // bn256 opcodes. will add bls12-381, and unify the available opcodes.// experimental-
+const pairingVersion = 8 // bn256 opcodes. will add bls12-381, and unify the available opcodes.
type linearCost struct {
baseCost int
@@ -75,17 +77,18 @@ type linearCost struct {
depth int
}
-// divideCeilUnsafely provides `math.Ceil` semantics using integer division. The technique avoids slower floating point operations as suggested in https://stackoverflow.com/a/2745086.
+// divCeil provides `math.Ceil` semantics using integer division. The technique avoids slower floating point operations as suggested in https://stackoverflow.com/a/2745086.
// The method does _not_ check for divide-by-zero.
-func divideCeilUnsafely(numerator int, denominator int) int {
+func divCeil(numerator int, denominator int) int {
return (numerator + denominator - 1) / denominator
}
func (lc *linearCost) compute(stack []stackValue) int {
cost := lc.baseCost
if lc.chunkCost != 0 && lc.chunkSize != 0 {
- // Uses divideCeilUnsafely rather than (len/size) to match how Ethereum discretizes hashing costs.
- cost += divideCeilUnsafely(lc.chunkCost*len(stack[len(stack)-1-lc.depth].Bytes), lc.chunkSize)
+ // Uses divCeil rather than (count/chunkSize) to match how Ethereum discretizes hashing costs.
+ count := len(stack[len(stack)-1-lc.depth].Bytes)
+ cost += lc.chunkCost * divCeil(count, lc.chunkSize)
}
return cost
}
@@ -341,7 +344,7 @@ type OpSpec struct {
Name string
op evalFunc // evaluate the op
Proto
- Version uint64 // TEAL version opcode introduced
+ Version uint64 // AVM version opcode introduced
OpDetails // Special cost or bytecode layout considerations
}
@@ -371,9 +374,9 @@ var OpSpecs = []OpSpec{
{0x02, "keccak256", opKeccak256, proto("b:b"), 1, costly(26)},
{0x03, "sha512_256", opSHA512_256, proto("b:b"), 1, costly(9)},
- // Cost of these opcodes increases in TEAL version 2 based on measured
+ // Cost of these opcodes increases in AVM version 2 based on measured
// performance. Should be able to run max hashes during stateful TEAL
- // and achieve reasonable TPS. Same opcode for different TEAL versions
+ // and achieve reasonable TPS. Same opcode for different versions
// is OK.
{0x01, "sha256", opSHA256, proto("b:b"), 2, costly(35)},
{0x02, "keccak256", opKeccak256, proto("b:b"), 2, costly(130)},
@@ -438,20 +441,17 @@ var OpSpecs = []OpSpec{
{0x2e, "arg_1", opArg1, proto(":b"), 1, only(modeSig)},
{0x2f, "arg_2", opArg2, proto(":b"), 1, only(modeSig)},
{0x30, "arg_3", opArg3, proto(":b"), 1, only(modeSig)},
+ // txn, gtxn, and gtxns are also implemented as pseudoOps to choose
+ // between scalar and array version based on number of immediates.
{0x31, "txn", opTxn, proto(":a"), 1, field("f", &TxnScalarFields)},
- // It is ok to have the same opcode for different TEAL versions.
- // This 'txn' asm command supports additional argument in version 2 and
- // generates 'txna' opcode in that particular case
- {0x31, "txn", opTxn, proto(":a"), 2, field("f", &TxnFields).assembler(asmTxn2)},
{0x32, "global", opGlobal, proto(":a"), 1, field("f", &GlobalFields)},
{0x33, "gtxn", opGtxn, proto(":a"), 1, immediates("t", "f").field("f", &TxnScalarFields)},
- {0x33, "gtxn", opGtxn, proto(":a"), 2, immediates("t", "f").field("f", &TxnFields).assembler(asmGtxn2)},
{0x34, "load", opLoad, proto(":a"), 1, stacky(typeLoad, "i")},
{0x35, "store", opStore, proto("a:"), 1, stacky(typeStore, "i")},
{0x36, "txna", opTxna, proto(":a"), 2, immediates("f", "i").field("f", &TxnArrayFields)},
{0x37, "gtxna", opGtxna, proto(":a"), 2, immediates("t", "f", "i").field("f", &TxnArrayFields)},
// Like gtxn, but gets txn index from stack, rather than immediate arg
- {0x38, "gtxns", opGtxns, proto("i:a"), 3, immediates("f").field("f", &TxnFields).assembler(asmGtxns)},
+ {0x38, "gtxns", opGtxns, proto("i:a"), 3, immediates("f").field("f", &TxnScalarFields)},
{0x39, "gtxnsa", opGtxnsa, proto("i:a"), 3, immediates("f", "i").field("f", &TxnArrayFields)},
// Group scratch space access
{0x3a, "gload", opGload, proto(":a"), 4, immediates("t", "i").only(modeApp)},
@@ -592,6 +592,10 @@ var OpSpecs = []OpSpec{
{0xc4, "gloadss", opGloadss, proto("ii:a"), 6, only(modeApp)},
{0xc5, "itxnas", opItxnas, proto("i:a"), 6, field("f", &TxnArrayFields).only(modeApp)},
{0xc6, "gitxnas", opGitxnas, proto("i:a"), 6, immediates("t", "f").field("f", &TxnArrayFields).only(modeApp)},
+
+ // randomness support
+ {0xd0, "vrf_verify", opVrfVerify, proto("bbb:bi"), randomnessVersion, field("s", &VrfStandards).costs(5700)},
+ {0xd1, "block", opBlock, proto("i:a"), randomnessVersion, field("f", &BlockFields)},
}
type sortByOpcode []OpSpec
@@ -652,15 +656,15 @@ var opsByOpcode [LogicVersion + 1][256]OpSpec
// OpsByName map for each version, mapping opcode name to OpSpec
var OpsByName [LogicVersion + 1]map[string]OpSpec
-// Migration from TEAL v1 to TEAL v2.
-// TEAL v1 allowed execution of program with version 0.
-// With TEAL v2 opcode versions are introduced and they are bound to every opcode.
-// There is no opcodes with version 0 so that TEAL v2 evaluator rejects any program with version 0.
-// To preserve backward compatibility version 0 array is populated with TEAL v1 opcodes
+// Migration from v1 to v2.
+// v1 allowed execution of program with version 0.
+// With v2 opcode versions are introduced and they are bound to every opcode.
+// There is no opcodes with version 0 so that v2 evaluator rejects any program with version 0.
+// To preserve backward compatibility version 0 array is populated with v1 opcodes
// with the version overwritten to 0.
func init() {
// First, initialize baseline v1 opcodes.
- // Zero (empty) version is an alias for TEAL v1 opcodes and needed for compatibility with v1 code.
+ // Zero (empty) version is an alias for v1 opcodes and needed for compatibility with v1 code.
OpsByName[0] = make(map[string]OpSpec, 256)
OpsByName[1] = make(map[string]OpSpec, 256)
for _, oi := range OpSpecs {
@@ -674,7 +678,7 @@ func init() {
OpsByName[1][oi.Name] = oi
}
}
- // Start from v2 TEAL and higher,
+ // Start from v2 and higher,
// copy lower version opcodes and overwrite matching version
for v := uint64(2); v <= evalMaxVersion; v++ {
OpsByName[v] = make(map[string]OpSpec, 256)
diff --git a/data/transactions/logic/opcodes_test.go b/data/transactions/logic/opcodes_test.go
index 68a0ef9c9..4acbc040e 100644
--- a/data/transactions/logic/opcodes_test.go
+++ b/data/transactions/logic/opcodes_test.go
@@ -207,7 +207,7 @@ func TestOpcodesVersioningV2(t *testing.T) {
// hardcode and ensure amount of new v2 opcodes
newOpcodes := 22
- overwritten := 5 // sha256, keccak256, sha512_256, txn, gtxn
+ overwritten := 3 // sha256, keccak256, sha512_256
require.Equal(t, newOpcodes+overwritten, cntAdded)
require.Equal(t, cntv2, cntv1+newOpcodes)
diff --git a/data/transactions/logic/sourcemap.go b/data/transactions/logic/sourcemap.go
index 3ffafd5e5..f78a1b88e 100644
--- a/data/transactions/logic/sourcemap.go
+++ b/data/transactions/logic/sourcemap.go
@@ -36,7 +36,9 @@ type SourceMap struct {
SourceRoot string `json:"sourceRoot,omitempty"`
Sources []string `json:"sources"`
Names []string `json:"names"`
- Mapping string `json:"mapping"`
+ // Mapping field is deprecated. Use `Mappings` field instead.
+ Mapping string `json:"mapping"`
+ Mappings string `json:"mappings"`
}
// GetSourceMap returns a struct containing details about
@@ -49,24 +51,25 @@ func GetSourceMap(sourceNames []string, offsetToLine map[int]int) SourceMap {
}
}
- // Array where index is the PC and value is the line.
+ // Array where index is the PC and value is the line for `mappings` field.
+ prevSourceLine := 0
pcToLine := make([]string, maxPC+1)
for pc := range pcToLine {
if line, ok := offsetToLine[pc]; ok {
- pcToLine[pc] = MakeSourceMapLine(0, 0, line, 0)
+ pcToLine[pc] = MakeSourceMapLine(0, 0, line-prevSourceLine, 0)
+ prevSourceLine = line
} else {
pcToLine[pc] = ""
}
}
- // Encode the source map into a string
- encodedMapping := strings.Join(pcToLine, ";")
-
return SourceMap{
Version: sourceMapVersion,
Sources: sourceNames,
Names: []string{}, // TEAL code does not generate any names.
- Mapping: encodedMapping,
+ // Mapping is deprecated, and only for backwards compatibility.
+ Mapping: strings.Join(pcToLine, ";"),
+ Mappings: strings.Join(pcToLine, ";"),
}
}
diff --git a/data/transactions/logic/sourcemap_test.go b/data/transactions/logic/sourcemap_test.go
index 718535ec4..7407f39dd 100644
--- a/data/transactions/logic/sourcemap_test.go
+++ b/data/transactions/logic/sourcemap_test.go
@@ -40,13 +40,15 @@ func TestGetSourceMap(t *testing.T) {
a.Equal(sourceNames, actualSourceMap.Sources)
a.Equal([]string{}, actualSourceMap.Names)
- // Check encoding for each line.
- splitMapping := strings.Split(actualSourceMap.Mapping, ";")
- for pc := range splitMapping {
+ // Check encoding for `mappings` field.
+ splitMappings := strings.Split(actualSourceMap.Mappings, ";")
+ prevLine := 0
+ for pc := range splitMappings {
if line, ok := offsetToLine[pc]; ok {
- a.Equal(MakeSourceMapLine(0, 0, line, 0), splitMapping[pc])
+ a.Equal(MakeSourceMapLine(0, 0, line-prevLine, 0), splitMappings[pc])
+ prevLine = line
} else {
- a.Equal("", splitMapping[pc])
+ a.Equal("", splitMappings[pc])
}
}
}
diff --git a/data/transactions/logic/teal.tmLanguage.json b/data/transactions/logic/teal.tmLanguage.json
index 31edc4418..127f12961 100644
--- a/data/transactions/logic/teal.tmLanguage.json
+++ b/data/transactions/logic/teal.tmLanguage.json
@@ -72,11 +72,11 @@
},
{
"name": "keyword.other.unit.teal",
- "match": "^(acct_params_get|app_global_del|app_global_get|app_global_get_ex|app_global_put|app_local_del|app_local_get|app_local_get_ex|app_local_put|app_opted_in|app_params_get|asset_holding_get|asset_params_get|balance|log|min_balance)\\b"
+ "match": "^(acct_params_get|app_global_del|app_global_get|app_global_get_ex|app_global_put|app_local_del|app_local_get|app_local_get_ex|app_local_put|app_opted_in|app_params_get|asset_holding_get|asset_params_get|balance|block|log|min_balance)\\b"
},
{
"name": "keyword.operator.teal",
- "match": "^(\\!|\\!\\=|%|\u0026|\u0026\u0026|\\*|\\+|\\-|/|\\\u003c|\\\u003c\\=|\\=\\=|\\\u003e|\\\u003e\\=|\\^|addw|bitlen|bn256_add|bn256_pairing|bn256_scalar_mul|btoi|concat|divmodw|divw|ecdsa_pk_decompress|ecdsa_pk_recover|ecdsa_verify|ed25519verify|ed25519verify_bare|exp|expw|getbit|getbyte|itob|keccak256|len|mulw|setbit|setbyte|sha256|sha3_256|sha512_256|shl|shr|sqrt|\\||\\|\\||\\~|b\\!\\=|b%|b\\*|b\\+|b\\-|b/|b\\\u003c|b\\\u003c\\=|b\\=\\=|b\\\u003e|b\\\u003e\\=|bsqrt|b\u0026|b\\^|b\\||b\\~|base64_decode|extract|extract3|extract_uint16|extract_uint32|extract_uint64|json_ref|replace2|replace3|substring|substring3|gitxn|gitxna|gitxnas|itxn|itxn_begin|itxn_field|itxn_next|itxn_submit|itxna|itxnas)\\b"
+ "match": "^(\\!|\\!\\=|%|\u0026|\u0026\u0026|\\*|\\+|\\-|/|\\\u003c|\\\u003c\\=|\\=\\=|\\\u003e|\\\u003e\\=|\\^|addw|bitlen|bn256_add|bn256_pairing|bn256_scalar_mul|btoi|concat|divmodw|divw|ecdsa_pk_decompress|ecdsa_pk_recover|ecdsa_verify|ed25519verify|ed25519verify_bare|exp|expw|getbit|getbyte|itob|keccak256|len|mulw|setbit|setbyte|sha256|sha3_256|sha512_256|shl|shr|sqrt|vrf_verify|\\||\\|\\||\\~|b\\!\\=|b%|b\\*|b\\+|b\\-|b/|b\\\u003c|b\\\u003c\\=|b\\=\\=|b\\\u003e|b\\\u003e\\=|bsqrt|b\u0026|b\\^|b\\||b\\~|base64_decode|extract|extract3|extract_uint16|extract_uint32|extract_uint64|json_ref|replace2|replace3|substring|substring3|gitxn|gitxna|gitxnas|itxn|itxn_begin|itxn_field|itxn_next|itxn_submit|itxna|itxnas)\\b"
}
]
},
@@ -112,7 +112,7 @@
},
{
"name": "variable.parameter.teal",
- "match": "\\b(unknown|pay|keyreg|acfg|axfer|afrz|appl|NoOp|OptIn|CloseOut|ClearState|UpdateApplication|DeleteApplication|Secp256k1|Secp256r1|Sender|Fee|FirstValid|FirstValidTime|LastValid|Note|Lease|Receiver|Amount|CloseRemainderTo|VotePK|SelectionPK|VoteFirst|VoteLast|VoteKeyDilution|Type|TypeEnum|XferAsset|AssetAmount|AssetSender|AssetReceiver|AssetCloseTo|GroupIndex|TxID|ApplicationID|OnCompletion|ApplicationArgs|NumAppArgs|Accounts|NumAccounts|ApprovalProgram|ClearStateProgram|RekeyTo|ConfigAsset|ConfigAssetTotal|ConfigAssetDecimals|ConfigAssetDefaultFrozen|ConfigAssetUnitName|ConfigAssetName|ConfigAssetURL|ConfigAssetMetadataHash|ConfigAssetManager|ConfigAssetReserve|ConfigAssetFreeze|ConfigAssetClawback|FreezeAsset|FreezeAssetAccount|FreezeAssetFrozen|Assets|NumAssets|Applications|NumApplications|GlobalNumUint|GlobalNumByteSlice|LocalNumUint|LocalNumByteSlice|ExtraProgramPages|Nonparticipation|Logs|NumLogs|CreatedAssetID|CreatedApplicationID|LastLog|StateProofPK|MinTxnFee|MinBalance|MaxTxnLife|ZeroAddress|GroupSize|LogicSigVersion|Round|LatestTimestamp|CurrentApplicationID|CreatorAddress|CurrentApplicationAddress|GroupID|OpcodeBudget|CallerApplicationID|CallerApplicationAddress|URLEncoding|StdEncoding|JSONString|JSONUint64|JSONObject|AssetBalance|AssetFrozen|AssetTotal|AssetDecimals|AssetDefaultFrozen|AssetUnitName|AssetName|AssetURL|AssetMetadataHash|AssetManager|AssetReserve|AssetFreeze|AssetClawback|AssetCreator|AppApprovalProgram|AppClearStateProgram|AppGlobalNumUint|AppGlobalNumByteSlice|AppLocalNumUint|AppLocalNumByteSlice|AppExtraProgramPages|AppCreator|AppAddress|AcctBalance|AcctMinBalance|AcctAuthAddr)\\b"
+ "match": "\\b(unknown|pay|keyreg|acfg|axfer|afrz|appl|NoOp|OptIn|CloseOut|ClearState|UpdateApplication|DeleteApplication|Secp256k1|Secp256r1|Sender|Fee|FirstValid|FirstValidTime|LastValid|Note|Lease|Receiver|Amount|CloseRemainderTo|VotePK|SelectionPK|VoteFirst|VoteLast|VoteKeyDilution|Type|TypeEnum|XferAsset|AssetAmount|AssetSender|AssetReceiver|AssetCloseTo|GroupIndex|TxID|ApplicationID|OnCompletion|NumAppArgs|NumAccounts|ApprovalProgram|ClearStateProgram|RekeyTo|ConfigAsset|ConfigAssetTotal|ConfigAssetDecimals|ConfigAssetDefaultFrozen|ConfigAssetUnitName|ConfigAssetName|ConfigAssetURL|ConfigAssetMetadataHash|ConfigAssetManager|ConfigAssetReserve|ConfigAssetFreeze|ConfigAssetClawback|FreezeAsset|FreezeAssetAccount|FreezeAssetFrozen|NumAssets|NumApplications|GlobalNumUint|GlobalNumByteSlice|LocalNumUint|LocalNumByteSlice|ExtraProgramPages|Nonparticipation|NumLogs|CreatedAssetID|CreatedApplicationID|LastLog|StateProofPK|NumApprovalProgramPages|NumClearStateProgramPages|MinTxnFee|MinBalance|MaxTxnLife|ZeroAddress|GroupSize|LogicSigVersion|Round|LatestTimestamp|CurrentApplicationID|CreatorAddress|CurrentApplicationAddress|GroupID|OpcodeBudget|CallerApplicationID|CallerApplicationAddress|ApplicationArgs|Accounts|Assets|Applications|Logs|ApprovalProgramPages|ClearStateProgramPages|URLEncoding|StdEncoding|JSONString|JSONUint64|JSONObject|AssetBalance|AssetFrozen|AssetTotal|AssetDecimals|AssetDefaultFrozen|AssetUnitName|AssetName|AssetURL|AssetMetadataHash|AssetManager|AssetReserve|AssetFreeze|AssetClawback|AssetCreator|AppApprovalProgram|AppClearStateProgram|AppGlobalNumUint|AppGlobalNumByteSlice|AppLocalNumUint|AppLocalNumByteSlice|AppExtraProgramPages|AppCreator|AppAddress|AcctBalance|AcctMinBalance|AcctAuthAddr|VrfAlgorand|BlkSeed|BlkTimestamp)\\b"
}
]
},
diff --git a/data/transactions/msgp_gen.go b/data/transactions/msgp_gen.go
index 299d4da24..779a74d59 100644
--- a/data/transactions/msgp_gen.go
+++ b/data/transactions/msgp_gen.go
@@ -53,14 +53,6 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
-// CompactCertTxnFields
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
// EvalDelta
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -149,6 +141,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// StateProofTxnFields
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// Transaction
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -1517,158 +1517,6 @@ func (z *AssetTransferTxnFields) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
-func (z *CompactCertTxnFields) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0001Len := uint32(3)
- var zb0001Mask uint8 /* 4 bits */
- if (*z).Cert.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x2
- }
- if (*z).CertRound.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x4
- }
- if (*z).CertType.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x8
- }
- // variable map header, size zb0001Len
- o = append(o, 0x80|uint8(zb0001Len))
- if zb0001Len != 0 {
- if (zb0001Mask & 0x2) == 0 { // if not empty
- // string "cert"
- o = append(o, 0xa4, 0x63, 0x65, 0x72, 0x74)
- o = (*z).Cert.MarshalMsg(o)
- }
- if (zb0001Mask & 0x4) == 0 { // if not empty
- // string "certrnd"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x72, 0x6e, 0x64)
- o = (*z).CertRound.MarshalMsg(o)
- }
- if (zb0001Mask & 0x8) == 0 { // if not empty
- // string "certtype"
- o = append(o, 0xa8, 0x63, 0x65, 0x72, 0x74, 0x74, 0x79, 0x70, 0x65)
- o = (*z).CertType.MarshalMsg(o)
- }
- }
- return
-}
-
-func (_ *CompactCertTxnFields) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*CompactCertTxnFields)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *CompactCertTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0001 int
- var zb0002 bool
- zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).CertRound.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertRound")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).CertType.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertType")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).Cert.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Cert")
- return
- }
- }
- if zb0001 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0001)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 {
- (*z) = CompactCertTxnFields{}
- }
- for zb0001 > 0 {
- zb0001--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "certrnd":
- bts, err = (*z).CertRound.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertRound")
- return
- }
- case "certtype":
- bts, err = (*z).CertType.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertType")
- return
- }
- case "cert":
- bts, err = (*z).Cert.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Cert")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *CompactCertTxnFields) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*CompactCertTxnFields)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *CompactCertTxnFields) Msgsize() (s int) {
- s = 1 + 8 + (*z).CertRound.Msgsize() + 9 + (*z).CertType.Msgsize() + 5 + (*z).Cert.Msgsize()
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *CompactCertTxnFields) MsgIsZero() bool {
- return ((*z).CertRound.MsgIsZero()) && ((*z).CertType.MsgIsZero()) && ((*z).Cert.MsgIsZero())
-}
-
-// MarshalMsg implements msgp.Marshaler
func (z *EvalDelta) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -4197,6 +4045,158 @@ func (z *SignedTxnWithAD) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *StateProofTxnFields) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(3)
+ var zb0001Mask uint8 /* 4 bits */
+ if (*z).StateProof.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if (*z).Message.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ if (*z).StateProofType.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x8
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "sp"
+ o = append(o, 0xa2, 0x73, 0x70)
+ o = (*z).StateProof.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "spmsg"
+ o = append(o, 0xa5, 0x73, 0x70, 0x6d, 0x73, 0x67)
+ o = (*z).Message.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x8) == 0 { // if not empty
+ // string "sptype"
+ o = append(o, 0xa6, 0x73, 0x70, 0x74, 0x79, 0x70, 0x65)
+ o = (*z).StateProofType.MarshalMsg(o)
+ }
+ }
+ return
+}
+
+func (_ *StateProofTxnFields) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*StateProofTxnFields)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *StateProofTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).StateProofType.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "StateProofType")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).StateProof.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "StateProof")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Message.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Message")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = StateProofTxnFields{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "sptype":
+ bts, err = (*z).StateProofType.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "StateProofType")
+ return
+ }
+ case "sp":
+ bts, err = (*z).StateProof.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "StateProof")
+ return
+ }
+ case "spmsg":
+ bts, err = (*z).Message.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Message")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *StateProofTxnFields) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*StateProofTxnFields)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *StateProofTxnFields) Msgsize() (s int) {
+ s = 1 + 7 + (*z).StateProofType.Msgsize() + 3 + (*z).StateProof.Msgsize() + 6 + (*z).Message.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *StateProofTxnFields) MsgIsZero() bool {
+ return ((*z).StateProofType.MsgIsZero()) && ((*z).StateProof.MsgIsZero()) && ((*z).Message.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *Transaction) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -4278,83 +4278,83 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte) {
zb0006Len--
zb0006Mask |= 0x8000000
}
- if (*z).CompactCertTxnFields.Cert.MsgIsZero() {
+ if (*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero() {
zb0006Len--
zb0006Mask |= 0x10000000
}
- if (*z).CompactCertTxnFields.CertRound.MsgIsZero() {
+ if (*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero() {
zb0006Len--
zb0006Mask |= 0x20000000
}
- if (*z).CompactCertTxnFields.CertType.MsgIsZero() {
+ if (*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero() {
zb0006Len--
zb0006Mask |= 0x40000000
}
- if (*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero() {
+ if (*z).Header.Fee.MsgIsZero() {
zb0006Len--
zb0006Mask |= 0x80000000
}
- if (*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero() {
+ if (*z).Header.FirstValid.MsgIsZero() {
zb0006Len--
zb0006Mask |= 0x100000000
}
- if (*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero() {
+ if (*z).Header.GenesisID == "" {
zb0006Len--
zb0006Mask |= 0x200000000
}
- if (*z).Header.Fee.MsgIsZero() {
+ if (*z).Header.GenesisHash.MsgIsZero() {
zb0006Len--
zb0006Mask |= 0x400000000
}
- if (*z).Header.FirstValid.MsgIsZero() {
+ if (*z).Header.Group.MsgIsZero() {
zb0006Len--
zb0006Mask |= 0x800000000
}
- if (*z).Header.GenesisID == "" {
+ if (*z).Header.LastValid.MsgIsZero() {
zb0006Len--
zb0006Mask |= 0x1000000000
}
- if (*z).Header.GenesisHash.MsgIsZero() {
+ if (*z).Header.Lease == ([32]byte{}) {
zb0006Len--
zb0006Mask |= 0x2000000000
}
- if (*z).Header.Group.MsgIsZero() {
+ if (*z).KeyregTxnFields.Nonparticipation == false {
zb0006Len--
zb0006Mask |= 0x4000000000
}
- if (*z).Header.LastValid.MsgIsZero() {
+ if len((*z).Header.Note) == 0 {
zb0006Len--
zb0006Mask |= 0x8000000000
}
- if (*z).Header.Lease == ([32]byte{}) {
+ if (*z).PaymentTxnFields.Receiver.MsgIsZero() {
zb0006Len--
zb0006Mask |= 0x10000000000
}
- if (*z).KeyregTxnFields.Nonparticipation == false {
+ if (*z).Header.RekeyTo.MsgIsZero() {
zb0006Len--
zb0006Mask |= 0x20000000000
}
- if len((*z).Header.Note) == 0 {
+ if (*z).KeyregTxnFields.SelectionPK.MsgIsZero() {
zb0006Len--
zb0006Mask |= 0x40000000000
}
- if (*z).PaymentTxnFields.Receiver.MsgIsZero() {
+ if (*z).Header.Sender.MsgIsZero() {
zb0006Len--
zb0006Mask |= 0x80000000000
}
- if (*z).Header.RekeyTo.MsgIsZero() {
+ if (*z).StateProofTxnFields.StateProof.MsgIsZero() {
zb0006Len--
zb0006Mask |= 0x100000000000
}
- if (*z).KeyregTxnFields.SelectionPK.MsgIsZero() {
+ if (*z).StateProofTxnFields.Message.MsgIsZero() {
zb0006Len--
zb0006Mask |= 0x200000000000
}
- if (*z).Header.Sender.MsgIsZero() {
+ if (*z).KeyregTxnFields.StateProofPK.MsgIsZero() {
zb0006Len--
zb0006Mask |= 0x400000000000
}
- if (*z).KeyregTxnFields.StateProofPK.MsgIsZero() {
+ if (*z).StateProofTxnFields.StateProofType.MsgIsZero() {
zb0006Len--
zb0006Mask |= 0x800000000000
}
@@ -4509,105 +4509,105 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte) {
o = (*z).AssetConfigTxnFields.ConfigAsset.MarshalMsg(o)
}
if (zb0006Mask & 0x10000000) == 0 { // if not empty
- // string "cert"
- o = append(o, 0xa4, 0x63, 0x65, 0x72, 0x74)
- o = (*z).CompactCertTxnFields.Cert.MarshalMsg(o)
- }
- if (zb0006Mask & 0x20000000) == 0 { // if not empty
- // string "certrnd"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x72, 0x6e, 0x64)
- o = (*z).CompactCertTxnFields.CertRound.MarshalMsg(o)
- }
- if (zb0006Mask & 0x40000000) == 0 { // if not empty
- // string "certtype"
- o = append(o, 0xa8, 0x63, 0x65, 0x72, 0x74, 0x74, 0x79, 0x70, 0x65)
- o = (*z).CompactCertTxnFields.CertType.MarshalMsg(o)
- }
- if (zb0006Mask & 0x80000000) == 0 { // if not empty
// string "close"
o = append(o, 0xa5, 0x63, 0x6c, 0x6f, 0x73, 0x65)
o = (*z).PaymentTxnFields.CloseRemainderTo.MarshalMsg(o)
}
- if (zb0006Mask & 0x100000000) == 0 { // if not empty
+ if (zb0006Mask & 0x20000000) == 0 { // if not empty
// string "fadd"
o = append(o, 0xa4, 0x66, 0x61, 0x64, 0x64)
o = (*z).AssetFreezeTxnFields.FreezeAccount.MarshalMsg(o)
}
- if (zb0006Mask & 0x200000000) == 0 { // if not empty
+ if (zb0006Mask & 0x40000000) == 0 { // if not empty
// string "faid"
o = append(o, 0xa4, 0x66, 0x61, 0x69, 0x64)
o = (*z).AssetFreezeTxnFields.FreezeAsset.MarshalMsg(o)
}
- if (zb0006Mask & 0x400000000) == 0 { // if not empty
+ if (zb0006Mask & 0x80000000) == 0 { // if not empty
// string "fee"
o = append(o, 0xa3, 0x66, 0x65, 0x65)
o = (*z).Header.Fee.MarshalMsg(o)
}
- if (zb0006Mask & 0x800000000) == 0 { // if not empty
+ if (zb0006Mask & 0x100000000) == 0 { // if not empty
// string "fv"
o = append(o, 0xa2, 0x66, 0x76)
o = (*z).Header.FirstValid.MarshalMsg(o)
}
- if (zb0006Mask & 0x1000000000) == 0 { // if not empty
+ if (zb0006Mask & 0x200000000) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).Header.GenesisID)
}
- if (zb0006Mask & 0x2000000000) == 0 { // if not empty
+ if (zb0006Mask & 0x400000000) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).Header.GenesisHash.MarshalMsg(o)
}
- if (zb0006Mask & 0x4000000000) == 0 { // if not empty
+ if (zb0006Mask & 0x800000000) == 0 { // if not empty
// string "grp"
o = append(o, 0xa3, 0x67, 0x72, 0x70)
o = (*z).Header.Group.MarshalMsg(o)
}
- if (zb0006Mask & 0x8000000000) == 0 { // if not empty
+ if (zb0006Mask & 0x1000000000) == 0 { // if not empty
// string "lv"
o = append(o, 0xa2, 0x6c, 0x76)
o = (*z).Header.LastValid.MarshalMsg(o)
}
- if (zb0006Mask & 0x10000000000) == 0 { // if not empty
+ if (zb0006Mask & 0x2000000000) == 0 { // if not empty
// string "lx"
o = append(o, 0xa2, 0x6c, 0x78)
o = msgp.AppendBytes(o, ((*z).Header.Lease)[:])
}
- if (zb0006Mask & 0x20000000000) == 0 { // if not empty
+ if (zb0006Mask & 0x4000000000) == 0 { // if not empty
// string "nonpart"
o = append(o, 0xa7, 0x6e, 0x6f, 0x6e, 0x70, 0x61, 0x72, 0x74)
o = msgp.AppendBool(o, (*z).KeyregTxnFields.Nonparticipation)
}
- if (zb0006Mask & 0x40000000000) == 0 { // if not empty
+ if (zb0006Mask & 0x8000000000) == 0 { // if not empty
// string "note"
o = append(o, 0xa4, 0x6e, 0x6f, 0x74, 0x65)
o = msgp.AppendBytes(o, (*z).Header.Note)
}
- if (zb0006Mask & 0x80000000000) == 0 { // if not empty
+ if (zb0006Mask & 0x10000000000) == 0 { // if not empty
// string "rcv"
o = append(o, 0xa3, 0x72, 0x63, 0x76)
o = (*z).PaymentTxnFields.Receiver.MarshalMsg(o)
}
- if (zb0006Mask & 0x100000000000) == 0 { // if not empty
+ if (zb0006Mask & 0x20000000000) == 0 { // if not empty
// string "rekey"
o = append(o, 0xa5, 0x72, 0x65, 0x6b, 0x65, 0x79)
o = (*z).Header.RekeyTo.MarshalMsg(o)
}
- if (zb0006Mask & 0x200000000000) == 0 { // if not empty
+ if (zb0006Mask & 0x40000000000) == 0 { // if not empty
// string "selkey"
o = append(o, 0xa6, 0x73, 0x65, 0x6c, 0x6b, 0x65, 0x79)
o = (*z).KeyregTxnFields.SelectionPK.MarshalMsg(o)
}
- if (zb0006Mask & 0x400000000000) == 0 { // if not empty
+ if (zb0006Mask & 0x80000000000) == 0 { // if not empty
// string "snd"
o = append(o, 0xa3, 0x73, 0x6e, 0x64)
o = (*z).Header.Sender.MarshalMsg(o)
}
- if (zb0006Mask & 0x800000000000) == 0 { // if not empty
+ if (zb0006Mask & 0x100000000000) == 0 { // if not empty
+ // string "sp"
+ o = append(o, 0xa2, 0x73, 0x70)
+ o = (*z).StateProofTxnFields.StateProof.MarshalMsg(o)
+ }
+ if (zb0006Mask & 0x200000000000) == 0 { // if not empty
+ // string "spmsg"
+ o = append(o, 0xa5, 0x73, 0x70, 0x6d, 0x73, 0x67)
+ o = (*z).StateProofTxnFields.Message.MarshalMsg(o)
+ }
+ if (zb0006Mask & 0x400000000000) == 0 { // if not empty
// string "sprfkey"
o = append(o, 0xa7, 0x73, 0x70, 0x72, 0x66, 0x6b, 0x65, 0x79)
o = (*z).KeyregTxnFields.StateProofPK.MarshalMsg(o)
}
+ if (zb0006Mask & 0x800000000000) == 0 { // if not empty
+ // string "sptype"
+ o = append(o, 0xa6, 0x73, 0x70, 0x74, 0x79, 0x70, 0x65)
+ o = (*z).StateProofTxnFields.StateProofType.MarshalMsg(o)
+ }
if (zb0006Mask & 0x1000000000000) == 0 { // if not empty
// string "type"
o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65)
@@ -5116,25 +5116,25 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0006 > 0 {
zb0006--
- bts, err = (*z).CompactCertTxnFields.CertRound.UnmarshalMsg(bts)
+ bts, err = (*z).StateProofTxnFields.StateProofType.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertRound")
+ err = msgp.WrapError(err, "struct-from-array", "StateProofType")
return
}
}
if zb0006 > 0 {
zb0006--
- bts, err = (*z).CompactCertTxnFields.CertType.UnmarshalMsg(bts)
+ bts, err = (*z).StateProofTxnFields.StateProof.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertType")
+ err = msgp.WrapError(err, "struct-from-array", "StateProof")
return
}
}
if zb0006 > 0 {
zb0006--
- bts, err = (*z).CompactCertTxnFields.Cert.UnmarshalMsg(bts)
+ bts, err = (*z).StateProofTxnFields.Message.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Cert")
+ err = msgp.WrapError(err, "struct-from-array", "Message")
return
}
}
@@ -5531,22 +5531,22 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "ExtraProgramPages")
return
}
- case "certrnd":
- bts, err = (*z).CompactCertTxnFields.CertRound.UnmarshalMsg(bts)
+ case "sptype":
+ bts, err = (*z).StateProofTxnFields.StateProofType.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "CertRound")
+ err = msgp.WrapError(err, "StateProofType")
return
}
- case "certtype":
- bts, err = (*z).CompactCertTxnFields.CertType.UnmarshalMsg(bts)
+ case "sp":
+ bts, err = (*z).StateProofTxnFields.StateProof.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "CertType")
+ err = msgp.WrapError(err, "StateProof")
return
}
- case "cert":
- bts, err = (*z).CompactCertTxnFields.Cert.UnmarshalMsg(bts)
+ case "spmsg":
+ bts, err = (*z).StateProofTxnFields.Message.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "Cert")
+ err = msgp.WrapError(err, "Message")
return
}
default:
@@ -5585,13 +5585,13 @@ func (z *Transaction) Msgsize() (s int) {
for zb0005 := range (*z).ApplicationCallTxnFields.ForeignAssets {
s += (*z).ApplicationCallTxnFields.ForeignAssets[zb0005].Msgsize()
}
- s += 5 + (*z).ApplicationCallTxnFields.LocalStateSchema.Msgsize() + 5 + (*z).ApplicationCallTxnFields.GlobalStateSchema.Msgsize() + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ApprovalProgram) + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ClearStateProgram) + 5 + msgp.Uint32Size + 8 + (*z).CompactCertTxnFields.CertRound.Msgsize() + 9 + (*z).CompactCertTxnFields.CertType.Msgsize() + 5 + (*z).CompactCertTxnFields.Cert.Msgsize()
+ s += 5 + (*z).ApplicationCallTxnFields.LocalStateSchema.Msgsize() + 5 + (*z).ApplicationCallTxnFields.GlobalStateSchema.Msgsize() + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ApprovalProgram) + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ClearStateProgram) + 5 + msgp.Uint32Size + 7 + (*z).StateProofTxnFields.StateProofType.Msgsize() + 3 + (*z).StateProofTxnFields.StateProof.Msgsize() + 6 + (*z).StateProofTxnFields.Message.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
func (z *Transaction) MsgIsZero() bool {
- return ((*z).Type.MsgIsZero()) && ((*z).Header.Sender.MsgIsZero()) && ((*z).Header.Fee.MsgIsZero()) && ((*z).Header.FirstValid.MsgIsZero()) && ((*z).Header.LastValid.MsgIsZero()) && (len((*z).Header.Note) == 0) && ((*z).Header.GenesisID == "") && ((*z).Header.GenesisHash.MsgIsZero()) && ((*z).Header.Group.MsgIsZero()) && ((*z).Header.Lease == ([32]byte{})) && ((*z).Header.RekeyTo.MsgIsZero()) && ((*z).KeyregTxnFields.VotePK.MsgIsZero()) && ((*z).KeyregTxnFields.SelectionPK.MsgIsZero()) && ((*z).KeyregTxnFields.StateProofPK.MsgIsZero()) && ((*z).KeyregTxnFields.VoteFirst.MsgIsZero()) && ((*z).KeyregTxnFields.VoteLast.MsgIsZero()) && ((*z).KeyregTxnFields.VoteKeyDilution == 0) && ((*z).KeyregTxnFields.Nonparticipation == false) && ((*z).PaymentTxnFields.Receiver.MsgIsZero()) && ((*z).PaymentTxnFields.Amount.MsgIsZero()) && ((*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero()) && ((*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero()) && ((*z).AssetConfigTxnFields.AssetParams.MsgIsZero()) && ((*z).AssetTransferTxnFields.XferAsset.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetAmount == 0) && ((*z).AssetTransferTxnFields.AssetSender.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero()) && ((*z).AssetFreezeTxnFields.AssetFrozen == false) && ((*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero()) && ((*z).ApplicationCallTxnFields.OnCompletion == 0) && (len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).ApplicationCallTxnFields.Accounts) == 0) && (len((*z).ApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).ApplicationCallTxnFields.ForeignAssets) == 0) && ((*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero()) && ((*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero()) && (len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0) && (len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0) && ((*z).ApplicationCallTxnFields.ExtraProgramPages == 0) && ((*z).CompactCertTxnFields.CertRound.MsgIsZero()) && ((*z).CompactCertTxnFields.CertType.MsgIsZero()) && ((*z).CompactCertTxnFields.Cert.MsgIsZero())
+ return ((*z).Type.MsgIsZero()) && ((*z).Header.Sender.MsgIsZero()) && ((*z).Header.Fee.MsgIsZero()) && ((*z).Header.FirstValid.MsgIsZero()) && ((*z).Header.LastValid.MsgIsZero()) && (len((*z).Header.Note) == 0) && ((*z).Header.GenesisID == "") && ((*z).Header.GenesisHash.MsgIsZero()) && ((*z).Header.Group.MsgIsZero()) && ((*z).Header.Lease == ([32]byte{})) && ((*z).Header.RekeyTo.MsgIsZero()) && ((*z).KeyregTxnFields.VotePK.MsgIsZero()) && ((*z).KeyregTxnFields.SelectionPK.MsgIsZero()) && ((*z).KeyregTxnFields.StateProofPK.MsgIsZero()) && ((*z).KeyregTxnFields.VoteFirst.MsgIsZero()) && ((*z).KeyregTxnFields.VoteLast.MsgIsZero()) && ((*z).KeyregTxnFields.VoteKeyDilution == 0) && ((*z).KeyregTxnFields.Nonparticipation == false) && ((*z).PaymentTxnFields.Receiver.MsgIsZero()) && ((*z).PaymentTxnFields.Amount.MsgIsZero()) && ((*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero()) && ((*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero()) && ((*z).AssetConfigTxnFields.AssetParams.MsgIsZero()) && ((*z).AssetTransferTxnFields.XferAsset.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetAmount == 0) && ((*z).AssetTransferTxnFields.AssetSender.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero()) && ((*z).AssetFreezeTxnFields.AssetFrozen == false) && ((*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero()) && ((*z).ApplicationCallTxnFields.OnCompletion == 0) && (len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).ApplicationCallTxnFields.Accounts) == 0) && (len((*z).ApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).ApplicationCallTxnFields.ForeignAssets) == 0) && ((*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero()) && ((*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero()) && (len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0) && (len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0) && ((*z).ApplicationCallTxnFields.ExtraProgramPages == 0) && ((*z).StateProofTxnFields.StateProofType.MsgIsZero()) && ((*z).StateProofTxnFields.StateProof.MsgIsZero()) && ((*z).StateProofTxnFields.Message.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
diff --git a/data/transactions/msgp_gen_test.go b/data/transactions/msgp_gen_test.go
index daf552f98..ef4764d45 100644
--- a/data/transactions/msgp_gen_test.go
+++ b/data/transactions/msgp_gen_test.go
@@ -314,66 +314,6 @@ func BenchmarkUnmarshalAssetTransferTxnFields(b *testing.B) {
}
}
-func TestMarshalUnmarshalCompactCertTxnFields(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := CompactCertTxnFields{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingCompactCertTxnFields(t *testing.T) {
- protocol.RunEncodingTest(t, &CompactCertTxnFields{})
-}
-
-func BenchmarkMarshalMsgCompactCertTxnFields(b *testing.B) {
- v := CompactCertTxnFields{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgCompactCertTxnFields(b *testing.B) {
- v := CompactCertTxnFields{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalCompactCertTxnFields(b *testing.B) {
- v := CompactCertTxnFields{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
func TestMarshalUnmarshalEvalDelta(t *testing.T) {
partitiontest.PartitionTest(t)
v := EvalDelta{}
@@ -914,6 +854,66 @@ func BenchmarkUnmarshalSignedTxnWithAD(b *testing.B) {
}
}
+func TestMarshalUnmarshalStateProofTxnFields(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := StateProofTxnFields{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingStateProofTxnFields(t *testing.T) {
+ protocol.RunEncodingTest(t, &StateProofTxnFields{})
+}
+
+func BenchmarkMarshalMsgStateProofTxnFields(b *testing.B) {
+ v := StateProofTxnFields{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgStateProofTxnFields(b *testing.B) {
+ v := StateProofTxnFields{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalStateProofTxnFields(b *testing.B) {
+ v := StateProofTxnFields{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalTransaction(t *testing.T) {
partitiontest.PartitionTest(t)
v := Transaction{}
diff --git a/data/transactions/compactcert.go b/data/transactions/stateproof.go
index 4c0552026..35ffe0ff1 100644
--- a/data/transactions/compactcert.go
+++ b/data/transactions/stateproof.go
@@ -18,40 +18,31 @@ package transactions
import (
"github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/compactcert"
+ "github.com/algorand/go-algorand/crypto/stateproof"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
"github.com/algorand/go-algorand/protocol"
)
-// CompactCertTxnFields captures the fields used for compact cert transactions.
-type CompactCertTxnFields struct {
+// StateProofTxnFields captures the fields used for stateproof transactions.
+type StateProofTxnFields struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
- CertRound basics.Round `codec:"certrnd"`
- CertType protocol.CompactCertType `codec:"certtype"`
- Cert compactcert.Cert `codec:"cert"`
+ StateProofType protocol.StateProofType `codec:"sptype"`
+ StateProof stateproof.StateProof `codec:"sp"`
+ Message stateproofmsg.Message `codec:"spmsg"`
}
-// Empty returns whether the CompactCertTxnFields are all zero,
+// Empty returns whether the StateProofTxnFields are all zero,
// in the sense of being omitted in a msgpack encoding.
-func (cc CompactCertTxnFields) Empty() bool {
- if cc.CertRound != 0 {
- return false
- }
- if !cc.Cert.SigCommit.IsEmpty() || cc.Cert.SignedWeight != 0 {
- return false
- }
- if len(cc.Cert.SigProofs.Path) != 0 || len(cc.Cert.PartProofs.Path) != 0 {
- return false
- }
- if len(cc.Cert.Reveals) != 0 {
- return false
- }
- return true
+func (sp StateProofTxnFields) Empty() bool {
+ return sp.StateProofType == protocol.StateProofBasic &&
+ sp.StateProof.MsgIsZero() &&
+ sp.Message.MsgIsZero()
}
//msgp:ignore specialAddr
-// specialAddr is used to form a unique address that will send out compact certs.
+// specialAddr is used to form a unique address that will send out state proofs.
type specialAddr string
// ToBeHashed implements the crypto.Hashable interface
@@ -59,9 +50,9 @@ func (a specialAddr) ToBeHashed() (protocol.HashID, []byte) {
return protocol.SpecialAddr, []byte(a)
}
-// CompactCertSender is the computed address for sending out compact certs.
-var CompactCertSender basics.Address
+// StateProofSender is the computed address for sending out state proofs.
+var StateProofSender basics.Address
func init() {
- CompactCertSender = basics.Address(crypto.HashObj(specialAddr("CompactCertSender")))
+ StateProofSender = basics.Address(crypto.HashObj(specialAddr("StateProofSender")))
}
diff --git a/data/transactions/transaction.go b/data/transactions/transaction.go
index 1c63c0c8a..6198a0663 100644
--- a/data/transactions/transaction.go
+++ b/data/transactions/transaction.go
@@ -97,7 +97,7 @@ type Transaction struct {
AssetTransferTxnFields
AssetFreezeTxnFields
ApplicationCallTxnFields
- CompactCertTxnFields
+ StateProofTxnFields
}
// ApplyData contains information about the transaction's execution.
@@ -290,6 +290,13 @@ var errKeyregTxnNotEmptyStateProofPK = errors.New("transaction field StateProofP
var errKeyregTxnNonParticipantShouldBeEmptyStateProofPK = errors.New("non participation keyreg transactions should contain empty stateProofPK")
var errKeyregTxnOfflineShouldBeEmptyStateProofPK = errors.New("offline keyreg transactions should contain empty stateProofPK")
var errKeyRegTxnValidityPeriodTooLong = errors.New("validity period for keyreg transaction is too long")
+var errStateProofNotSupported = errors.New("state proofs not supported")
+var errBadSenderInStateProofTxn = errors.New("sender must be the state-proof sender")
+var errFeeMustBeZeroInStateproofTxn = errors.New("fee must be zero in state-proof transaction")
+var errNoteMustBeEmptyInStateproofTxn = errors.New("note must be empty in state-proof transaction")
+var errGroupMustBeZeroInStateproofTxn = errors.New("group must be zero in state-proof transaction")
+var errRekeyToMustBeZeroInStateproofTxn = errors.New("rekey must be zero in state-proof transaction")
+var errLeaseMustBeZeroInStateproofTxn = errors.New("lease must be zero in state-proof transaction")
// WellFormed checks that the transaction looks reasonable on its own (but not necessarily valid against the actual ledger). It does not check signatures.
func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusParams) error {
@@ -343,7 +350,6 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa
if !suppliesNullKeys {
return errKeyregTxnGoingOnlineWithNonParticipating
}
-
}
if err := tx.stateProofPKWellFormed(proto); err != nil {
@@ -472,32 +478,32 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa
return fmt.Errorf("tx.GlobalStateSchema too large, max number of keys is %d", proto.MaxGlobalSchemaEntries)
}
- case protocol.CompactCertTx:
- if proto.CompactCertRounds == 0 {
- return fmt.Errorf("compact certs not supported")
+ case protocol.StateProofTx:
+ if proto.StateProofInterval == 0 {
+ return errStateProofNotSupported
}
- // This is a placeholder transaction used to store compact certs
+ // This is a placeholder transaction used to store state proofs
// on the ledger, and ensure they are broadly available. Most of
// the fields must be empty. It must be issued from a special
// sender address.
- if tx.Sender != CompactCertSender {
- return fmt.Errorf("sender must be the compact-cert sender")
+ if tx.Sender != StateProofSender {
+ return errBadSenderInStateProofTxn
}
if !tx.Fee.IsZero() {
- return fmt.Errorf("fee must be zero")
+ return errFeeMustBeZeroInStateproofTxn
}
if len(tx.Note) != 0 {
- return fmt.Errorf("note must be empty")
+ return errNoteMustBeEmptyInStateproofTxn
}
if !tx.Group.IsZero() {
- return fmt.Errorf("group must be zero")
+ return errGroupMustBeZeroInStateproofTxn
}
if !tx.RekeyTo.IsZero() {
- return fmt.Errorf("rekey must be zero")
+ return errRekeyToMustBeZeroInStateproofTxn
}
if tx.Lease != [32]byte{} {
- return fmt.Errorf("lease must be zero")
+ return errLeaseMustBeZeroInStateproofTxn
}
default:
@@ -529,8 +535,8 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa
nonZeroFields[protocol.ApplicationCallTx] = true
}
- if !tx.CompactCertTxnFields.Empty() {
- nonZeroFields[protocol.CompactCertTx] = true
+ if !tx.StateProofTxnFields.Empty() {
+ nonZeroFields[protocol.StateProofTx] = true
}
for t, nonZero := range nonZeroFields {
@@ -540,8 +546,8 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa
}
if !proto.EnableFeePooling && tx.Fee.LessThan(basics.MicroAlgos{Raw: proto.MinTxnFee}) {
- if tx.Type == protocol.CompactCertTx {
- // Zero fee allowed for compact cert txn.
+ if tx.Type == protocol.StateProofTx {
+ // Zero fee allowed for stateProof txn.
} else {
return makeMinFeeErrorf("transaction had fee %d, which is less than the minimum %d", tx.Fee.Raw, proto.MinTxnFee)
}
diff --git a/data/transactions/transaction_test.go b/data/transactions/transaction_test.go
index 4536fb122..bac43c22c 100644
--- a/data/transactions/transaction_test.go
+++ b/data/transactions/transaction_test.go
@@ -22,14 +22,16 @@ import (
"strings"
"testing"
- "github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/algorand/go-algorand/crypto/stateproof"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
)
func TestTransaction_EstimateEncodedSize(t *testing.T) {
@@ -597,7 +599,7 @@ func TestWellFormedKeyRegistrationTx(t *testing.T) {
type keyRegTestCase struct {
votePK crypto.OneTimeSignatureVerifier
selectionPK crypto.VRFVerifier
- stateProofPK merklesignature.Verifier
+ stateProofPK merklesignature.Commitment
voteFirst basics.Round
voteLast basics.Round
lastValid basics.Round
@@ -611,8 +613,8 @@ func TestWellFormedKeyRegistrationTx(t *testing.T) {
votePKValue := crypto.OneTimeSignatureVerifier{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21}
selectionPKValue := crypto.VRFVerifier{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21}
- stateProofPK := merklesignature.Verifier([merklesignature.MerkleSignatureSchemeRootSize]byte{1})
- maxValidPeriod := config.Consensus[protocol.ConsensusFuture].MaxKeyregValidPeriod // TODO: change to curProto.MaxKeyregValidPeriod
+ stateProofPK := merklesignature.Commitment([merklesignature.MerkleSignatureSchemeRootSize]byte{1})
+ maxValidPeriod := config.Consensus[protocol.ConsensusCurrentVersion].MaxKeyregValidPeriod
runTestCase := func(testCase keyRegTestCase) error {
@@ -1223,17 +1225,17 @@ func TestWellFormedKeyRegistrationTx(t *testing.T) {
/* 510 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating},
/* 511 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid},
/* 512 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, stateProofPK: stateProofPK, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: false, err: errKeyregTxnNotEmptyStateProofPK},
- /* 513 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, stateProofPK: merklesignature.Verifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: false, err: nil},
+ /* 513 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, stateProofPK: merklesignature.Commitment{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: false, err: nil},
/* 514 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, stateProofPK: stateProofPK, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: true, err: nil},
- /* 515 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, stateProofPK: merklesignature.Verifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: true, err: errKeyRegEmptyStateProofPK},
+ /* 515 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, stateProofPK: merklesignature.Commitment{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: true, err: errKeyRegEmptyStateProofPK},
/* 516 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, stateProofPK: stateProofPK, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: true, err: errKeyregTxnNonParticipantShouldBeEmptyStateProofPK},
- /* 517 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, stateProofPK: merklesignature.Verifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: true, err: nil},
+ /* 517 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, stateProofPK: merklesignature.Commitment{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: true, err: nil},
/* 518 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, stateProofPK: stateProofPK, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: true, err: errKeyregTxnOfflineShouldBeEmptyStateProofPK},
- /* 519 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, stateProofPK: merklesignature.Verifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: true, err: nil},
- /* 520 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, stateProofPK: merklesignature.Verifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: true, err: nil},
- /* 521 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, stateProofPK: merklesignature.Verifier{}, voteFirst: basics.Round(10), voteLast: basics.Round(10 + maxValidPeriod), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: true, err: nil},
- /* 522 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, stateProofPK: merklesignature.Verifier{}, voteFirst: basics.Round(10), voteLast: basics.Round(10000 + maxValidPeriod), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: true, err: errKeyRegTxnValidityPeriodTooLong},
- /* 523 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, stateProofPK: merklesignature.Verifier{}, voteFirst: basics.Round(10), voteLast: basics.Round(10000 + maxValidPeriod), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: false, err: nil},
+ /* 519 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, stateProofPK: merklesignature.Commitment{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: true, err: nil},
+ /* 520 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, stateProofPK: merklesignature.Commitment{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: true, err: nil},
+ /* 521 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, stateProofPK: merklesignature.Commitment{}, voteFirst: basics.Round(10), voteLast: basics.Round(10 + maxValidPeriod), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: true, err: nil},
+ /* 522 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, stateProofPK: merklesignature.Commitment{}, voteFirst: basics.Round(10), voteLast: basics.Round(10000 + maxValidPeriod), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: true, err: errKeyRegTxnValidityPeriodTooLong},
+ /* 523 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, stateProofPK: merklesignature.Commitment{}, voteFirst: basics.Round(10), voteLast: basics.Round(10000 + maxValidPeriod), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, enableStateProofKeyregCheck: false, err: nil},
}
for testcaseIdx, testCase := range keyRegTestCases {
err := runTestCase(testCase)
@@ -1241,3 +1243,111 @@ func TestWellFormedKeyRegistrationTx(t *testing.T) {
require.Equalf(t, testCase.err, err, "index: %d\ntest case: %#v", testcaseIdx, testCase)
}
}
+
+type stateproofTxnTestCase struct {
+ expectedError error
+
+ StateProofInterval uint64
+ fee basics.MicroAlgos
+ note []byte
+ group crypto.Digest
+ lease [32]byte
+ rekeyValue basics.Address
+ sender basics.Address
+}
+
+func (s *stateproofTxnTestCase) runIsWellFormedForTestCase() error {
+ curProto := config.Consensus[protocol.ConsensusCurrentVersion]
+ curProto.StateProofInterval = s.StateProofInterval
+
+ // edit txn params. wanted
+ return Transaction{
+ Type: protocol.StateProofTx,
+ Header: Header{
+ Sender: s.sender,
+ Fee: s.fee,
+ FirstValid: 0,
+ LastValid: 0,
+ Note: s.note,
+ GenesisID: "",
+ GenesisHash: crypto.Digest{},
+ Group: s.group,
+ Lease: s.lease,
+ RekeyTo: s.rekeyValue,
+ },
+ StateProofTxnFields: StateProofTxnFields{},
+ }.WellFormed(SpecialAddresses{}, curProto)
+}
+
+func TestWellFormedStateProofTxn(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // want to create different Txns, run on all of these cases the check, and have an expected result
+ cases := []stateproofTxnTestCase{
+ /* 0 */ {expectedError: errStateProofNotSupported}, // StateProofInterval == 0 leads to error
+ /* 1 */ {expectedError: errBadSenderInStateProofTxn, StateProofInterval: 256, sender: basics.Address{1, 2, 3, 4}},
+ /* 2 */ {expectedError: errFeeMustBeZeroInStateproofTxn, StateProofInterval: 256, sender: StateProofSender, fee: basics.MicroAlgos{Raw: 1}},
+ /* 3 */ {expectedError: errNoteMustBeEmptyInStateproofTxn, StateProofInterval: 256, sender: StateProofSender, note: []byte{1, 2, 3}},
+ /* 4 */ {expectedError: errGroupMustBeZeroInStateproofTxn, StateProofInterval: 256, sender: StateProofSender, group: crypto.Digest{1, 2, 3}},
+ /* 5 */ {expectedError: errRekeyToMustBeZeroInStateproofTxn, StateProofInterval: 256, sender: StateProofSender, rekeyValue: basics.Address{1, 2, 3, 4}},
+ /* 6 */ {expectedError: errLeaseMustBeZeroInStateproofTxn, StateProofInterval: 256, sender: StateProofSender, lease: [32]byte{1, 2, 3, 4}},
+ /* 7 */ {expectedError: nil, StateProofInterval: 256, fee: basics.MicroAlgos{Raw: 0}, note: nil, group: crypto.Digest{}, lease: [32]byte{}, rekeyValue: basics.Address{}, sender: StateProofSender},
+ }
+ for i, testCase := range cases {
+ cpyTestCase := testCase
+ t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
+ t.Parallel()
+ require.Equal(t, cpyTestCase.expectedError, cpyTestCase.runIsWellFormedForTestCase())
+ })
+ }
+}
+
+func TestStateProofTxnShouldBeZero(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ addr1, err := basics.UnmarshalChecksumAddress("NDQCJNNY5WWWFLP4GFZ7MEF2QJSMZYK6OWIV2AQ7OMAVLEFCGGRHFPKJJA")
+ require.NoError(t, err)
+
+ curProto := config.Consensus[protocol.ConsensusCurrentVersion]
+ curProto.StateProofInterval = 256
+ txn := Transaction{
+ Type: protocol.PaymentTx,
+ Header: Header{
+ Sender: addr1,
+ Fee: basics.MicroAlgos{Raw: 100},
+ FirstValid: 0,
+ LastValid: 0,
+ Note: []byte{0, 1},
+ GenesisID: "",
+ GenesisHash: crypto.Digest{},
+ },
+ StateProofTxnFields: StateProofTxnFields{},
+ }
+
+ const erroMsg = "type pay has non-zero fields for type stpf"
+ txn.StateProofType = 1
+ err = txn.WellFormed(SpecialAddresses{}, curProto)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), erroMsg)
+
+ txn.StateProofType = 0
+ txn.Message = stateproofmsg.Message{FirstAttestedRound: 1}
+ err = txn.WellFormed(SpecialAddresses{}, curProto)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), erroMsg)
+
+ txn.Message = stateproofmsg.Message{}
+ txn.StateProof = stateproof.StateProof{SignedWeight: 100}
+ err = txn.WellFormed(SpecialAddresses{}, curProto)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), erroMsg)
+
+ txn.StateProof = stateproof.StateProof{}
+ txn.Message.LastAttestedRound = 512
+ err = txn.WellFormed(SpecialAddresses{}, curProto)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), erroMsg)
+
+ txn.Message.LastAttestedRound = 0
+ err = txn.WellFormed(SpecialAddresses{}, curProto)
+ require.NoError(t, err)
+}
diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go
index 7f6fc2b9f..1d947d31a 100644
--- a/data/transactions/verify/txn.go
+++ b/data/transactions/verify/txn.go
@@ -64,13 +64,14 @@ type GroupContext struct {
specAddrs transactions.SpecialAddresses
consensusVersion protocol.ConsensusVersion
consensusParams config.ConsensusParams
- minTealVersion uint64
+ minAvmVersion uint64
signedGroupTxns []transactions.SignedTxn
+ ledger logic.LedgerForSignature
}
// PrepareGroupContext prepares a verification group parameter object for a given transaction
// group.
-func PrepareGroupContext(group []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader) (*GroupContext, error) {
+func PrepareGroupContext(group []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader, ledger logic.LedgerForSignature) (*GroupContext, error) {
if len(group) == 0 {
return nil, nil
}
@@ -85,8 +86,9 @@ func PrepareGroupContext(group []transactions.SignedTxn, contextHdr bookkeeping.
},
consensusVersion: contextHdr.CurrentProtocol,
consensusParams: consensusParams,
- minTealVersion: logic.ComputeMinTealVersion(transactions.WrapSignedTxnsWithAD(group)),
+ minAvmVersion: logic.ComputeMinAvmVersion(transactions.WrapSignedTxnsWithAD(group)),
signedGroupTxns: group,
+ ledger: ledger,
}, nil
}
@@ -94,14 +96,13 @@ func PrepareGroupContext(group []transactions.SignedTxn, contextHdr bookkeeping.
func (g *GroupContext) Equal(other *GroupContext) bool {
return g.specAddrs == other.specAddrs &&
g.consensusVersion == other.consensusVersion &&
- g.minTealVersion == other.minTealVersion
+ g.minAvmVersion == other.minAvmVersion
}
// Txn verifies a SignedTxn as being signed and having no obviously inconsistent data.
// Block-assembly time checks of LogicSig and accounting rules may still block the txn.
func Txn(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext) error {
- useBatchVerification := groupCtx.consensusParams.EnableBatchVerification
- batchVerifier := crypto.MakeBatchVerifierDefaultSize(useBatchVerification)
+ batchVerifier := crypto.MakeBatchVerifier()
if err := TxnBatchVerify(s, txnIdx, groupCtx, batchVerifier); err != nil {
return err
@@ -133,13 +134,10 @@ func TxnBatchVerify(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContex
}
// TxnGroup verifies a []SignedTxn as being signed and having no obviously inconsistent data.
-func TxnGroup(stxs []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader, cache VerifiedTransactionCache) (groupCtx *GroupContext, err error) {
+func TxnGroup(stxs []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader, cache VerifiedTransactionCache, ledger logic.LedgerForSignature) (groupCtx *GroupContext, err error) {
+ batchVerifier := crypto.MakeBatchVerifier()
- currentVersion := contextHdr.CurrentProtocol
- useBatchVerification := config.Consensus[currentVersion].EnableBatchVerification
- batchVerifier := crypto.MakeBatchVerifierDefaultSize(useBatchVerification)
-
- if groupCtx, err = TxnGroupBatchVerify(stxs, contextHdr, cache, batchVerifier); err != nil {
+ if groupCtx, err = TxnGroupBatchVerify(stxs, contextHdr, cache, ledger, batchVerifier); err != nil {
return nil, err
}
@@ -156,8 +154,8 @@ func TxnGroup(stxs []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader,
// TxnGroupBatchVerify verifies a []SignedTxn having no obviously inconsistent data.
// it is the caller responsibility to call batchVerifier.verify()
-func TxnGroupBatchVerify(stxs []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader, cache VerifiedTransactionCache, verifier *crypto.BatchVerifier) (groupCtx *GroupContext, err error) {
- groupCtx, err = PrepareGroupContext(stxs, contextHdr)
+func TxnGroupBatchVerify(stxs []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader, cache VerifiedTransactionCache, ledger logic.LedgerForSignature, verifier *crypto.BatchVerifier) (groupCtx *GroupContext, err error) {
+ groupCtx, err = PrepareGroupContext(stxs, contextHdr, ledger)
if err != nil {
return nil, err
}
@@ -170,7 +168,7 @@ func TxnGroupBatchVerify(stxs []transactions.SignedTxn, contextHdr bookkeeping.B
err = fmt.Errorf("transaction %+v invalid : %w", stxn, err)
return
}
- if stxn.Txn.Type != protocol.CompactCertTx {
+ if stxn.Txn.Type != protocol.StateProofTx {
minFeeCount++
}
feesPaid = basics.AddSaturate(feesPaid, stxn.Txn.Fee.Raw)
@@ -214,11 +212,10 @@ func stxnVerifyCore(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContex
}
if numSigs == 0 {
// Special case: special sender address can issue special transaction
- // types (compact cert txn) without any signature. The well-formed
+ // types (state proof txn) without any signature. The well-formed
// check ensures that this transaction cannot pay any fee, and
- // cannot have any other interesting fields, except for the compact
- // cert payload.
- if s.Txn.Sender == transactions.CompactCertSender && s.Txn.Type == protocol.CompactCertTx {
+ // cannot have any other interesting fields, except for the state proof payload.
+ if s.Txn.Sender == transactions.StateProofSender && s.Txn.Type == protocol.StateProofTx {
return nil
}
@@ -242,7 +239,7 @@ func stxnVerifyCore(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContex
return errors.New("multisig validation failed")
}
if hasLogicSig {
- return logicSigBatchVerify(s, txnIdx, groupCtx, batchVerifier)
+ return logicSigBatchVerify(s, txnIdx, groupCtx)
}
return errors.New("has one mystery sig. WAT?")
}
@@ -250,8 +247,7 @@ func stxnVerifyCore(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContex
// LogicSigSanityCheck checks that the signature is valid and that the program is basically well formed.
// It does not evaluate the logic.
func LogicSigSanityCheck(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext) error {
- useBatchVerification := groupCtx.consensusParams.EnableBatchVerification
- batchVerifier := crypto.MakeBatchVerifierDefaultSize(useBatchVerification)
+ batchVerifier := crypto.MakeBatchVerifier()
if err := LogicSigSanityCheckBatchVerify(txn, groupIndex, groupCtx, batchVerifier); err != nil {
return err
@@ -296,9 +292,10 @@ func LogicSigSanityCheckBatchVerify(txn *transactions.SignedTxn, groupIndex int,
}
txngroup := transactions.WrapSignedTxnsWithAD(groupCtx.signedGroupTxns)
ep := logic.EvalParams{
- Proto: &groupCtx.consensusParams,
- TxnGroup: txngroup,
- MinTealVersion: &groupCtx.minTealVersion,
+ Proto: &groupCtx.consensusParams,
+ TxnGroup: txngroup,
+ MinAvmVersion: &groupCtx.minAvmVersion,
+ SigLedger: groupCtx.ledger, // won't be needed for CheckSignature
}
err := logic.CheckSignature(groupIndex, &ep)
if err != nil {
@@ -341,7 +338,7 @@ func LogicSigSanityCheckBatchVerify(txn *transactions.SignedTxn, groupIndex int,
// logicSigBatchVerify checks that the signature is valid, executing the program.
// it is the caller responsibility to call batchVerifier.verify()
-func logicSigBatchVerify(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext, batchverifier *crypto.BatchVerifier) error {
+func logicSigBatchVerify(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext) error {
err := LogicSigSanityCheck(txn, groupIndex, groupCtx)
if err != nil {
return err
@@ -351,9 +348,10 @@ func logicSigBatchVerify(txn *transactions.SignedTxn, groupIndex int, groupCtx *
return errors.New("Negative groupIndex")
}
ep := logic.EvalParams{
- Proto: &groupCtx.consensusParams,
- TxnGroup: transactions.WrapSignedTxnsWithAD(groupCtx.signedGroupTxns),
- MinTealVersion: &groupCtx.minTealVersion,
+ Proto: &groupCtx.consensusParams,
+ TxnGroup: transactions.WrapSignedTxnsWithAD(groupCtx.signedGroupTxns),
+ MinAvmVersion: &groupCtx.minAvmVersion,
+ SigLedger: groupCtx.ledger,
}
pass, err := logic.EvalSignature(groupIndex, &ep)
if err != nil {
@@ -375,7 +373,7 @@ func logicSigBatchVerify(txn *transactions.SignedTxn, groupIndex int, groupCtx *
// a PaysetGroups may be well-formed, but a payset might contain an overspend.
//
// This version of verify is performing the verification over the provided execution pool.
-func PaysetGroups(ctx context.Context, payset [][]transactions.SignedTxn, blkHeader bookkeeping.BlockHeader, verificationPool execpool.BacklogPool, cache VerifiedTransactionCache) (err error) {
+func PaysetGroups(ctx context.Context, payset [][]transactions.SignedTxn, blkHeader bookkeeping.BlockHeader, verificationPool execpool.BacklogPool, cache VerifiedTransactionCache, ledger logic.LedgerForSignature) (err error) {
if len(payset) == 0 {
return nil
}
@@ -384,8 +382,6 @@ func PaysetGroups(ctx context.Context, payset [][]transactions.SignedTxn, blkHea
worksets := make(chan struct{}, concurrentWorksets)
worksDoneCh := make(chan interface{}, concurrentWorksets)
processing := 0
- currentVersion := blkHeader.CurrentProtocol
- useBatchVerification := config.Consensus[currentVersion].EnableBatchVerification
tasksCtx, cancelTasksCtx := context.WithCancel(ctx)
defer cancelTasksCtx()
@@ -412,9 +408,9 @@ func PaysetGroups(ctx context.Context, payset [][]transactions.SignedTxn, blkHea
txnGroups := arg.([][]transactions.SignedTxn)
groupCtxs := make([]*GroupContext, len(txnGroups))
- batchVerifier := crypto.MakeBatchVerifier(len(payset), useBatchVerification)
+ batchVerifier := crypto.MakeBatchVerifierWithHint(len(payset))
for i, signTxnsGrp := range txnGroups {
- groupCtxs[i], grpErr = TxnGroupBatchVerify(signTxnsGrp, blkHeader, nil, batchVerifier)
+ groupCtxs[i], grpErr = TxnGroupBatchVerify(signTxnsGrp, blkHeader, nil, ledger, batchVerifier)
// abort only if it's a non-cache error.
if grpErr != nil {
return grpErr
diff --git a/data/transactions/verify/txn_test.go b/data/transactions/verify/txn_test.go
index d13504610..399835211 100644
--- a/data/transactions/verify/txn_test.go
+++ b/data/transactions/verify/txn_test.go
@@ -114,7 +114,7 @@ func TestSignedPayment(t *testing.T) {
payments, stxns, secrets, addrs := generateTestObjects(1, 1, 0)
payment, stxn, secret, addr := payments[0], stxns[0], secrets[0], addrs[0]
- groupCtx, err := PrepareGroupContext(stxns, blockHeader)
+ groupCtx, err := PrepareGroupContext(stxns, blockHeader, nil)
require.NoError(t, err)
require.NoError(t, payment.WellFormed(spec, proto), "generateTestObjects generated an invalid payment")
require.NoError(t, Txn(&stxn, 0, groupCtx), "generateTestObjects generated a bad signedtxn")
@@ -126,7 +126,7 @@ func TestSignedPayment(t *testing.T) {
require.Equal(t, stxn.ID(), stxn2.ID(), "changing sig caused txid to change")
require.Error(t, Txn(&stxn2, 0, groupCtx), "verify succeeded with bad sig")
- require.True(t, crypto.SignatureVerifier(addr).Verify(payment, stxn.Sig, true), "signature on the transaction is not the signature of the hash of the transaction under the spender's key")
+ require.True(t, crypto.SignatureVerifier(addr).Verify(payment, stxn.Sig), "signature on the transaction is not the signature of the hash of the transaction under the spender's key")
}
func TestTxnValidationEncodeDecode(t *testing.T) {
@@ -135,7 +135,7 @@ func TestTxnValidationEncodeDecode(t *testing.T) {
_, signed, _, _ := generateTestObjects(100, 50, 0)
for _, txn := range signed {
- groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{txn}, blockHeader)
+ groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{txn}, blockHeader, nil)
require.NoError(t, err)
if Txn(&txn, 0, groupCtx) != nil {
t.Errorf("signed transaction %#v did not verify", txn)
@@ -157,7 +157,7 @@ func TestTxnValidationEmptySig(t *testing.T) {
_, signed, _, _ := generateTestObjects(100, 50, 0)
for _, txn := range signed {
- groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{txn}, blockHeader)
+ groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{txn}, blockHeader, nil)
require.NoError(t, err)
if Txn(&txn, 0, groupCtx) != nil {
t.Errorf("signed transaction %#v did not verify", txn)
@@ -172,20 +172,20 @@ func TestTxnValidationEmptySig(t *testing.T) {
}
}
-const ccProto = protocol.ConsensusVersion("test-compact-cert-enabled")
+const spProto = protocol.ConsensusVersion("test-state-proof-enabled")
-func TestTxnValidationCompactCert(t *testing.T) {
+func TestTxnValidationStateProof(t *testing.T) {
partitiontest.PartitionTest(t)
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- proto.CompactCertRounds = 256
- config.Consensus[ccProto] = proto
+ proto.StateProofInterval = 256
+ config.Consensus[spProto] = proto
stxn := transactions.SignedTxn{
Txn: transactions.Transaction{
- Type: protocol.CompactCertTx,
+ Type: protocol.StateProofTx,
Header: transactions.Header{
- Sender: transactions.CompactCertSender,
+ Sender: transactions.StateProofSender,
FirstValid: 0,
LastValid: 10,
},
@@ -198,21 +198,21 @@ func TestTxnValidationCompactCert(t *testing.T) {
RewardsPool: poolAddr,
},
UpgradeState: bookkeeping.UpgradeState{
- CurrentProtocol: ccProto,
+ CurrentProtocol: spProto,
},
}
- groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{stxn}, blockHeader)
+ groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{stxn}, blockHeader, nil)
require.NoError(t, err)
err = Txn(&stxn, 0, groupCtx)
- require.NoError(t, err, "compact cert txn %#v did not verify", stxn)
+ require.NoError(t, err, "state proof txn %#v did not verify", stxn)
stxn2 := stxn
stxn2.Txn.Type = protocol.PaymentTx
stxn2.Txn.Header.Fee = basics.MicroAlgos{Raw: proto.MinTxnFee}
err = Txn(&stxn2, 0, groupCtx)
- require.Error(t, err, "payment txn %#v verified from CompactCertSender", stxn2)
+ require.Error(t, err, "payment txn %#v verified from StateProofSender", stxn2)
secret := keypair()
stxn2 = stxn
@@ -220,28 +220,28 @@ func TestTxnValidationCompactCert(t *testing.T) {
stxn2.Txn.Header.Fee = basics.MicroAlgos{Raw: proto.MinTxnFee}
stxn2 = stxn2.Txn.Sign(secret)
err = Txn(&stxn2, 0, groupCtx)
- require.Error(t, err, "compact cert txn %#v verified from non-CompactCertSender", stxn2)
+ require.Error(t, err, "state proof txn %#v verified from non-StateProofSender", stxn2)
- // Compact cert txns are not allowed to have non-zero values for many fields
+ // state proof txns are not allowed to have non-zero values for many fields
stxn2 = stxn
stxn2.Txn.Header.Fee = basics.MicroAlgos{Raw: proto.MinTxnFee}
err = Txn(&stxn2, 0, groupCtx)
- require.Error(t, err, "compact cert txn %#v verified", stxn2)
+ require.Error(t, err, "state proof txn %#v verified", stxn2)
stxn2 = stxn
stxn2.Txn.Header.Note = []byte{'A'}
err = Txn(&stxn2, 0, groupCtx)
- require.Error(t, err, "compact cert txn %#v verified", stxn2)
+ require.Error(t, err, "state proof txn %#v verified", stxn2)
stxn2 = stxn
stxn2.Txn.Lease[0] = 1
err = Txn(&stxn2, 0, groupCtx)
- require.Error(t, err, "compact cert txn %#v verified", stxn2)
+ require.Error(t, err, "state proof txn %#v verified", stxn2)
stxn2 = stxn
stxn2.Txn.RekeyTo = basics.Address(secret.SignatureVerifier)
err = Txn(&stxn2, 0, groupCtx)
- require.Error(t, err, "compact cert txn %#v verified", stxn2)
+ require.Error(t, err, "state proof txn %#v verified", stxn2)
}
func TestDecodeNil(t *testing.T) {
@@ -256,7 +256,7 @@ func TestDecodeNil(t *testing.T) {
err := protocol.Decode(nilEncoding, &st)
if err == nil {
// This used to panic when run on a zero value of SignedTxn.
- groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{st}, blockHeader)
+ groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{st}, blockHeader, nil)
require.NoError(t, err)
Txn(&st, 0, groupCtx)
}
@@ -285,17 +285,17 @@ func TestPaysetGroups(t *testing.T) {
txnGroups := generateTransactionGroups(signedTxn, secrets, addrs)
startPaysetGroupsTime := time.Now()
- err := PaysetGroups(context.Background(), txnGroups, blkHdr, verificationPool, MakeVerifiedTransactionCache(50000))
+ err := PaysetGroups(context.Background(), txnGroups, blkHdr, verificationPool, MakeVerifiedTransactionCache(50000), nil)
require.NoError(t, err)
paysetGroupDuration := time.Now().Sub(startPaysetGroupsTime)
// break the signature and see if it fails.
txnGroups[0][0].Sig[0] = txnGroups[0][0].Sig[0] + 1
- err = PaysetGroups(context.Background(), txnGroups, blkHdr, verificationPool, MakeVerifiedTransactionCache(50000))
+ err = PaysetGroups(context.Background(), txnGroups, blkHdr, verificationPool, MakeVerifiedTransactionCache(50000), nil)
require.Error(t, err)
// ensure the rest are fine
- err = PaysetGroups(context.Background(), txnGroups[1:], blkHdr, verificationPool, MakeVerifiedTransactionCache(50000))
+ err = PaysetGroups(context.Background(), txnGroups[1:], blkHdr, verificationPool, MakeVerifiedTransactionCache(50000), nil)
require.NoError(t, err)
// test the context cancelation:
@@ -312,7 +312,7 @@ func TestPaysetGroups(t *testing.T) {
go func() {
defer close(waitCh)
cache := MakeVerifiedTransactionCache(50000)
- waitCh <- PaysetGroups(ctx, txnGroups, blkHdr, verificationPool, cache)
+ waitCh <- PaysetGroups(ctx, txnGroups, blkHdr, verificationPool, cache, nil)
}()
startPaysetGroupsTime = time.Now()
select {
@@ -366,7 +366,7 @@ func BenchmarkPaysetGroups(b *testing.B) {
cache := MakeVerifiedTransactionCache(50000)
b.ResetTimer()
- err := PaysetGroups(context.Background(), txnGroups, blkHdr, verificationPool, cache)
+ err := PaysetGroups(context.Background(), txnGroups, blkHdr, verificationPool, cache, nil)
require.NoError(b, err)
b.StopTimer()
}
@@ -422,7 +422,7 @@ func BenchmarkTxn(b *testing.B) {
b.ResetTimer()
for _, txnGroup := range txnGroups {
- groupCtx, err := PrepareGroupContext(txnGroup, blk.BlockHeader)
+ groupCtx, err := PrepareGroupContext(txnGroup, blk.BlockHeader, nil)
require.NoError(b, err)
for i, txn := range txnGroup {
err := Txn(&txn, i, groupCtx)
diff --git a/data/transactions/verify/verifiedTxnCache.go b/data/transactions/verify/verifiedTxnCache.go
index 06798f3b4..b41993be9 100644
--- a/data/transactions/verify/verifiedTxnCache.go
+++ b/data/transactions/verify/verifiedTxnCache.go
@@ -128,7 +128,7 @@ func (v *verifiedTransactionCache) GetUnverifiedTranscationGroups(txnGroups [][]
for txnGroupIndex := 0; txnGroupIndex < len(txnGroups); txnGroupIndex++ {
signedTxnGroup := txnGroups[txnGroupIndex]
verifiedTxn := 0
- groupCtx.minTealVersion = logic.ComputeMinTealVersion(transactions.WrapSignedTxnsWithAD(signedTxnGroup))
+ groupCtx.minAvmVersion = logic.ComputeMinAvmVersion(transactions.WrapSignedTxnsWithAD(signedTxnGroup))
baseBucket := v.base
for txnIdx := 0; txnIdx < len(signedTxnGroup); txnIdx++ {
diff --git a/data/transactions/verify/verifiedTxnCache_test.go b/data/transactions/verify/verifiedTxnCache_test.go
index af8e36b42..35d958e35 100644
--- a/data/transactions/verify/verifiedTxnCache_test.go
+++ b/data/transactions/verify/verifiedTxnCache_test.go
@@ -34,7 +34,7 @@ func TestAddingToCache(t *testing.T) {
impl := icache.(*verifiedTransactionCache)
_, signedTxn, secrets, addrs := generateTestObjects(10, 5, 50)
txnGroups := generateTransactionGroups(signedTxn, secrets, addrs)
- groupCtx, err := PrepareGroupContext(txnGroups[0], blockHeader)
+ groupCtx, err := PrepareGroupContext(txnGroups[0], blockHeader, nil)
require.NoError(t, err)
impl.Add(txnGroups[0], groupCtx)
// make it was added.
@@ -55,7 +55,7 @@ func TestBucketCycling(t *testing.T) {
_, signedTxn, _, _ := generateTestObjects(entriesPerBucket*bucketCount*2, bucketCount, 0)
require.Equal(t, entriesPerBucket*bucketCount*2, len(signedTxn))
- groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{signedTxn[0]}, blockHeader)
+ groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{signedTxn[0]}, blockHeader, nil)
require.NoError(t, err)
// fill up the cache with entries.
@@ -92,7 +92,7 @@ func TestGetUnverifiedTranscationGroups50(t *testing.T) {
if i%2 == 0 {
expectedUnverifiedGroups = append(expectedUnverifiedGroups, txnGroups[i])
} else {
- groupCtx, _ := PrepareGroupContext(txnGroups[i], blockHeader)
+ groupCtx, _ := PrepareGroupContext(txnGroups[i], blockHeader, nil)
impl.Add(txnGroups[i], groupCtx)
}
}
@@ -116,7 +116,7 @@ func BenchmarkGetUnverifiedTranscationGroups50(b *testing.B) {
if i%2 == 1 {
queryTxnGroups = append(queryTxnGroups, txnGroups[i])
} else {
- groupCtx, _ := PrepareGroupContext(txnGroups[i], blockHeader)
+ groupCtx, _ := PrepareGroupContext(txnGroups[i], blockHeader, nil)
impl.Add(txnGroups[i], groupCtx)
}
}
@@ -145,7 +145,7 @@ func TestUpdatePinned(t *testing.T) {
// insert some entries.
for i := 0; i < len(txnGroups); i++ {
- groupCtx, _ := PrepareGroupContext(txnGroups[i], blockHeader)
+ groupCtx, _ := PrepareGroupContext(txnGroups[i], blockHeader, nil)
impl.Add(txnGroups[i], groupCtx)
}
@@ -174,7 +174,7 @@ func TestPinningTransactions(t *testing.T) {
// insert half of the entries.
for i := 0; i < len(txnGroups)/2; i++ {
- groupCtx, _ := PrepareGroupContext(txnGroups[i], blockHeader)
+ groupCtx, _ := PrepareGroupContext(txnGroups[i], blockHeader, nil)
impl.Add(txnGroups[i], groupCtx)
}
diff --git a/data/txHandler.go b/data/txHandler.go
index 1d4d1a500..46248b4ed 100644
--- a/data/txHandler.go
+++ b/data/txHandler.go
@@ -23,6 +23,7 @@ import (
"io"
"sync"
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/pools"
@@ -38,7 +39,8 @@ import (
// The size txBacklogSize used to determine the size of the backlog that is used to store incoming transaction messages before starting dropping them.
// It should be configured to be higher then the number of CPU cores, so that the execution pool get saturated, but not too high to avoid lockout of the
// execution pool for a long duration of time.
-const txBacklogSize = 1000
+// Set backlog at 'approximately one block' by dividing block size by a typical transaction size.
+var txBacklogSize = config.Consensus[protocol.ConsensusCurrentVersion].MaxTxnBytesPerBlock / 200
var transactionMessagesHandled = metrics.MakeCounter(metrics.TransactionMessagesHandled)
var transactionMessagesDroppedFromBacklog = metrics.MakeCounter(metrics.TransactionMessagesDroppedFromBacklog)
@@ -203,7 +205,7 @@ func (handler *TxHandler) asyncVerifySignature(arg interface{}) interface{} {
logging.Base().Warnf("Could not get header for previous block %d: %v", latest, err)
} else {
// we can't use PaysetGroups here since it's using a execpool like this go-routine and we don't want to deadlock.
- _, tx.verificationErr = verify.TxnGroup(tx.unverifiedTxGroup, latestHdr, handler.ledger.VerifiedTransactionCache())
+ _, tx.verificationErr = verify.TxnGroup(tx.unverifiedTxGroup, latestHdr, handler.ledger.VerifiedTransactionCache(), handler.ledger)
}
select {
@@ -295,7 +297,7 @@ func (handler *TxHandler) processDecoded(unverifiedTxGroup []transactions.Signed
}
unverifiedTxnGroups := bookkeeping.SignedTxnsToGroups(unverifiedTxGroup)
- err = verify.PaysetGroups(context.Background(), unverifiedTxnGroups, latestHdr, handler.txVerificationPool, handler.ledger.VerifiedTransactionCache())
+ err = verify.PaysetGroups(context.Background(), unverifiedTxnGroups, latestHdr, handler.txVerificationPool, handler.ledger.VerifiedTransactionCache(), handler.ledger)
if err != nil {
// transaction is invalid
logging.Base().Warnf("One or more transactions were malformed: %v", err)
diff --git a/data/txntest/txn.go b/data/txntest/txn.go
index c137b171f..988753a0d 100644
--- a/data/txntest/txn.go
+++ b/data/txntest/txn.go
@@ -39,8 +39,9 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/compactcert"
+ "github.com/algorand/go-algorand/crypto/stateproof"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/protocol"
@@ -100,9 +101,9 @@ type Txn struct {
ClearStateProgram interface{} // string, nil or []bytes if already compiled
ExtraProgramPages uint32
- CertRound basics.Round
- CertType protocol.CompactCertType
- Cert compactcert.Cert
+ StateProofType protocol.StateProofType
+ StateProof stateproof.StateProof
+ StateProofMsg stateproofmsg.Message
}
// Noted returns a new Txn with the given note field.
@@ -240,10 +241,10 @@ func (tx Txn) Txn() transactions.Transaction {
ClearStateProgram: assemble(tx.ClearStateProgram),
ExtraProgramPages: tx.ExtraProgramPages,
},
- CompactCertTxnFields: transactions.CompactCertTxnFields{
- CertRound: tx.CertRound,
- CertType: tx.CertType,
- Cert: tx.Cert,
+ StateProofTxnFields: transactions.StateProofTxnFields{
+ StateProofType: tx.StateProofType,
+ StateProof: tx.StateProof,
+ Message: tx.StateProofMsg,
},
}
}
diff --git a/gen/generate.go b/gen/generate.go
index dc76a485d..804e893c2 100644
--- a/gen/generate.go
+++ b/gen/generate.go
@@ -272,7 +272,7 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion,
data.VoteLastValid = part.LastValid
data.VoteKeyDilution = part.KeyDilution
if protoParams.EnableStateProofKeyregCheck {
- data.StateProofID = *part.StateProofVerifier()
+ data.StateProofID = part.StateProofVerifier().Commitment
}
}
diff --git a/gen/generate_test.go b/gen/generate_test.go
index 0d377a2b6..fbffe9467 100644
--- a/gen/generate_test.go
+++ b/gen/generate_test.go
@@ -18,8 +18,6 @@ package gen
import (
"fmt"
- "io/ioutil"
- "os"
"path/filepath"
"strings"
"sync"
@@ -37,9 +35,7 @@ import (
func TestLoadMultiRootKeyConcurrent(t *testing.T) {
t.Skip() // skip in auto-test mode
a := require.New(t)
- tempDir, err := ioutil.TempDir("", "loadkey-test-")
- a.NoError(err)
- defer os.RemoveAll(tempDir)
+ tempDir := t.TempDir()
const numThreads = 100
var wg sync.WaitGroup
@@ -78,9 +74,7 @@ func TestLoadMultiRootKeyConcurrent(t *testing.T) {
func TestLoadSingleRootKeyConcurrent(t *testing.T) {
t.Skip() // skip in auto-test mode
a := require.New(t)
- tempDir, err := ioutil.TempDir("", "loadkey-test-")
- a.NoError(err)
- defer os.RemoveAll(tempDir)
+ tempDir := t.TempDir()
wallet := filepath.Join(tempDir, "wallet1")
rootDB, err := db.MakeErasableAccessor(wallet)
diff --git a/go.mod b/go.mod
index 5ab375956..dd46477e8 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/algorand/go-algorand
go 1.17
require (
- github.com/algorand/falcon v0.0.0-20220130164023-c9e1d466f123
+ github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414
github.com/algorand/go-codec/codec v1.1.8
github.com/algorand/go-deadlock v0.2.2
github.com/algorand/go-sumhash v0.1.0
@@ -20,6 +20,7 @@ require (
github.com/gen2brain/beeep v0.0.0-20180718162406-4e430518395f
github.com/getkin/kin-openapi v0.22.0
github.com/gofrs/flock v0.7.0
+ github.com/golang/snappy v0.0.4
github.com/google/go-querystring v1.0.0
github.com/gorilla/mux v1.6.2
github.com/jmoiron/sqlx v1.2.0
diff --git a/go.sum b/go.sum
index 93b075f40..0537d1101 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,5 @@
-github.com/algorand/falcon v0.0.0-20220130164023-c9e1d466f123 h1:cnUjJ/iqUjJNbhUzgmxbfwHMVFnz+DLnNQx8uJcGaks=
-github.com/algorand/falcon v0.0.0-20220130164023-c9e1d466f123/go.mod h1:OkQyHlGvS0kLNcIWbC21/uQcnbfwSOQm+wiqWwBG9pQ=
+github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414 h1:nwYN+GQ7Z5OOfZwqBO1ma7DSlP7S1YrKWICOyjkwqrc=
+github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414/go.mod h1:OkQyHlGvS0kLNcIWbC21/uQcnbfwSOQm+wiqWwBG9pQ=
github.com/algorand/go-codec v1.1.8/go.mod h1:XhzVs6VVyWMLu6cApb9/192gBjGRVGm5cX5j203Heg4=
github.com/algorand/go-codec/codec v1.1.8 h1:lsFuhcOH2LiEhpBH3BVUUkdevVmwCRyvb7FCAAPeY6U=
github.com/algorand/go-codec/codec v1.1.8/go.mod h1:tQ3zAJ6ijTps6V+wp8KsGDnPC2uhHVC7ANyrtkIY0bA=
@@ -52,6 +52,8 @@ github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblf
github.com/gofrs/flock v0.7.0 h1:pGFUjl501gafK9HBt1VGL1KCOd/YhIooID+xgyJCf3g=
github.com/gofrs/flock v0.7.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
diff --git a/installer/config.json.example b/installer/config.json.example
index c569e4c93..76b1700a7 100644
--- a/installer/config.json.example
+++ b/installer/config.json.example
@@ -1,5 +1,5 @@
{
- "Version": 22,
+ "Version": 23,
"AccountUpdatesStatsInterval": 5000000000,
"AccountsRebuildSynchronousMode": 1,
"AgreementIncomingBundlesQueueLength": 7,
@@ -64,6 +64,7 @@
"LogArchiveName": "node.archive.log",
"LogSizeLimit": 1073741824,
"MaxAPIResourcesPerAccount": 100000,
+ "MaxAcctLookback": 4,
"MaxCatchpointDownloadDuration": 7200000000000,
"MaxConnectionsPerIP": 30,
"MinCatchpointFileDownloadBytesPerSecond": 20480,
@@ -79,7 +80,7 @@
"PeerConnectionsUpdateInterval": 3600,
"PeerPingPeriodSeconds": 0,
"PriorityPeers": {},
- "ProposalAssemblyTime": 250000000,
+ "ProposalAssemblyTime": 500000000,
"PublicAddress": "",
"ReconnectTime": 60000000000,
"ReservedFDs": 256,
@@ -96,10 +97,10 @@
"TransactionSyncDataExchangeRate": 0,
"TransactionSyncSignificantMessageThreshold": 0,
"TxPoolExponentialIncreaseFactor": 2,
- "TxPoolSize": 15000,
+ "TxPoolSize": 75000,
"TxSyncIntervalSeconds": 60,
"TxSyncServeResponseSize": 1000000,
"TxSyncTimeoutSeconds": 30,
"UseXForwardedForAddressField": "",
- "VerifiedTranscationsCacheSize": 30000
+ "VerifiedTranscationsCacheSize": 150000
}
diff --git a/ledger/accountdb.go b/ledger/accountdb.go
index a438f578d..be242e13e 100644
--- a/ledger/accountdb.go
+++ b/ledger/accountdb.go
@@ -20,8 +20,10 @@ import (
"bytes"
"context"
"database/sql"
+ "encoding/hex"
"errors"
"fmt"
+ "strings"
"time"
"github.com/mattn/go-sqlite3"
@@ -31,8 +33,12 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/algorand/go-algorand/crypto/merkletrie"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
)
@@ -40,19 +46,17 @@ import (
// accountsDbQueries is used to cache a prepared SQL statement to look up
// the state of a single account.
type accountsDbQueries struct {
- listCreatablesStmt *sql.Stmt
- lookupStmt *sql.Stmt
- lookupResourcesStmt *sql.Stmt
- lookupAllResourcesStmt *sql.Stmt
- lookupCreatorStmt *sql.Stmt
- deleteStoredCatchpoint *sql.Stmt
- insertStoredCatchpoint *sql.Stmt
- selectOldestCatchpointFiles *sql.Stmt
- selectCatchpointStateUint64 *sql.Stmt
- deleteCatchpointState *sql.Stmt
- insertCatchpointStateUint64 *sql.Stmt
- selectCatchpointStateString *sql.Stmt
- insertCatchpointStateString *sql.Stmt
+ listCreatablesStmt *sql.Stmt
+ lookupStmt *sql.Stmt
+ lookupResourcesStmt *sql.Stmt
+ lookupAllResourcesStmt *sql.Stmt
+ lookupCreatorStmt *sql.Stmt
+}
+
+type onlineAccountsDbQueries struct {
+ lookupOnlineStmt *sql.Stmt
+ lookupOnlineHistoryStmt *sql.Stmt
+ lookupOnlineTotalsStmt *sql.Stmt
}
var accountsSchema = []string{
@@ -98,8 +102,13 @@ var creatablesMigration = []string{
// createNormalizedOnlineBalanceIndex handles accountbase/catchpointbalances tables
func createNormalizedOnlineBalanceIndex(idxname string, tablename string) string {
return fmt.Sprintf(`CREATE INDEX IF NOT EXISTS %s
- ON %s ( normalizedonlinebalance, address, data )
- WHERE normalizedonlinebalance>0`, idxname, tablename)
+ ON %s ( normalizedonlinebalance, address, data ) WHERE normalizedonlinebalance>0`, idxname, tablename)
+}
+
+// createNormalizedOnlineBalanceIndexOnline handles onlineaccounts/catchpointonlineaccounts tables
+func createNormalizedOnlineBalanceIndexOnline(idxname string, tablename string) string {
+ return fmt.Sprintf(`CREATE INDEX IF NOT EXISTS %s
+ ON %s ( normalizedonlinebalance, address )`, idxname, tablename)
}
func createUniqueAddressBalanceIndex(idxname string, tablename string) string {
@@ -120,6 +129,41 @@ var createResourcesTable = []string{
PRIMARY KEY (addrid, aidx) ) WITHOUT ROWID`,
}
+var createOnlineAccountsTable = []string{
+ `CREATE TABLE IF NOT EXISTS onlineaccounts (
+ address BLOB NOT NULL,
+ updround INTEGER NOT NULL,
+ normalizedonlinebalance INTEGER NOT NULL,
+ votelastvalid INTEGER NOT NULL,
+ data BLOB NOT NULL,
+ PRIMARY KEY (address, updround) )`,
+ createNormalizedOnlineBalanceIndexOnline("onlineaccountnorm", "onlineaccounts"),
+}
+
+var createTxTailTable = []string{
+ `CREATE TABLE IF NOT EXISTS txtail (
+ rnd INTEGER PRIMARY KEY NOT NULL,
+ data BLOB NOT NULL)`,
+}
+
+var createOnlineRoundParamsTable = []string{
+ `CREATE TABLE IF NOT EXISTS onlineroundparamstail(
+ rnd INTEGER NOT NULL PRIMARY KEY,
+ data BLOB NOT NULL)`, // contains a msgp encoded OnlineRoundParamsData
+}
+
+// Table containing some metadata for a future catchpoint. The `info` column
+// contains a serialized object of type catchpointFirstStageInfo.
+const createCatchpointFirstStageInfoTable = `
+ CREATE TABLE IF NOT EXISTS catchpointfirststageinfo (
+ round integer primary key NOT NULL,
+ info BLOB NOT NULL)`
+
+const createUnfinishedCatchpointsTable = `
+ CREATE TABLE IF NOT EXISTS unfinishedcatchpoints (
+ round integer primary key NOT NULL,
+ blockhash blob NOT NULL)`
+
var accountsResetExprs = []string{
`DROP TABLE IF EXISTS acctrounds`,
`DROP TABLE IF EXISTS accounttotals`,
@@ -129,12 +173,17 @@ var accountsResetExprs = []string{
`DROP TABLE IF EXISTS catchpointstate`,
`DROP TABLE IF EXISTS accounthashes`,
`DROP TABLE IF EXISTS resources`,
+ `DROP TABLE IF EXISTS onlineaccounts`,
+ `DROP TABLE IF EXISTS txtail`,
+ `DROP TABLE IF EXISTS onlineroundparamstail`,
+ `DROP TABLE IF EXISTS catchpointfirststageinfo`,
+ `DROP TABLE IF EXISTS unfinishedcatchpoints`,
}
// accountDBVersion is the database version that this binary would know how to support and how to upgrade to.
// details about the content of each of the versions can be found in the upgrade functions upgradeDatabaseSchemaXXXX
// and their descriptions.
-var accountDBVersion = int32(6)
+var accountDBVersion = int32(7)
// persistedAccountData is used for representing a single account stored on the disk. In addition to the
// basics.AccountData, it also stores complete referencing information used to maintain the base accounts
@@ -157,6 +206,18 @@ type persistedAccountData struct {
round basics.Round
}
+type persistedOnlineAccountData struct {
+ addr basics.Address
+ accountData baseOnlineAccountData
+ rowid int64
+ // the round number that is associated with the baseOnlineAccountData. This field is the corresponding one to the round field
+ // in persistedAccountData, and serves the same purpose. This value comes from account rounds table and correspond to
+ // the last trackers db commit round.
+ round basics.Round
+ // the round number that the online account is for, i.e. account state change round.
+ updRound basics.Round
+}
+
//msgp:ignore persistedResourcesData
type persistedResourcesData struct {
// addrid is the rowid of the account address that holds this resource.
@@ -240,15 +301,42 @@ type compactAccountDeltas struct {
misses []int
}
+// onlineAccountDelta track all changes of account state within a range,
+// used in conjunction wih compactOnlineAccountDeltas to group and represent per-account changes.
+// oldAcct represents the "old" state of the account in the DB, and compared against newAcct[0]
+// to determine if the acct became online or went offline.
+type onlineAccountDelta struct {
+ oldAcct persistedOnlineAccountData
+ newAcct []baseOnlineAccountData
+ nOnlineAcctDeltas int
+ address basics.Address
+ updRound []uint64
+ newStatus []basics.Status
+}
+
+type compactOnlineAccountDeltas struct {
+ // actual account deltas
+ deltas []onlineAccountDelta
+ // cache for addr to deltas index resolution
+ cache map[basics.Address]int
+ // misses holds indices of addresses for which old portion of delta needs to be loaded from disk
+ misses []int
+}
+
// catchpointState is used to store catchpoint related variables into the catchpointstate table.
type catchpointState string
const (
// catchpointStateLastCatchpoint is written by a node once a catchpoint label is created for a round
catchpointStateLastCatchpoint = catchpointState("lastCatchpoint")
- // catchpointStateWritingCatchpoint is written by a node while a catchpoint file is being created. It gets deleted once the file
- // creation is complete, and used as a way to record the fact that we've started generating the catchpoint file for that particular
- // round.
+ // This state variable is set to 1 if catchpoint's first stage is unfinished,
+ // and is 0 otherwise. Used to clear / restart the first stage after a crash.
+ // This key is set in the same db transaction as the account updates, so the
+ // unfinished first stage corresponds to the current db round.
+ catchpointStateWritingFirstStageInfo = catchpointState("writingFirstStageInfo")
+ // If there is an unfinished catchpoint, this state variable is set to
+ // the catchpoint's round. Otherwise, it is set to 0.
+ // DEPRECATED.
catchpointStateWritingCatchpoint = catchpointState("writingCatchpoint")
// catchpointCatchupState is the state of the catchup process. The variable is stored only during the catchpoint catchup process, and removed afterward.
catchpointStateCatchupState = catchpointState("catchpointCatchupState")
@@ -262,7 +350,8 @@ const (
// catchpointStateCatchupHashRound is the round that is associated with the hash of the merkle trie. Normally, it's identical to catchpointStateCatchupBalancesRound,
// however, it could differ when we catchup from a catchpoint that was created using a different version : in this case,
// we set it to zero in order to reset the merkle trie. This would force the merkle trie to be re-build on startup ( if needed ).
- catchpointStateCatchupHashRound = catchpointState("catchpointCatchupHashRound")
+ catchpointStateCatchupHashRound = catchpointState("catchpointCatchupHashRound")
+ catchpointStateCatchpointLookback = catchpointState("catchpointLookback")
)
// normalizedAccountBalance is a staging area for a catchpoint file account information before it's being added to the catchpoint staging tables.
@@ -371,6 +460,7 @@ func prepareNormalizedBalancesV6(bals []encodedBalanceRecordV6, proto config.Con
// makeCompactResourceDeltas takes an array of AccountDeltas ( one array entry per round ), and compacts the resource portions of the arrays into a single
// data structure that contains all the resources deltas changes. While doing that, the function eliminate any intermediate resources changes.
// It counts the number of changes each account get modified across the round range by specifying it in the nAcctDeltas field of the resourcesDeltas.
+// As an optimization, accountDeltas is passed as a slice and must not be modified.
func makeCompactResourceDeltas(accountDeltas []ledgercore.AccountDeltas, baseRound basics.Round, setUpdateRound bool, baseAccounts lruAccounts, baseResources lruResources) (outResourcesDeltas compactResourcesDeltas) {
if len(accountDeltas) == 0 {
return
@@ -588,6 +678,7 @@ func (a *compactResourcesDeltas) updateOld(idx int, old persistedResourcesData)
// makeCompactAccountDeltas takes an array of account AccountDeltas ( one array entry per round ), and compacts the arrays into a single
// data structure that contains all the account deltas changes. While doing that, the function eliminate any intermediate account changes.
// It counts the number of changes each account get modified across the round range by specifying it in the nAcctDeltas field of the accountDeltaCount/modifiedCreatable.
+// As an optimization, accountDeltas is passed as a slice and must not be modified.
func makeCompactAccountDeltas(accountDeltas []ledgercore.AccountDeltas, baseRound basics.Round, setUpdateRound bool, baseAccounts lruAccounts) (outAccountDeltas compactAccountDeltas) {
if len(accountDeltas) == 0 {
return
@@ -731,6 +822,149 @@ func (a *compactAccountDeltas) updateOld(idx int, old persistedAccountData) {
a.deltas[idx].oldAcct = old
}
+func (c *onlineAccountDelta) append(acctDelta ledgercore.AccountData, deltaRound basics.Round) {
+ var baseEntry baseOnlineAccountData
+ baseEntry.SetCoreAccountData(&acctDelta)
+ c.newAcct = append(c.newAcct, baseEntry)
+ c.updRound = append(c.updRound, uint64(deltaRound))
+ c.newStatus = append(c.newStatus, acctDelta.Status)
+}
+
+// makeCompactAccountDeltas takes an array of account AccountDeltas ( one array entry per round ), and compacts the arrays into a single
+// data structure that contains all the account deltas changes. While doing that, the function eliminate any intermediate account changes.
+// It counts the number of changes each account get modified across the round range by specifying it in the nAcctDeltas field of the accountDeltaCount/modifiedCreatable.
+func makeCompactOnlineAccountDeltas(accountDeltas []ledgercore.AccountDeltas, baseRound basics.Round, baseOnlineAccounts lruOnlineAccounts) (outAccountDeltas compactOnlineAccountDeltas) {
+ if len(accountDeltas) == 0 {
+ return
+ }
+
+ // the sizes of the maps here aren't super accurate, but would hopefully be a rough estimate for a reasonable starting point.
+ size := accountDeltas[0].Len()*len(accountDeltas) + 1
+ outAccountDeltas.cache = make(map[basics.Address]int, size)
+ outAccountDeltas.deltas = make([]onlineAccountDelta, 0, size)
+ outAccountDeltas.misses = make([]int, 0, size)
+
+ deltaRound := baseRound
+ for _, roundDelta := range accountDeltas {
+ deltaRound++
+ for i := 0; i < roundDelta.Len(); i++ {
+ addr, acctDelta := roundDelta.GetByIdx(i)
+ if prev, idx := outAccountDeltas.get(addr); idx != -1 {
+ updEntry := prev
+ updEntry.nOnlineAcctDeltas++
+ updEntry.append(acctDelta, deltaRound)
+ outAccountDeltas.update(idx, updEntry)
+ } else {
+ // it's a new entry.
+ newEntry := onlineAccountDelta{
+ nOnlineAcctDeltas: 1,
+ address: addr,
+ }
+ newEntry.append(acctDelta, deltaRound)
+ // the cache always has the most recent data,
+ // including deleted/expired online accounts with empty voting data
+ if baseOnlineAccountData, has := baseOnlineAccounts.read(addr); has {
+ newEntry.oldAcct = baseOnlineAccountData
+ outAccountDeltas.insert(newEntry)
+ } else {
+ outAccountDeltas.insertMissing(newEntry)
+ }
+ }
+ }
+ }
+ return
+}
+
+// accountsLoadOld updates the entries on the deltas.old map that matches the provided addresses.
+// The round number of the persistedAccountData is not updated by this function, and the caller is responsible
+// for populating this field.
+func (a *compactOnlineAccountDeltas) accountsLoadOld(tx *sql.Tx) (err error) {
+ if len(a.misses) == 0 {
+ return nil
+ }
+ // fetch the latest entry
+ selectStmt, err := tx.Prepare("SELECT rowid, data FROM onlineaccounts WHERE address=? ORDER BY updround DESC LIMIT 1")
+ if err != nil {
+ return
+ }
+ defer selectStmt.Close()
+ defer func() {
+ a.misses = nil
+ }()
+ var rowid sql.NullInt64
+ var acctDataBuf []byte
+ for _, idx := range a.misses {
+ addr := a.deltas[idx].address
+ err = selectStmt.QueryRow(addr[:]).Scan(&rowid, &acctDataBuf)
+ switch err {
+ case nil:
+ if len(acctDataBuf) > 0 {
+ persistedAcctData := &persistedOnlineAccountData{addr: addr, rowid: rowid.Int64}
+ err = protocol.Decode(acctDataBuf, &persistedAcctData.accountData)
+ if err != nil {
+ return err
+ }
+ a.updateOld(idx, *persistedAcctData)
+ } else {
+ // empty data means offline account
+ a.updateOld(idx, persistedOnlineAccountData{addr: addr, rowid: rowid.Int64})
+ }
+ case sql.ErrNoRows:
+ // we don't have that account, just return an empty record.
+ a.updateOld(idx, persistedOnlineAccountData{addr: addr})
+ err = nil
+ default:
+ // unexpected error - let the caller know that we couldn't complete the operation.
+ return err
+ }
+ }
+ return
+}
+
+// get returns accountDelta by address and its position.
+// if no such entry -1 returned
+func (a *compactOnlineAccountDeltas) get(addr basics.Address) (onlineAccountDelta, int) {
+ idx, ok := a.cache[addr]
+ if !ok {
+ return onlineAccountDelta{}, -1
+ }
+ return a.deltas[idx], idx
+}
+
+func (a *compactOnlineAccountDeltas) len() int {
+ return len(a.deltas)
+}
+
+func (a *compactOnlineAccountDeltas) getByIdx(i int) onlineAccountDelta {
+ return a.deltas[i]
+}
+
+// update replaces specific entry by idx
+func (a *compactOnlineAccountDeltas) update(idx int, delta onlineAccountDelta) {
+ a.deltas[idx] = delta
+}
+
+func (a *compactOnlineAccountDeltas) insert(delta onlineAccountDelta) int {
+ last := len(a.deltas)
+ a.deltas = append(a.deltas, delta)
+
+ if a.cache == nil {
+ a.cache = make(map[basics.Address]int)
+ }
+ a.cache[delta.address] = last
+ return last
+}
+
+func (a *compactOnlineAccountDeltas) insertMissing(delta onlineAccountDelta) {
+ idx := a.insert(delta)
+ a.misses = append(a.misses, idx)
+}
+
+// updateOld updates existing or inserts a new partial entry with only old field filled
+func (a *compactOnlineAccountDeltas) updateOld(idx int, old persistedOnlineAccountData) {
+ a.deltas[idx].oldAcct = old
+}
+
// writeCatchpointStagingBalances inserts all the account balances in the provided array into the catchpoint balance staging table catchpointbalances.
func writeCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, bals []normalizedAccountBalance) error {
insertAcctStmt, err := tx.PrepareContext(ctx, "INSERT INTO catchpointbalances(address, normalizedonlinebalance, data) VALUES(?, ?, ?)")
@@ -877,7 +1111,7 @@ func resetCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, newCatchup
"CREATE TABLE IF NOT EXISTS catchpointpendinghashes (data blob)",
"CREATE TABLE IF NOT EXISTS catchpointaccounthashes (id integer primary key, data blob)",
"CREATE TABLE IF NOT EXISTS catchpointresources (addrid INTEGER NOT NULL, aidx INTEGER NOT NULL, data BLOB NOT NULL, PRIMARY KEY (addrid, aidx) ) WITHOUT ROWID",
- createNormalizedOnlineBalanceIndex(idxnameBalances, "catchpointbalances"),
+ createNormalizedOnlineBalanceIndex(idxnameBalances, "catchpointbalances"), // should this be removed ?
createUniqueAddressBalanceIndex(idxnameAddress, "catchpointbalances"),
)
}
@@ -896,20 +1130,15 @@ func resetCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, newCatchup
// tables and update the correct balance round. This is the final step in switching onto the new catchpoint round.
func applyCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, balancesRound basics.Round, merkleRootRound basics.Round) (err error) {
stmts := []string{
- "ALTER TABLE accountbase RENAME TO accountbase_old",
- "ALTER TABLE assetcreators RENAME TO assetcreators_old",
- "ALTER TABLE accounthashes RENAME TO accounthashes_old",
- "ALTER TABLE resources RENAME TO resources_old",
+ "DROP TABLE IF EXISTS accountbase",
+ "DROP TABLE IF EXISTS assetcreators",
+ "DROP TABLE IF EXISTS accounthashes",
+ "DROP TABLE IF EXISTS resources",
"ALTER TABLE catchpointbalances RENAME TO accountbase",
"ALTER TABLE catchpointassetcreators RENAME TO assetcreators",
"ALTER TABLE catchpointaccounthashes RENAME TO accounthashes",
"ALTER TABLE catchpointresources RENAME TO resources",
-
- "DROP TABLE IF EXISTS accountbase_old",
- "DROP TABLE IF EXISTS assetcreators_old",
- "DROP TABLE IF EXISTS accounthashes_old",
- "DROP TABLE IF EXISTS resources_old",
}
for _, stmt := range stmts {
@@ -932,8 +1161,8 @@ func applyCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, balancesRou
return
}
-func getCatchpoint(tx *sql.Tx, round basics.Round) (fileName string, catchpoint string, fileSize int64, err error) {
- err = tx.QueryRow("SELECT filename, catchpoint, filesize FROM storedcatchpoints WHERE round=?", int64(round)).Scan(&fileName, &catchpoint, &fileSize)
+func getCatchpoint(ctx context.Context, q db.Queryable, round basics.Round) (fileName string, catchpoint string, fileSize int64, err error) {
+ err = q.QueryRowContext(ctx, "SELECT filename, catchpoint, filesize FROM storedcatchpoints WHERE round=?", int64(round)).Scan(&fileName, &catchpoint, &fileSize)
return
}
@@ -1075,7 +1304,56 @@ func accountsCreateResourceTable(ctx context.Context, tx *sql.Tx) error {
return nil
}
-type baseOnlineAccountData struct {
+func accountsCreateOnlineAccountsTable(ctx context.Context, tx *sql.Tx) error {
+ var exists bool
+ err := tx.QueryRowContext(ctx, "SELECT 1 FROM pragma_table_info('onlineaccounts') WHERE name='address'").Scan(&exists)
+ if err == nil {
+ // Already exists.
+ return nil
+ }
+ if err != sql.ErrNoRows {
+ return err
+ }
+ for _, stmt := range createOnlineAccountsTable {
+ _, err = tx.ExecContext(ctx, stmt)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func accountsCreateTxTailTable(ctx context.Context, tx *sql.Tx) (err error) {
+ for _, stmt := range createTxTailTable {
+ _, err = tx.ExecContext(ctx, stmt)
+ if err != nil {
+ return
+ }
+ }
+ return nil
+}
+
+func accountsCreateOnlineRoundParamsTable(ctx context.Context, tx *sql.Tx) (err error) {
+ for _, stmt := range createOnlineRoundParamsTable {
+ _, err = tx.ExecContext(ctx, stmt)
+ if err != nil {
+ return
+ }
+ }
+ return nil
+}
+
+func accountsCreateCatchpointFirstStageInfoTable(ctx context.Context, e db.Executable) error {
+ _, err := e.ExecContext(ctx, createCatchpointFirstStageInfoTable)
+ return err
+}
+
+func accountsCreateUnfinishedCatchpointsTable(ctx context.Context, e db.Executable) error {
+ _, err := e.ExecContext(ctx, createUnfinishedCatchpointsTable)
+ return err
+}
+
+type baseVotingData struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
VoteID crypto.OneTimeSignatureVerifier `codec:"A"`
@@ -1083,7 +1361,16 @@ type baseOnlineAccountData struct {
VoteFirstValid basics.Round `codec:"C"`
VoteLastValid basics.Round `codec:"D"`
VoteKeyDilution uint64 `codec:"E"`
- StateProofID merklesignature.Verifier `codec:"F"`
+ StateProofID merklesignature.Commitment `codec:"F"`
+}
+
+type baseOnlineAccountData struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ baseVotingData
+
+ MicroAlgos basics.MicroAlgos `codec:"Y"`
+ RewardsBase uint64 `codec:"Z"`
}
type baseAccountData struct {
@@ -1102,7 +1389,7 @@ type baseAccountData struct {
TotalAppParams uint64 `codec:"k"`
TotalAppLocalStates uint64 `codec:"l"`
- baseOnlineAccountData
+ baseVotingData
// UpdateRound is the round that modified this account data last. Since we want all the nodes to have the exact same
// value for this field, we'll be setting the value of this field to zero *before* the EnableAccountDataResourceSeparation
@@ -1125,12 +1412,7 @@ func (ba *baseAccountData) IsEmpty() bool {
ba.TotalAssets == 0 &&
ba.TotalAppParams == 0 &&
ba.TotalAppLocalStates == 0 &&
- ba.VoteID.MsgIsZero() &&
- ba.SelectionID.MsgIsZero() &&
- ba.StateProofID.MsgIsZero() &&
- ba.VoteFirstValid == 0 &&
- ba.VoteLastValid == 0 &&
- ba.VoteKeyDilution == 0
+ ba.baseVotingData.IsEmpty()
}
func (ba *baseAccountData) NormalizedOnlineBalance(proto config.ConsensusParams) uint64 {
@@ -1142,12 +1424,6 @@ func (ba *baseAccountData) SetCoreAccountData(ad *ledgercore.AccountData) {
ba.MicroAlgos = ad.MicroAlgos
ba.RewardsBase = ad.RewardsBase
ba.RewardedMicroAlgos = ad.RewardedMicroAlgos
- ba.VoteID = ad.VoteID
- ba.SelectionID = ad.SelectionID
- ba.StateProofID = ad.StateProofID
- ba.VoteFirstValid = ad.VoteFirstValid
- ba.VoteLastValid = ad.VoteLastValid
- ba.VoteKeyDilution = ad.VoteKeyDilution
ba.AuthAddr = ad.AuthAddr
ba.TotalAppSchemaNumUint = ad.TotalAppSchema.NumUint
ba.TotalAppSchemaNumByteSlice = ad.TotalAppSchema.NumByteSlice
@@ -1156,6 +1432,8 @@ func (ba *baseAccountData) SetCoreAccountData(ad *ledgercore.AccountData) {
ba.TotalAssets = ad.TotalAssets
ba.TotalAppParams = ad.TotalAppParams
ba.TotalAppLocalStates = ad.TotalAppLocalStates
+
+ ba.baseVotingData.SetCoreAccountData(ad)
}
func (ba *baseAccountData) SetAccountData(ad *basics.AccountData) {
@@ -1163,12 +1441,6 @@ func (ba *baseAccountData) SetAccountData(ad *basics.AccountData) {
ba.MicroAlgos = ad.MicroAlgos
ba.RewardsBase = ad.RewardsBase
ba.RewardedMicroAlgos = ad.RewardedMicroAlgos
- ba.VoteID = ad.VoteID
- ba.SelectionID = ad.SelectionID
- ba.StateProofID = ad.StateProofID
- ba.VoteFirstValid = ad.VoteFirstValid
- ba.VoteLastValid = ad.VoteLastValid
- ba.VoteKeyDilution = ad.VoteKeyDilution
ba.AuthAddr = ad.AuthAddr
ba.TotalAppSchemaNumUint = ad.TotalAppSchema.NumUint
ba.TotalAppSchemaNumByteSlice = ad.TotalAppSchema.NumByteSlice
@@ -1177,34 +1449,49 @@ func (ba *baseAccountData) SetAccountData(ad *basics.AccountData) {
ba.TotalAssets = uint64(len(ad.Assets))
ba.TotalAppParams = uint64(len(ad.AppParams))
ba.TotalAppLocalStates = uint64(len(ad.AppLocalStates))
+
+ ba.baseVotingData.VoteID = ad.VoteID
+ ba.baseVotingData.SelectionID = ad.SelectionID
+ ba.baseVotingData.StateProofID = ad.StateProofID
+ ba.baseVotingData.VoteFirstValid = ad.VoteFirstValid
+ ba.baseVotingData.VoteLastValid = ad.VoteLastValid
+ ba.baseVotingData.VoteKeyDilution = ad.VoteKeyDilution
}
func (ba *baseAccountData) GetLedgerCoreAccountData() ledgercore.AccountData {
return ledgercore.AccountData{
- AccountBaseData: ledgercore.AccountBaseData{
- Status: ba.Status,
- MicroAlgos: ba.MicroAlgos,
- RewardsBase: ba.RewardsBase,
- RewardedMicroAlgos: ba.RewardedMicroAlgos,
- AuthAddr: ba.AuthAddr,
- TotalAppSchema: basics.StateSchema{
- NumUint: ba.TotalAppSchemaNumUint,
- NumByteSlice: ba.TotalAppSchemaNumByteSlice,
- },
- TotalExtraAppPages: ba.TotalExtraAppPages,
- TotalAppParams: ba.TotalAppParams,
- TotalAppLocalStates: ba.TotalAppLocalStates,
- TotalAssetParams: ba.TotalAssetParams,
- TotalAssets: ba.TotalAssets,
- },
- VotingData: ledgercore.VotingData{
- VoteID: ba.VoteID,
- SelectionID: ba.SelectionID,
- StateProofID: ba.StateProofID,
- VoteFirstValid: ba.VoteFirstValid,
- VoteLastValid: ba.VoteLastValid,
- VoteKeyDilution: ba.VoteKeyDilution,
+ AccountBaseData: ba.GetLedgerCoreAccountBaseData(),
+ VotingData: ba.GetLedgerCoreVotingData(),
+ }
+}
+
+func (ba *baseAccountData) GetLedgerCoreAccountBaseData() ledgercore.AccountBaseData {
+ return ledgercore.AccountBaseData{
+ Status: ba.Status,
+ MicroAlgos: ba.MicroAlgos,
+ RewardsBase: ba.RewardsBase,
+ RewardedMicroAlgos: ba.RewardedMicroAlgos,
+ AuthAddr: ba.AuthAddr,
+ TotalAppSchema: basics.StateSchema{
+ NumUint: ba.TotalAppSchemaNumUint,
+ NumByteSlice: ba.TotalAppSchemaNumByteSlice,
},
+ TotalExtraAppPages: ba.TotalExtraAppPages,
+ TotalAppParams: ba.TotalAppParams,
+ TotalAppLocalStates: ba.TotalAppLocalStates,
+ TotalAssetParams: ba.TotalAssetParams,
+ TotalAssets: ba.TotalAssets,
+ }
+}
+
+func (ba *baseAccountData) GetLedgerCoreVotingData() ledgercore.VotingData {
+ return ledgercore.VotingData{
+ VoteID: ba.VoteID,
+ SelectionID: ba.SelectionID,
+ StateProofID: ba.StateProofID,
+ VoteFirstValid: ba.VoteFirstValid,
+ VoteLastValid: ba.VoteLastValid,
+ VoteKeyDilution: ba.VoteKeyDilution,
}
}
@@ -1214,21 +1501,95 @@ func (ba *baseAccountData) GetAccountData() basics.AccountData {
MicroAlgos: ba.MicroAlgos,
RewardsBase: ba.RewardsBase,
RewardedMicroAlgos: ba.RewardedMicroAlgos,
- VoteID: ba.VoteID,
- SelectionID: ba.SelectionID,
- StateProofID: ba.StateProofID,
- VoteFirstValid: ba.VoteFirstValid,
- VoteLastValid: ba.VoteLastValid,
- VoteKeyDilution: ba.VoteKeyDilution,
AuthAddr: ba.AuthAddr,
TotalAppSchema: basics.StateSchema{
NumUint: ba.TotalAppSchemaNumUint,
NumByteSlice: ba.TotalAppSchemaNumByteSlice,
},
TotalExtraAppPages: ba.TotalExtraAppPages,
+
+ VoteID: ba.VoteID,
+ SelectionID: ba.SelectionID,
+ StateProofID: ba.StateProofID,
+ VoteFirstValid: ba.VoteFirstValid,
+ VoteLastValid: ba.VoteLastValid,
+ VoteKeyDilution: ba.VoteKeyDilution,
+ }
+}
+
+// IsEmpty returns true if all of the fields are zero.
+func (bv baseVotingData) IsEmpty() bool {
+ return bv == baseVotingData{}
+}
+
+// SetCoreAccountData initializes baseVotingData from ledgercore.AccountData
+func (bv *baseVotingData) SetCoreAccountData(ad *ledgercore.AccountData) {
+ bv.VoteID = ad.VoteID
+ bv.SelectionID = ad.SelectionID
+ bv.StateProofID = ad.StateProofID
+ bv.VoteFirstValid = ad.VoteFirstValid
+ bv.VoteLastValid = ad.VoteLastValid
+ bv.VoteKeyDilution = ad.VoteKeyDilution
+}
+
+// IsVotingEmpty checks if voting data fields are empty
+func (bo *baseOnlineAccountData) IsVotingEmpty() bool {
+ return bo.baseVotingData.IsEmpty()
+}
+
+// IsEmpty return true if any of the fields are non-zero.
+func (bo *baseOnlineAccountData) IsEmpty() bool {
+ return bo.IsVotingEmpty() &&
+ bo.MicroAlgos.Raw == 0 &&
+ bo.RewardsBase == 0
+}
+
+// GetOnlineAccount returns ledgercore.OnlineAccount for top online accounts / voters
+// TODO: unify
+func (bo *baseOnlineAccountData) GetOnlineAccount(addr basics.Address, normBalance uint64) ledgercore.OnlineAccount {
+ return ledgercore.OnlineAccount{
+ Address: addr,
+ MicroAlgos: bo.MicroAlgos,
+ RewardsBase: bo.RewardsBase,
+ NormalizedOnlineBalance: normBalance,
+ VoteFirstValid: bo.VoteFirstValid,
+ VoteLastValid: bo.VoteLastValid,
+ StateProofID: bo.StateProofID,
+ }
+}
+
+// GetOnlineAccountData returns basics.OnlineAccountData for lookup agreement
+// TODO: unify with GetOnlineAccount/ledgercore.OnlineAccount
+func (bo *baseOnlineAccountData) GetOnlineAccountData(proto config.ConsensusParams, rewardsLevel uint64) ledgercore.OnlineAccountData {
+ microAlgos, _, _ := basics.WithUpdatedRewards(
+ proto, basics.Online, bo.MicroAlgos, basics.MicroAlgos{}, bo.RewardsBase, rewardsLevel,
+ )
+
+ return ledgercore.OnlineAccountData{
+ MicroAlgosWithRewards: microAlgos,
+ VotingData: ledgercore.VotingData{
+ VoteID: bo.VoteID,
+ SelectionID: bo.SelectionID,
+ StateProofID: bo.StateProofID,
+ VoteFirstValid: bo.VoteFirstValid,
+ VoteLastValid: bo.VoteLastValid,
+ VoteKeyDilution: bo.VoteKeyDilution,
+ },
}
}
+func (bo *baseOnlineAccountData) NormalizedOnlineBalance(proto config.ConsensusParams) uint64 {
+ return basics.NormalizedOnlineAccountBalance(basics.Online, bo.RewardsBase, bo.MicroAlgos, proto)
+}
+
+func (bo *baseOnlineAccountData) SetCoreAccountData(ad *ledgercore.AccountData) {
+ bo.baseVotingData.SetCoreAccountData(ad)
+
+ // MicroAlgos/RewardsBase are updated by the evaluator when accounts are touched
+ bo.MicroAlgos = ad.MicroAlgos
+ bo.RewardsBase = ad.RewardsBase
+}
+
type resourceFlags uint8
const (
@@ -1751,6 +2112,268 @@ func performResourceTableMigration(ctx context.Context, tx *sql.Tx, log func(pro
return nil
}
+func performTxTailTableMigration(ctx context.Context, tx *sql.Tx, blockDb db.Accessor) (err error) {
+ if tx == nil {
+ return nil
+ }
+
+ dbRound, err := accountsRound(tx)
+ if err != nil {
+ return fmt.Errorf("latest block number cannot be retrieved : %w", err)
+ }
+
+ // load the latest MaxTxnLife rounds in the txtail and store these in the txtail.
+ // when migrating there is only MaxTxnLife blocks in the block DB
+ // since the original txTail.commmittedUpTo preserved only (rnd+1)-MaxTxnLife = 1000 blocks back
+ err = blockDb.Atomic(func(ctx context.Context, blockTx *sql.Tx) error {
+ latestBlockRound, err := blockLatest(blockTx)
+ if err != nil {
+ return fmt.Errorf("latest block number cannot be retrieved : %w", err)
+ }
+ latestHdr, err := blockGetHdr(blockTx, dbRound)
+ if err != nil {
+ return fmt.Errorf("latest block header %d cannot be retrieved : %w", dbRound, err)
+ }
+
+ maxTxnLife := basics.Round(config.Consensus[latestHdr.CurrentProtocol].MaxTxnLife)
+ firstRound := (latestBlockRound + 1).SubSaturate(maxTxnLife)
+ // we don't need to have the txtail for round 0.
+ if firstRound == basics.Round(0) {
+ firstRound++
+ }
+ tailRounds := make([][]byte, 0, maxTxnLife)
+ for rnd := firstRound; rnd <= dbRound; rnd++ {
+ blk, err := blockGet(blockTx, rnd)
+ if err != nil {
+ return fmt.Errorf("block for round %d ( %d - %d ) cannot be retrieved : %w", rnd, firstRound, dbRound, err)
+ }
+
+ tail, err := txTailRoundFromBlock(blk)
+ if err != nil {
+ return err
+ }
+
+ encodedTail, _ := tail.encode()
+ tailRounds = append(tailRounds, encodedTail)
+ }
+
+ return txtailNewRound(ctx, tx, firstRound, tailRounds, firstRound)
+ })
+
+ return err
+}
+
+func performOnlineRoundParamsTailMigration(ctx context.Context, tx *sql.Tx, blockDb db.Accessor, newDatabase bool, initProto protocol.ConsensusVersion) (err error) {
+ totals, err := accountsTotals(ctx, tx, false)
+ if err != nil {
+ return err
+ }
+ rnd, err := accountsRound(tx)
+ if err != nil {
+ return err
+ }
+ var currentProto protocol.ConsensusVersion
+ if newDatabase {
+ currentProto = initProto
+ } else {
+ err = blockDb.Atomic(func(ctx context.Context, blockTx *sql.Tx) error {
+ hdr, err := blockGetHdr(blockTx, rnd)
+ if err != nil {
+ return err
+ }
+ currentProto = hdr.CurrentProtocol
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ }
+ onlineRoundParams := []ledgercore.OnlineRoundParamsData{
+ {
+ OnlineSupply: totals.Online.Money.Raw,
+ RewardsLevel: totals.RewardsLevel,
+ CurrentProtocol: currentProto,
+ },
+ }
+ return accountsPutOnlineRoundParams(tx, onlineRoundParams, rnd)
+}
+
+func performOnlineAccountsTableMigration(ctx context.Context, tx *sql.Tx, progress func(processed, total uint64), log logging.Logger) (err error) {
+
+ var insertOnlineAcct *sql.Stmt
+ insertOnlineAcct, err = tx.PrepareContext(ctx, "INSERT INTO onlineaccounts(address, data, normalizedonlinebalance, updround, votelastvalid) VALUES(?, ?, ?, ?, ?)")
+ if err != nil {
+ return err
+ }
+ defer insertOnlineAcct.Close()
+
+ var updateAcct *sql.Stmt
+ updateAcct, err = tx.PrepareContext(ctx, "UPDATE accountbase SET data = ? WHERE addrid = ?")
+ if err != nil {
+ return err
+ }
+ defer updateAcct.Close()
+
+ var rows *sql.Rows
+ rows, err = tx.QueryContext(ctx, "SELECT addrid, address, data, normalizedonlinebalance FROM accountbase")
+ if err != nil {
+ return err
+ }
+ defer rows.Close()
+
+ var insertRes sql.Result
+ var updateRes sql.Result
+ var rowsAffected int64
+ var processedAccounts uint64
+ var totalOnlineBaseAccounts uint64
+
+ totalOnlineBaseAccounts, err = totalAccounts(ctx, tx)
+ var total uint64
+ err = tx.QueryRowContext(ctx, "SELECT count(1) FROM accountbase").Scan(&total)
+ if err != nil {
+ if err != sql.ErrNoRows {
+ return err
+ }
+ total = 0
+ err = nil
+ }
+
+ checkSQLResult := func(e error, res sql.Result) (err error) {
+ if e != nil {
+ err = e
+ return
+ }
+ rowsAffected, err = res.RowsAffected()
+ if err != nil {
+ return err
+ }
+ if rowsAffected != 1 {
+ return fmt.Errorf("number of affected rows is not 1 - %d", rowsAffected)
+ }
+ return nil
+ }
+
+ type acctState struct {
+ old baseAccountData
+ oldEnc []byte
+ new baseAccountData
+ newEnc []byte
+ }
+ acctRehash := make(map[basics.Address]acctState)
+ var addr basics.Address
+
+ for rows.Next() {
+ var addrid sql.NullInt64
+ var addrbuf []byte
+ var encodedAcctData []byte
+ var normBal sql.NullInt64
+ err = rows.Scan(&addrid, &addrbuf, &encodedAcctData, &normBal)
+ if err != nil {
+ return err
+ }
+ if len(addrbuf) != len(addr) {
+ err = fmt.Errorf("account DB address length mismatch: %d != %d", len(addrbuf), len(addr))
+ return err
+ }
+ var ba baseAccountData
+ err = protocol.Decode(encodedAcctData, &ba)
+ if err != nil {
+ return err
+ }
+
+ // insert entries into online accounts table
+ if ba.Status == basics.Online {
+ if ba.MicroAlgos.Raw > 0 && !normBal.Valid {
+ copy(addr[:], addrbuf)
+ return fmt.Errorf("non valid norm balance for online account %s", addr.String())
+ }
+ var baseOnlineAD baseOnlineAccountData
+ baseOnlineAD.baseVotingData = ba.baseVotingData
+ baseOnlineAD.MicroAlgos = ba.MicroAlgos
+ baseOnlineAD.RewardsBase = ba.RewardsBase
+ encodedOnlineAcctData := protocol.Encode(&baseOnlineAD)
+ insertRes, err = insertOnlineAcct.ExecContext(ctx, addrbuf, encodedOnlineAcctData, normBal.Int64, ba.UpdateRound, baseOnlineAD.VoteLastValid)
+ err = checkSQLResult(err, insertRes)
+ if err != nil {
+ return err
+ }
+ }
+
+ // remove stateproofID field for offline accounts
+ if ba.Status != basics.Online && !ba.StateProofID.IsEmpty() {
+ // store old data for account hash update
+ state := acctState{old: ba, oldEnc: encodedAcctData}
+ ba.StateProofID = merklesignature.Commitment{}
+ encodedOnlineAcctData := protocol.Encode(&ba)
+ copy(addr[:], addrbuf)
+ state.new = ba
+ state.newEnc = encodedOnlineAcctData
+ acctRehash[addr] = state
+ updateRes, err = updateAcct.ExecContext(ctx, encodedOnlineAcctData, addrid.Int64)
+ err = checkSQLResult(err, updateRes)
+ if err != nil {
+ return err
+ }
+ }
+
+ processedAccounts++
+ if progress != nil {
+ progress(processedAccounts, totalOnlineBaseAccounts)
+ }
+ }
+ if err = rows.Err(); err != nil {
+ return err
+ }
+
+ // update accounthashes for the modified accounts
+ if len(acctRehash) > 0 {
+ var count uint64
+ err := tx.QueryRow("SELECT count(1) FROM accounthashes").Scan(&count)
+ if err != nil {
+ return err
+ }
+ if count == 0 {
+ // no account hashes, done
+ return nil
+ }
+
+ mc, err := MakeMerkleCommitter(tx, false)
+ if err != nil {
+ return nil
+ }
+
+ trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig)
+ if err != nil {
+ return fmt.Errorf("accountsInitialize was unable to MakeTrie: %v", err)
+ }
+ for addr, state := range acctRehash {
+ deleteHash := accountHashBuilderV6(addr, &state.old, state.oldEnc)
+ deleted, err := trie.Delete(deleteHash)
+ if err != nil {
+ return fmt.Errorf("performOnlineAccountsTableMigration failed to delete hash '%s' from merkle trie for account %v: %w", hex.EncodeToString(deleteHash), addr, err)
+ }
+ if !deleted && log != nil {
+ log.Warnf("performOnlineAccountsTableMigration failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(deleteHash), addr)
+ }
+
+ addHash := accountHashBuilderV6(addr, &state.new, state.newEnc)
+ added, err := trie.Add(addHash)
+ if err != nil {
+ return fmt.Errorf("performOnlineAccountsTableMigration attempted to add duplicate hash '%s' to merkle trie for account %v: %w", hex.EncodeToString(addHash), addr, err)
+ }
+ if !added && log != nil {
+ log.Warnf("performOnlineAccountsTableMigration attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(addHash), addr)
+ }
+ }
+ _, err = trie.Commit()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
// removeEmptyAccountData removes empty AccountData msgp-encoded entries from accountbase table
// and optionally returns list of addresses that were eliminated
func removeEmptyAccountData(tx *sql.Tx, queryAddresses bool) (num int64, addresses []basics.Address, err error) {
@@ -1812,25 +2435,25 @@ func accountDataToOnline(address basics.Address, ad *ledgercore.AccountData, pro
}
}
-func resetAccountHashes(tx *sql.Tx) (err error) {
- _, err = tx.Exec(`DELETE FROM accounthashes`)
+func resetAccountHashes(ctx context.Context, tx *sql.Tx) (err error) {
+ _, err = tx.ExecContext(ctx, `DELETE FROM accounthashes`)
return
}
-func accountsReset(tx *sql.Tx) error {
+func accountsReset(ctx context.Context, tx *sql.Tx) error {
for _, stmt := range accountsResetExprs {
- _, err := tx.Exec(stmt)
+ _, err := tx.ExecContext(ctx, stmt)
if err != nil {
return err
}
}
- _, err := db.SetUserVersion(context.Background(), tx, 0)
+ _, err := db.SetUserVersion(ctx, tx, 0)
return err
}
// accountsRound returns the tracker balances round number
-func accountsRound(tx *sql.Tx) (rnd basics.Round, err error) {
- err = tx.QueryRow("SELECT rnd FROM acctrounds WHERE id='acctbase'").Scan(&rnd)
+func accountsRound(q db.Queryable) (rnd basics.Round, err error) {
+ err = q.QueryRow("SELECT rnd FROM acctrounds WHERE id='acctbase'").Scan(&rnd)
if err != nil {
return
}
@@ -1839,8 +2462,8 @@ func accountsRound(tx *sql.Tx) (rnd basics.Round, err error) {
// accountsHashRound returns the round of the hash tree
// if the hash of the tree doesn't exists, it returns zero.
-func accountsHashRound(tx *sql.Tx) (hashrnd basics.Round, err error) {
- err = tx.QueryRow("SELECT rnd FROM acctrounds WHERE id='hashbase'").Scan(&hashrnd)
+func accountsHashRound(ctx context.Context, tx *sql.Tx) (hashrnd basics.Round, err error) {
+ err = tx.QueryRowContext(ctx, "SELECT rnd FROM acctrounds WHERE id='hashbase'").Scan(&hashrnd)
if err == sql.ErrNoRows {
hashrnd = basics.Round(0)
err = nil
@@ -1848,71 +2471,53 @@ func accountsHashRound(tx *sql.Tx) (hashrnd basics.Round, err error) {
return
}
-func accountsInitDbQueries(r db.Queryable, w db.Queryable) (*accountsDbQueries, error) {
+func accountsInitDbQueries(q db.Queryable) (*accountsDbQueries, error) {
var err error
qs := &accountsDbQueries{}
- qs.listCreatablesStmt, err = r.Prepare("SELECT rnd, asset, creator FROM acctrounds LEFT JOIN assetcreators ON assetcreators.asset <= ? AND assetcreators.ctype = ? WHERE acctrounds.id='acctbase' ORDER BY assetcreators.asset desc LIMIT ?")
+ qs.listCreatablesStmt, err = q.Prepare("SELECT acctrounds.rnd, assetcreators.asset, assetcreators.creator FROM acctrounds LEFT JOIN assetcreators ON assetcreators.asset <= ? AND assetcreators.ctype = ? WHERE acctrounds.id='acctbase' ORDER BY assetcreators.asset desc LIMIT ?")
if err != nil {
return nil, err
}
- qs.lookupStmt, err = r.Prepare("SELECT accountbase.rowid, rnd, data FROM acctrounds LEFT JOIN accountbase ON address=? WHERE id='acctbase'")
+ qs.lookupStmt, err = q.Prepare("SELECT accountbase.rowid, acctrounds.rnd, accountbase.data FROM acctrounds LEFT JOIN accountbase ON address=? WHERE id='acctbase'")
if err != nil {
return nil, err
}
- qs.lookupResourcesStmt, err = r.Prepare("SELECT accountbase.rowid, rnd, resources.data FROM acctrounds LEFT JOIN accountbase ON accountbase.address = ? LEFT JOIN resources ON accountbase.rowid = resources.addrid AND resources.aidx = ? WHERE id='acctbase'")
+ qs.lookupResourcesStmt, err = q.Prepare("SELECT accountbase.rowid, acctrounds.rnd, resources.data FROM acctrounds LEFT JOIN accountbase ON accountbase.address = ? LEFT JOIN resources ON accountbase.rowid = resources.addrid AND resources.aidx = ? WHERE id='acctbase'")
if err != nil {
return nil, err
}
- qs.lookupAllResourcesStmt, err = r.Prepare("SELECT accountbase.rowid, rnd, resources.aidx, resources.data FROM acctrounds LEFT JOIN accountbase ON accountbase.address = ? LEFT JOIN resources ON accountbase.rowid = resources.addrid WHERE id='acctbase'")
+ qs.lookupAllResourcesStmt, err = q.Prepare("SELECT accountbase.rowid, acctrounds.rnd, resources.aidx, resources.data FROM acctrounds LEFT JOIN accountbase ON accountbase.address = ? LEFT JOIN resources ON accountbase.rowid = resources.addrid WHERE id='acctbase'")
if err != nil {
return nil, err
}
- qs.lookupCreatorStmt, err = r.Prepare("SELECT rnd, creator FROM acctrounds LEFT JOIN assetcreators ON asset = ? AND ctype = ? WHERE id='acctbase'")
+ qs.lookupCreatorStmt, err = q.Prepare("SELECT acctrounds.rnd, assetcreators.creator FROM acctrounds LEFT JOIN assetcreators ON asset = ? AND ctype = ? WHERE id='acctbase'")
if err != nil {
return nil, err
}
- qs.deleteStoredCatchpoint, err = w.Prepare("DELETE FROM storedcatchpoints WHERE round=?")
- if err != nil {
- return nil, err
- }
-
- qs.insertStoredCatchpoint, err = w.Prepare("INSERT INTO storedcatchpoints(round, filename, catchpoint, filesize, pinned) VALUES(?, ?, ?, ?, 0)")
- if err != nil {
- return nil, err
- }
-
- qs.selectOldestCatchpointFiles, err = r.Prepare("SELECT round, filename FROM storedcatchpoints WHERE pinned = 0 and round <= COALESCE((SELECT round FROM storedcatchpoints WHERE pinned = 0 ORDER BY round DESC LIMIT ?, 1),0) ORDER BY round ASC LIMIT ?")
- if err != nil {
- return nil, err
- }
-
- qs.selectCatchpointStateUint64, err = r.Prepare("SELECT intval FROM catchpointstate WHERE id=?")
- if err != nil {
- return nil, err
- }
+ return qs, nil
+}
- qs.deleteCatchpointState, err = w.Prepare("DELETE FROM catchpointstate WHERE id=?")
- if err != nil {
- return nil, err
- }
+func onlineAccountsInitDbQueries(r db.Queryable) (*onlineAccountsDbQueries, error) {
+ var err error
+ qs := &onlineAccountsDbQueries{}
- qs.insertCatchpointStateUint64, err = w.Prepare("INSERT OR REPLACE INTO catchpointstate(id, intval) VALUES(?, ?)")
+ qs.lookupOnlineStmt, err = r.Prepare("SELECT onlineaccounts.rowid, onlineaccounts.updround, acctrounds.rnd, onlineaccounts.data FROM acctrounds LEFT JOIN onlineaccounts ON address=? AND updround <= ? WHERE id='acctbase' ORDER BY updround DESC LIMIT 1")
if err != nil {
return nil, err
}
- qs.insertCatchpointStateString, err = w.Prepare("INSERT OR REPLACE INTO catchpointstate(id, strval) VALUES(?, ?)")
+ qs.lookupOnlineHistoryStmt, err = r.Prepare("SELECT onlineaccounts.rowid, onlineaccounts.updround, acctrounds.rnd, onlineaccounts.data FROM acctrounds LEFT JOIN onlineaccounts ON address=? WHERE id='acctbase' ORDER BY updround ASC")
if err != nil {
return nil, err
}
- qs.selectCatchpointStateString, err = r.Prepare("SELECT strval FROM catchpointstate WHERE id=?")
+ qs.lookupOnlineTotalsStmt, err = r.Prepare("SELECT data FROM onlineroundparamstail WHERE rnd=?")
if err != nil {
return nil, err
}
@@ -2084,30 +2689,104 @@ func (qs *accountsDbQueries) lookup(addr basics.Address) (data persistedAccountD
return err
})
+ return
+}
+
+func (qs *onlineAccountsDbQueries) lookupOnline(addr basics.Address, rnd basics.Round) (data persistedOnlineAccountData, err error) {
+ err = db.Retry(func() error {
+ var buf []byte
+ var rowid sql.NullInt64
+ var updround sql.NullInt64
+ err := qs.lookupOnlineStmt.QueryRow(addr[:], rnd).Scan(&rowid, &updround, &data.round, &buf)
+ if err == nil {
+ data.addr = addr
+ if len(buf) > 0 && rowid.Valid && updround.Valid {
+ data.rowid = rowid.Int64
+ data.updRound = basics.Round(updround.Int64)
+ err = protocol.Decode(buf, &data.accountData)
+ return err
+ }
+ // we don't have that account, just return the database round.
+ return nil
+ }
+
+ // this should never happen; it indicates that we don't have a current round in the acctrounds table.
+ if err == sql.ErrNoRows {
+ // Return the zero value of data
+ return fmt.Errorf("unable to query online account data for address %v : %w", addr, err)
+ }
+
+ return err
+ })
+ return
+}
+
+func (qs *onlineAccountsDbQueries) lookupOnlineTotalsHistory(round basics.Round) (basics.MicroAlgos, error) {
+ data := ledgercore.OnlineRoundParamsData{}
+ err := db.Retry(func() error {
+ row := qs.lookupOnlineTotalsStmt.QueryRow(round)
+ var buf []byte
+ err := row.Scan(&buf)
+ if err != nil {
+ return err
+ }
+ err = protocol.Decode(buf, &data)
+ if err != nil {
+ return err
+ }
+ return nil
+ })
+ return basics.MicroAlgos{Raw: data.OnlineSupply}, err
+}
+
+func (qs *onlineAccountsDbQueries) lookupOnlineHistory(addr basics.Address) (result []persistedOnlineAccountData, rnd basics.Round, err error) {
+ err = db.Retry(func() error {
+ rows, err := qs.lookupOnlineHistoryStmt.Query(addr[:])
+ if err != nil {
+ return err
+ }
+ defer rows.Close()
+ for rows.Next() {
+ var buf []byte
+ data := persistedOnlineAccountData{}
+ err := rows.Scan(&data.rowid, &data.updRound, &rnd, &buf)
+ if err != nil {
+ return err
+ }
+ err = protocol.Decode(buf, &data.accountData)
+ if err != nil {
+ return err
+ }
+ data.addr = addr
+ result = append(result, data)
+ }
+ return err
+ })
return
}
-func (qs *accountsDbQueries) storeCatchpoint(ctx context.Context, round basics.Round, fileName string, catchpoint string, fileSize int64) (err error) {
+func storeCatchpoint(ctx context.Context, e db.Executable, round basics.Round, fileName string, catchpoint string, fileSize int64) (err error) {
err = db.Retry(func() (err error) {
- _, err = qs.deleteStoredCatchpoint.ExecContext(ctx, round)
-
+ query := "DELETE FROM storedcatchpoints WHERE round=?"
+ _, err = e.ExecContext(ctx, query, round)
if err != nil || (fileName == "" && catchpoint == "" && fileSize == 0) {
- return
+ return err
}
- _, err = qs.insertStoredCatchpoint.ExecContext(ctx, round, fileName, catchpoint, fileSize)
- return
+ query = "INSERT INTO storedcatchpoints(round, filename, catchpoint, filesize, pinned) VALUES(?, ?, ?, ?, 0)"
+ _, err = e.ExecContext(ctx, query, round, fileName, catchpoint, fileSize)
+ return err
})
return
}
-func (qs *accountsDbQueries) getOldestCatchpointFiles(ctx context.Context, fileCount int, filesToKeep int) (fileNames map[basics.Round]string, err error) {
+func getOldestCatchpointFiles(ctx context.Context, q db.Queryable, fileCount int, filesToKeep int) (fileNames map[basics.Round]string, err error) {
err = db.Retry(func() (err error) {
- var rows *sql.Rows
- rows, err = qs.selectOldestCatchpointFiles.QueryContext(ctx, filesToKeep, fileCount)
+ query := "SELECT round, filename FROM storedcatchpoints WHERE pinned = 0 and round <= COALESCE((SELECT round FROM storedcatchpoints WHERE pinned = 0 ORDER BY round DESC LIMIT ?, 1),0) ORDER BY round ASC LIMIT ?"
+ rows, err := q.QueryContext(ctx, query, filesToKeep, fileCount)
if err != nil {
- return
+ return err
}
defer rows.Close()
@@ -2117,78 +2796,90 @@ func (qs *accountsDbQueries) getOldestCatchpointFiles(ctx context.Context, fileC
var round basics.Round
err = rows.Scan(&round, &fileName)
if err != nil {
- return
+ return err
}
fileNames[round] = fileName
}
- err = rows.Err()
- return
+ return rows.Err()
})
+ if err != nil {
+ fileNames = nil
+ }
return
}
-func (qs *accountsDbQueries) readCatchpointStateUint64(ctx context.Context, stateName catchpointState) (rnd uint64, def bool, err error) {
- var val sql.NullInt64
+func readCatchpointStateUint64(ctx context.Context, q db.Queryable, stateName catchpointState) (val uint64, err error) {
err = db.Retry(func() (err error) {
- err = qs.selectCatchpointStateUint64.QueryRowContext(ctx, stateName).Scan(&val)
- if err == sql.ErrNoRows || (err == nil && !val.Valid) {
- val.Int64 = 0 // default to zero.
- err = nil
- def = true
- return
+ query := "SELECT intval FROM catchpointstate WHERE id=?"
+ var v sql.NullInt64
+ err = q.QueryRowContext(ctx, query, stateName).Scan(&v)
+ if err == sql.ErrNoRows {
+ return nil
}
- return err
+ if err != nil {
+ return err
+ }
+ if v.Valid {
+ val = uint64(v.Int64)
+ }
+ return nil
})
- return uint64(val.Int64), def, err
+ return val, err
}
-func (qs *accountsDbQueries) writeCatchpointStateUint64(ctx context.Context, stateName catchpointState, setValue uint64) (cleared bool, err error) {
+func writeCatchpointStateUint64(ctx context.Context, e db.Executable, stateName catchpointState, setValue uint64) (err error) {
err = db.Retry(func() (err error) {
if setValue == 0 {
- _, err = qs.deleteCatchpointState.ExecContext(ctx, stateName)
- cleared = true
- return err
+ return deleteCatchpointStateImpl(ctx, e, stateName)
}
// we don't know if there is an entry in the table for this state, so we'll insert/replace it just in case.
- _, err = qs.insertCatchpointStateUint64.ExecContext(ctx, stateName, setValue)
- cleared = false
+ query := "INSERT OR REPLACE INTO catchpointstate(id, intval) VALUES(?, ?)"
+ _, err = e.ExecContext(ctx, query, stateName, setValue)
return err
})
- return cleared, err
-
+ return err
}
-func (qs *accountsDbQueries) readCatchpointStateString(ctx context.Context, stateName catchpointState) (str string, def bool, err error) {
- var val sql.NullString
+func readCatchpointStateString(ctx context.Context, q db.Queryable, stateName catchpointState) (val string, err error) {
err = db.Retry(func() (err error) {
- err = qs.selectCatchpointStateString.QueryRowContext(ctx, stateName).Scan(&val)
- if err == sql.ErrNoRows || (err == nil && !val.Valid) {
- val.String = "" // default to empty string
- err = nil
- def = true
- return
+ query := "SELECT strval FROM catchpointstate WHERE id=?"
+ var v sql.NullString
+ err = q.QueryRowContext(ctx, query, stateName).Scan(&v)
+ if err == sql.ErrNoRows {
+ return nil
}
- return err
+ if err != nil {
+ return err
+ }
+
+ if v.Valid {
+ val = v.String
+ }
+ return nil
})
- return val.String, def, err
+ return val, err
}
-func (qs *accountsDbQueries) writeCatchpointStateString(ctx context.Context, stateName catchpointState, setValue string) (cleared bool, err error) {
+func writeCatchpointStateString(ctx context.Context, e db.Executable, stateName catchpointState, setValue string) (err error) {
err = db.Retry(func() (err error) {
if setValue == "" {
- _, err = qs.deleteCatchpointState.ExecContext(ctx, stateName)
- cleared = true
- return err
+ return deleteCatchpointStateImpl(ctx, e, stateName)
}
// we don't know if there is an entry in the table for this state, so we'll insert/replace it just in case.
- _, err = qs.insertCatchpointStateString.ExecContext(ctx, stateName, setValue)
- cleared = false
+ query := "INSERT OR REPLACE INTO catchpointstate(id, strval) VALUES(?, ?)"
+ _, err = e.ExecContext(ctx, query, stateName, setValue)
return err
})
- return cleared, err
+ return err
+}
+
+func deleteCatchpointStateImpl(ctx context.Context, e db.Executable, stateName catchpointState) error {
+ query := "DELETE FROM catchpointstate WHERE id=?"
+ _, err := e.ExecContext(ctx, query, stateName)
+ return err
}
func (qs *accountsDbQueries) close() {
@@ -2198,14 +2889,19 @@ func (qs *accountsDbQueries) close() {
&qs.lookupResourcesStmt,
&qs.lookupAllResourcesStmt,
&qs.lookupCreatorStmt,
- &qs.deleteStoredCatchpoint,
- &qs.insertStoredCatchpoint,
- &qs.selectOldestCatchpointFiles,
- &qs.selectCatchpointStateUint64,
- &qs.deleteCatchpointState,
- &qs.insertCatchpointStateUint64,
- &qs.selectCatchpointStateString,
- &qs.insertCatchpointStateString,
+ }
+ for _, preparedQuery := range preparedQueries {
+ if (*preparedQuery) != nil {
+ (*preparedQuery).Close()
+ *preparedQuery = nil
+ }
+ }
+}
+
+func (qs *onlineAccountsDbQueries) close() {
+ preparedQueries := []**sql.Stmt{
+ &qs.lookupOnlineStmt,
+ &qs.lookupOnlineHistoryStmt,
}
for _, preparedQuery := range preparedQueries {
if (*preparedQuery) != nil {
@@ -2224,8 +2920,16 @@ func (qs *accountsDbQueries) close() {
//
// Note that this does not check if the accounts have a vote key valid for any
// particular round (past, present, or future).
-func accountsOnlineTop(tx *sql.Tx, offset, n uint64, proto config.ConsensusParams) (map[basics.Address]*ledgercore.OnlineAccount, error) {
- rows, err := tx.Query("SELECT address, data FROM accountbase WHERE normalizedonlinebalance>0 ORDER BY normalizedonlinebalance DESC, address DESC LIMIT ? OFFSET ?", n, offset)
+func accountsOnlineTop(tx *sql.Tx, rnd basics.Round, offset uint64, n uint64, proto config.ConsensusParams) (map[basics.Address]*ledgercore.OnlineAccount, error) {
+ // onlineaccounts has historical data ordered by updround for both online and offline accounts.
+ // This means some account A might have norm balance != 0 at round N and norm balance == 0 at some round K > N.
+ // For online top query one needs to find entries not fresher than X with norm balance != 0.
+ // To do that the query groups row by address and takes the latest updround, and then filters out rows with zero nor balance.
+ rows, err := tx.Query(`SELECT address, normalizedonlinebalance, data, max(updround) FROM onlineaccounts
+WHERE updround <= ?
+GROUP BY address HAVING normalizedonlinebalance > 0
+ORDER BY normalizedonlinebalance DESC, address DESC LIMIT ? OFFSET ?`, rnd, n, offset)
+
if err != nil {
return nil, err
}
@@ -2235,12 +2939,14 @@ func accountsOnlineTop(tx *sql.Tx, offset, n uint64, proto config.ConsensusParam
for rows.Next() {
var addrbuf []byte
var buf []byte
- err = rows.Scan(&addrbuf, &buf)
+ var normBal sql.NullInt64
+ var updround sql.NullInt64
+ err = rows.Scan(&addrbuf, &normBal, &buf, &updround)
if err != nil {
return nil, err
}
- var data baseAccountData
+ var data baseOnlineAccountData
err = protocol.Decode(buf, &data)
if err != nil {
return nil, err
@@ -2252,20 +2958,70 @@ func accountsOnlineTop(tx *sql.Tx, offset, n uint64, proto config.ConsensusParam
return nil, err
}
+ if !normBal.Valid {
+ return nil, fmt.Errorf("non valid norm balance for online account %s", addr.String())
+ }
+
copy(addr[:], addrbuf)
- ad := data.GetLedgerCoreAccountData()
- res[addr] = accountDataToOnline(addr, &ad, proto)
+ // TODO: figure out protocol to use for rewards
+ // The original implementation uses current proto to recalculate norm balance
+ // In the same time, in accountsNewRound genesis protocol is used to fill norm balance value
+ // In order to be consistent with the original implementation recalculate the balance with current proto
+ normBalance := basics.NormalizedOnlineAccountBalance(basics.Online, data.RewardsBase, data.MicroAlgos, proto)
+ oa := data.GetOnlineAccount(addr, normBalance)
+ res[addr] = &oa
}
return res, rows.Err()
}
-func accountsTotals(tx *sql.Tx, catchpointStaging bool) (totals ledgercore.AccountTotals, err error) {
+func onlineAccountsAll(tx *sql.Tx, maxAccounts uint64) ([]persistedOnlineAccountData, error) {
+ rows, err := tx.Query("SELECT rowid, address, updround, data FROM onlineaccounts ORDER BY address, updround ASC")
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ result := make([]persistedOnlineAccountData, 0, maxAccounts)
+ var numAccounts uint64
+ seenAddr := make([]byte, len(basics.Address{}))
+ for rows.Next() {
+ var addrbuf []byte
+ var buf []byte
+ data := persistedOnlineAccountData{}
+ err := rows.Scan(&data.rowid, &addrbuf, &data.updRound, &buf)
+ if err != nil {
+ return nil, err
+ }
+ if len(addrbuf) != len(data.addr) {
+ err = fmt.Errorf("account DB address length mismatch: %d != %d", len(addrbuf), len(data.addr))
+ return nil, err
+ }
+ if maxAccounts > 0 {
+ if !bytes.Equal(seenAddr, addrbuf) {
+ numAccounts++
+ if numAccounts > maxAccounts {
+ break
+ }
+ copy(seenAddr, addrbuf)
+ }
+ }
+ copy(data.addr[:], addrbuf)
+ err = protocol.Decode(buf, &data.accountData)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, data)
+ }
+ return result, nil
+}
+
+func accountsTotals(ctx context.Context, q db.Queryable, catchpointStaging bool) (totals ledgercore.AccountTotals, err error) {
id := ""
if catchpointStaging {
id = "catchpointStaging"
}
- row := tx.QueryRow("SELECT online, onlinerewardunits, offline, offlinerewardunits, notparticipating, notparticipatingrewardunits, rewardslevel FROM accounttotals WHERE id=?", id)
+ row := q.QueryRowContext(ctx, "SELECT online, onlinerewardunits, offline, offlinerewardunits, notparticipating, notparticipatingrewardunits, rewardslevel FROM accounttotals WHERE id=?", id)
err = row.Scan(&totals.Online.Money.Raw, &totals.Online.RewardUnits,
&totals.Offline.Money.Raw, &totals.Offline.RewardUnits,
&totals.NotParticipating.Money.Raw, &totals.NotParticipating.RewardUnits,
@@ -2288,6 +3044,53 @@ func accountsPutTotals(tx *sql.Tx, totals ledgercore.AccountTotals, catchpointSt
return err
}
+func accountsOnlineRoundParams(tx *sql.Tx) (onlineRoundParamsData []ledgercore.OnlineRoundParamsData, endRound basics.Round, err error) {
+ rows, err := tx.Query("SELECT rnd, data FROM onlineroundparamstail ORDER BY rnd ASC")
+ if err != nil {
+ return nil, 0, err
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var buf []byte
+ err = rows.Scan(&endRound, &buf)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ var data ledgercore.OnlineRoundParamsData
+ err = protocol.Decode(buf, &data)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ onlineRoundParamsData = append(onlineRoundParamsData, data)
+ }
+ return
+}
+
+func accountsPutOnlineRoundParams(tx *sql.Tx, onlineRoundParamsData []ledgercore.OnlineRoundParamsData, startRound basics.Round) error {
+ insertStmt, err := tx.Prepare("INSERT INTO onlineroundparamstail (rnd, data) VALUES (?, ?)")
+ if err != nil {
+ return err
+ }
+
+ for i, onlineRoundParams := range onlineRoundParamsData {
+ _, err = insertStmt.Exec(startRound+basics.Round(i), protocol.Encode(&onlineRoundParams))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func accountsPruneOnlineRoundParams(tx *sql.Tx, deleteBeforeRound basics.Round) error {
+ _, err := tx.Exec("DELETE FROM onlineroundparamstail WHERE rnd<?",
+ deleteBeforeRound,
+ )
+ return err
+}
+
type accountsWriter interface {
insertAccount(addr basics.Address, normBalance uint64, data baseAccountData) (rowid int64, err error)
deleteAccount(rowid int64) (rowsAffected int64, err error)
@@ -2303,12 +3106,22 @@ type accountsWriter interface {
close()
}
+type onlineAccountsWriter interface {
+ insertOnlineAccount(addr basics.Address, normBalance uint64, data baseOnlineAccountData, updRound uint64, voteLastValid uint64) (rowid int64, err error)
+
+ close()
+}
+
type accountsSQLWriter struct {
insertCreatableIdxStmt, deleteCreatableIdxStmt *sql.Stmt
deleteByRowIDStmt, insertStmt, updateStmt *sql.Stmt
deleteResourceStmt, insertResourceStmt, updateResourceStmt *sql.Stmt
}
+type onlineAccountsSQLWriter struct {
+ insertStmt, updateStmt *sql.Stmt
+}
+
func (w *accountsSQLWriter) close() {
if w.deleteByRowIDStmt != nil {
w.deleteByRowIDStmt.Close()
@@ -2344,6 +3157,13 @@ func (w *accountsSQLWriter) close() {
}
}
+func (w *onlineAccountsSQLWriter) close() {
+ if w.insertStmt != nil {
+ w.insertStmt.Close()
+ w.insertStmt = nil
+ }
+}
+
func makeAccountsSQLWriter(tx *sql.Tx, hasAccounts bool, hasResources bool, hasCreatables bool) (w *accountsSQLWriter, err error) {
w = new(accountsSQLWriter)
@@ -2467,6 +3287,33 @@ func (w accountsSQLWriter) deleteCreatable(cidx basics.CreatableIndex, ctype bas
return
}
+func makeOnlineAccountsSQLWriter(tx *sql.Tx, hasAccounts bool) (w *onlineAccountsSQLWriter, err error) {
+ w = new(onlineAccountsSQLWriter)
+
+ if hasAccounts {
+ w.insertStmt, err = tx.Prepare("INSERT INTO onlineaccounts (address, normalizedonlinebalance, data, updround, votelastvalid) VALUES (?, ?, ?, ?, ?)")
+ if err != nil {
+ return
+ }
+
+ w.updateStmt, err = tx.Prepare("UPDATE onlineaccounts SET normalizedonlinebalance = ?, data = ?, updround = ?, votelastvalid =? WHERE rowid = ?")
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+func (w onlineAccountsSQLWriter) insertOnlineAccount(addr basics.Address, normBalance uint64, data baseOnlineAccountData, updRound uint64, voteLastValid uint64) (rowid int64, err error) {
+ result, err := w.insertStmt.Exec(addr[:], normBalance, protocol.Encode(&data), updRound, voteLastValid)
+ if err != nil {
+ return
+ }
+ rowid, err = result.LastInsertId()
+ return
+}
+
// accountsNewRound is a convenience wrapper for accountsNewRoundImpl
func accountsNewRound(
tx *sql.Tx,
@@ -2476,6 +3323,7 @@ func accountsNewRound(
hasAccounts := updates.len() > 0
hasResources := resources.len() > 0
hasCreatables := len(creatables) > 0
+
writer, err := makeAccountsSQLWriter(tx, hasAccounts, hasResources, hasCreatables)
if err != nil {
return
@@ -2485,6 +3333,23 @@ func accountsNewRound(
return accountsNewRoundImpl(writer, updates, resources, creatables, proto, lastUpdateRound)
}
+func onlineAccountsNewRound(
+ tx *sql.Tx,
+ updates compactOnlineAccountDeltas,
+ proto config.ConsensusParams, lastUpdateRound basics.Round,
+) (updatedAccounts []persistedOnlineAccountData, err error) {
+ hasAccounts := updates.len() > 0
+
+ writer, err := makeOnlineAccountsSQLWriter(tx, hasAccounts)
+ if err != nil {
+ return
+ }
+ defer writer.close()
+
+ updatedAccounts, err = onlineAccountsNewRoundImpl(writer, updates, proto, lastUpdateRound)
+ return
+}
+
// accountsNewRoundImpl updates the accountbase and assetcreators tables by applying the provided deltas to the accounts / creatables.
// The function returns a persistedAccountData for the modified accounts which can be stored in the base cache.
func accountsNewRoundImpl(
@@ -2706,6 +3571,208 @@ func accountsNewRoundImpl(
return
}
+func onlineAccountsNewRoundImpl(
+ writer onlineAccountsWriter, updates compactOnlineAccountDeltas,
+ proto config.ConsensusParams, lastUpdateRound basics.Round,
+) (updatedAccounts []persistedOnlineAccountData, err error) {
+
+ for i := 0; i < updates.len(); i++ {
+ data := updates.getByIdx(i)
+ prevAcct := data.oldAcct
+ for j := 0; j < len(data.newAcct); j++ {
+ newAcct := data.newAcct[j]
+ updRound := data.updRound[j]
+ newStatus := data.newStatus[j]
+ if prevAcct.rowid == 0 {
+ // zero rowid means we don't have a previous value.
+ if newAcct.IsEmpty() {
+ // IsEmpty means we don't have a previous value.
+ // if we didn't had it before, and we don't have anything now, just skip it.
+ } else {
+ if newStatus == basics.Online {
+ if newAcct.IsVotingEmpty() {
+ err = fmt.Errorf("empty voting data for online account %s: %v", data.address.String(), newAcct)
+ } else {
+ // create a new entry.
+ var rowid int64
+ normBalance := newAcct.NormalizedOnlineBalance(proto)
+ rowid, err = writer.insertOnlineAccount(data.address, normBalance, newAcct, updRound, uint64(newAcct.VoteLastValid))
+ if err == nil {
+ updated := persistedOnlineAccountData{
+ addr: data.address,
+ accountData: newAcct,
+ round: lastUpdateRound,
+ rowid: rowid,
+ updRound: basics.Round(updRound),
+ }
+ updatedAccounts = append(updatedAccounts, updated)
+ prevAcct = updated
+ }
+ }
+ } else if !newAcct.IsVotingEmpty() {
+ err = fmt.Errorf("non-empty voting data for non-online account %s: %v", data.address.String(), newAcct)
+ }
+ }
+ } else {
+ // non-zero rowid means we had a previous value.
+ if newAcct.IsVotingEmpty() {
+ // new value is zero then go offline
+ if newStatus == basics.Online {
+ err = fmt.Errorf("empty voting data but online account %s: %v", data.address.String(), newAcct)
+ } else {
+ var rowid int64
+ rowid, err = writer.insertOnlineAccount(data.address, 0, baseOnlineAccountData{}, updRound, 0)
+ if err == nil {
+ updated := persistedOnlineAccountData{
+ addr: data.address,
+ accountData: baseOnlineAccountData{},
+ round: lastUpdateRound,
+ rowid: rowid,
+ updRound: basics.Round(updRound),
+ }
+
+ updatedAccounts = append(updatedAccounts, updated)
+ prevAcct = updated
+ }
+ }
+ } else {
+ if prevAcct.accountData != newAcct {
+ var rowid int64
+ normBalance := newAcct.NormalizedOnlineBalance(proto)
+ rowid, err = writer.insertOnlineAccount(data.address, normBalance, newAcct, updRound, uint64(newAcct.VoteLastValid))
+ if err == nil {
+ updated := persistedOnlineAccountData{
+ addr: data.address,
+ accountData: newAcct,
+ round: lastUpdateRound,
+ rowid: rowid,
+ updRound: basics.Round(updRound),
+ }
+
+ updatedAccounts = append(updatedAccounts, updated)
+ prevAcct = updated
+ }
+ }
+ }
+ }
+
+ if err != nil {
+ return
+ }
+ }
+ }
+
+ return
+}
+
+func rowidsToChunkedArgs(rowids []int64) [][]interface{} {
+ const sqliteMaxVariableNumber = 999
+
+ numChunks := len(rowids)/sqliteMaxVariableNumber + 1
+ if len(rowids)%sqliteMaxVariableNumber == 0 {
+ numChunks--
+ }
+ chunks := make([][]interface{}, numChunks)
+ if numChunks == 1 {
+ // optimize memory consumption for the most common case
+ chunks[0] = make([]interface{}, len(rowids))
+ for i, rowid := range rowids {
+ chunks[0][i] = interface{}(rowid)
+ }
+ } else {
+ for i := 0; i < numChunks; i++ {
+ chunkSize := sqliteMaxVariableNumber
+ if i == numChunks-1 {
+ chunkSize = len(rowids) - (numChunks-1)*sqliteMaxVariableNumber
+ }
+ chunks[i] = make([]interface{}, chunkSize)
+ }
+ for i, rowid := range rowids {
+ chunkIndex := i / sqliteMaxVariableNumber
+ chunks[chunkIndex][i%sqliteMaxVariableNumber] = interface{}(rowid)
+ }
+ }
+ return chunks
+}
+
+func onlineAccountsDeleteByRowIDs(tx *sql.Tx, rowids []int64) (err error) {
+ if len(rowids) == 0 {
+ return
+ }
+
+ // sqlite3 < 3.32.0 allows SQLITE_MAX_VARIABLE_NUMBER = 999 bindings
+ // see https://www.sqlite.org/limits.html
+ // rowids might be larger => split to chunks are remove
+ chunks := rowidsToChunkedArgs(rowids)
+ for _, chunk := range chunks {
+ _, err = tx.Exec("DELETE FROM onlineaccounts WHERE rowid IN (?"+strings.Repeat(",?", len(chunk)-1)+")", chunk...)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// onlineAccountsDelete deleted entries with updRound <= expRound
+func onlineAccountsDelete(tx *sql.Tx, forgetBefore basics.Round) (err error) {
+ rows, err := tx.Query("SELECT rowid, address, updRound, data FROM onlineaccounts WHERE updRound < ? ORDER BY address, updRound DESC", forgetBefore)
+ if err != nil {
+ return err
+ }
+ defer rows.Close()
+
+ var rowids []int64
+ var rowid sql.NullInt64
+ var updRound sql.NullInt64
+ var buf []byte
+ var addrbuf []byte
+
+ var prevAddr []byte
+
+ for rows.Next() {
+ err = rows.Scan(&rowid, &addrbuf, &updRound, &buf)
+ if err != nil {
+ return err
+ }
+ if !rowid.Valid || !updRound.Valid {
+ return fmt.Errorf("onlineAccountsDelete: invalid rowid or updRound")
+ }
+ if len(addrbuf) != len(basics.Address{}) {
+ err = fmt.Errorf("account DB address length mismatch: %d != %d", len(addrbuf), len(basics.Address{}))
+ return
+ }
+
+ if !bytes.Equal(addrbuf, prevAddr) {
+ // new address
+ // if the first (latest) entry is
+ // - offline then delete all
+ // - online then safe to delete all previous except this first (latest)
+
+ // reset the state
+ prevAddr = addrbuf
+
+ var oad baseOnlineAccountData
+ err = protocol.Decode(buf, &oad)
+ if err != nil {
+ return
+ }
+ if oad.IsVotingEmpty() {
+ // delete this and all subsequent
+ rowids = append(rowids, rowid.Int64)
+ }
+
+ // restart the loop
+ // if there are some subsequent entries, they will deleted on the next iteration
+ // if no subsequent entries, the loop will reset the state and the latest entry does not get deleted
+ continue
+ }
+ // delete all subsequent entries
+ rowids = append(rowids, rowid.Int64)
+ }
+
+ return onlineAccountsDeleteByRowIDs(tx, rowids)
+}
+
// updates the round number associated with the current account data.
func updateAccountsRound(tx *sql.Tx, rnd basics.Round) (err error) {
res, err := tx.Exec("UPDATE acctrounds SET rnd=? WHERE id='acctbase' AND rnd<?", rnd, rnd)
@@ -2737,8 +3804,8 @@ func updateAccountsRound(tx *sql.Tx, rnd basics.Round) (err error) {
}
// updates the round number associated with the hash of current account data.
-func updateAccountsHashRound(tx *sql.Tx, hashRound basics.Round) (err error) {
- res, err := tx.Exec("INSERT OR REPLACE INTO acctrounds(id,rnd) VALUES('hashbase',?)", hashRound)
+func updateAccountsHashRound(ctx context.Context, tx *sql.Tx, hashRound basics.Round) (err error) {
+ res, err := tx.ExecContext(ctx, "INSERT OR REPLACE INTO acctrounds(id,rnd) VALUES('hashbase',?)", hashRound)
if err != nil {
return
}
@@ -2757,7 +3824,7 @@ func updateAccountsHashRound(tx *sql.Tx, hashRound basics.Round) (err error) {
// totalAccounts returns the total number of accounts
func totalAccounts(ctx context.Context, tx *sql.Tx) (total uint64, err error) {
- err = tx.QueryRowContext(ctx, "SELECT count(*) FROM accountbase").Scan(&total)
+ err = tx.QueryRowContext(ctx, "SELECT count(1) FROM accountbase").Scan(&total)
if err == sql.ErrNoRows {
total = 0
err = nil
@@ -3003,6 +4070,10 @@ func (iterator *encodedAccountsBatchIter) Close() {
iterator.accountsRows.Close()
iterator.accountsRows = nil
}
+ if iterator.resourcesRows != nil {
+ iterator.resourcesRows.Close()
+ iterator.resourcesRows = nil
+ }
}
// orderedAccountsIterStep is used by orderedAccountsIter to define the current step
@@ -3066,10 +4137,14 @@ func processAllResources(
callback func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte) error,
) (pendingRow, error) {
var err error
+
+ // Declare variabled outside of the loop to prevent allocations per iteration.
+ // At least resData is resolved as "escaped" because of passing it by a pointer to protocol.Decode()
+ var buf []byte
+ var addrid int64
+ var aidx basics.CreatableIndex
+ var resData resourcesData
for {
- var buf []byte
- var addrid int64
- var aidx basics.CreatableIndex
if pr.addrid != 0 {
// some accounts may not have resources, consider the following case:
// acct 1 and 3 has resources, account 2 does not
@@ -3107,7 +4182,7 @@ func processAllResources(
return pendingRow{addrid, aidx, buf}, err
}
}
- var resData resourcesData
+ resData = resourcesData{}
err = protocol.Decode(buf, &resData)
if err != nil {
return pendingRow{}, err
@@ -3131,10 +4206,12 @@ func processAllBaseAccountRecords(
var prevAddr basics.Address
var err error
count := 0
+
+ var accountData baseAccountData
+ var addrbuf []byte
+ var buf []byte
+ var rowid int64
for baseRows.Next() {
- var addrbuf []byte
- var buf []byte
- var rowid int64
err = baseRows.Scan(&rowid, &addrbuf, &buf)
if err != nil {
return 0, pendingRow{}, err
@@ -3147,7 +4224,7 @@ func processAllBaseAccountRecords(
copy(addr[:], addrbuf)
- var accountData baseAccountData
+ accountData = baseAccountData{}
err = protocol.Decode(buf, &accountData)
if err != nil {
return 0, pendingRow{}, err
@@ -3597,3 +4674,271 @@ func (pac *persistedAccountData) before(other *persistedAccountData) bool {
func (prd *persistedResourcesData) before(other *persistedResourcesData) bool {
return prd.round < other.round
}
+
+// before compares the round numbers of two persistedAccountData and determines if the current persistedAccountData
+// happened before the other.
+func (pac *persistedOnlineAccountData) before(other *persistedOnlineAccountData) bool {
+ return pac.round < other.round
+}
+
+// txTailRoundLease is used as part of txTailRound for storing
+// a single lease.
+type txTailRoundLease struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Sender basics.Address `codec:"s"`
+ Lease [32]byte `codec:"l,allocbound=-"`
+ TxnIdx uint64 `code:"i"` //!-- index of the entry in TxnIDs/LastValid
+}
+
+// TxTailRound contains the information about a single round of transactions.
+// The TxnIDs and LastValid would both be of the same length, and are stored
+// in that way for efficient message=pack encoding. The Leases would point to the
+// respective transaction index. Note that this isn’t optimized for storing
+// leases, as leases are extremely rare.
+type txTailRound struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ TxnIDs []transactions.Txid `codec:"i,allocbound=-"`
+ LastValid []basics.Round `codec:"v,allocbound=-"`
+ Leases []txTailRoundLease `codec:"l,allocbound=-"`
+ Hdr bookkeeping.BlockHeader `codec:"h,allocbound=-"`
+}
+
+// encode the transaction tail data into a serialized form, and return the serialized data
+// as well as the hash of the data.
+func (t *txTailRound) encode() ([]byte, crypto.Digest) {
+ tailData := protocol.Encode(t)
+ hash := crypto.Hash(tailData)
+ return tailData, hash
+}
+
+func txTailRoundFromBlock(blk bookkeeping.Block) (*txTailRound, error) {
+ payset, err := blk.DecodePaysetFlat()
+ if err != nil {
+ return nil, err
+ }
+
+ tail := &txTailRound{}
+
+ tail.TxnIDs = make([]transactions.Txid, len(payset))
+ tail.LastValid = make([]basics.Round, len(payset))
+ tail.Hdr = blk.BlockHeader
+
+ for txIdxtxid, txn := range payset {
+ tail.TxnIDs[txIdxtxid] = txn.ID()
+ tail.LastValid[txIdxtxid] = txn.Txn.LastValid
+ if txn.Txn.Lease != [32]byte{} {
+ tail.Leases = append(tail.Leases, txTailRoundLease{
+ Sender: txn.Txn.Sender,
+ Lease: txn.Txn.Lease,
+ TxnIdx: uint64(txIdxtxid),
+ })
+ }
+ }
+ return tail, nil
+}
+
+func txtailNewRound(ctx context.Context, tx *sql.Tx, baseRound basics.Round, roundData [][]byte, forgetBeforeRound basics.Round) error {
+ insertStmt, err := tx.PrepareContext(ctx, "INSERT INTO txtail(rnd, data) VALUES(?, ?)")
+ if err != nil {
+ return err
+ }
+ defer insertStmt.Close()
+
+ for i, data := range roundData {
+ _, err = insertStmt.ExecContext(ctx, int(baseRound)+i, data[:])
+ if err != nil {
+ return err
+ }
+ }
+
+ _, err = tx.ExecContext(ctx, "DELETE FROM txtail WHERE rnd < ?", forgetBeforeRound)
+ return err
+}
+
+func loadTxTail(ctx context.Context, tx *sql.Tx, dbRound basics.Round) (roundData []*txTailRound, roundHash []crypto.Digest, baseRound basics.Round, err error) {
+ rows, err := tx.QueryContext(ctx, "SELECT rnd, data FROM txtail ORDER BY rnd DESC")
+ if err != nil {
+ return nil, nil, 0, err
+ }
+ defer rows.Close()
+
+ expectedRound := dbRound
+ for rows.Next() {
+ var round basics.Round
+ var data []byte
+ err = rows.Scan(&round, &data)
+ if err != nil {
+ return nil, nil, 0, err
+ }
+ if round != expectedRound {
+ return nil, nil, 0, fmt.Errorf("txtail table contain unexpected round %d; round %d was expected", round, expectedRound)
+ }
+ tail := &txTailRound{}
+ err = protocol.Decode(data, tail)
+ if err != nil {
+ return nil, nil, 0, err
+ }
+ roundData = append(roundData, tail)
+ roundHash = append(roundHash, crypto.Hash(data))
+ expectedRound--
+ }
+ // reverse the array ordering in-place so that it would be incremental order.
+ for i := 0; i < len(roundData)/2; i++ {
+ roundData[i], roundData[len(roundData)-i-1] = roundData[len(roundData)-i-1], roundData[i]
+ roundHash[i], roundHash[len(roundHash)-i-1] = roundHash[len(roundHash)-i-1], roundHash[i]
+ }
+ return roundData, roundHash, expectedRound + 1, nil
+}
+
+// For the `catchpointfirststageinfo` table.
+type catchpointFirstStageInfo struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Totals ledgercore.AccountTotals `codec:"accountTotals"`
+ TrieBalancesHash crypto.Digest `codec:"trieBalancesHash"`
+ // Total number of accounts in the catchpoint data file. Only set when catchpoint
+ // data files are generated.
+ TotalAccounts uint64 `codec:"accountsCount"`
+ // Total number of chunks in the catchpoint data file. Only set when catchpoint
+ // data files are generated.
+ TotalChunks uint64 `codec:"chunksCount"`
+ // BiggestChunkLen is the size in the bytes of the largest chunk, used when re-packing.
+ BiggestChunkLen uint64 `codec:"biggestChunk"`
+}
+
+func insertOrReplaceCatchpointFirstStageInfo(ctx context.Context, e db.Executable, round basics.Round, info *catchpointFirstStageInfo) error {
+ infoSerialized := protocol.Encode(info)
+ f := func() error {
+ query := "INSERT OR REPLACE INTO catchpointfirststageinfo(round, info) VALUES(?, ?)"
+ _, err := e.ExecContext(ctx, query, round, infoSerialized)
+ return err
+ }
+ return db.Retry(f)
+}
+
+func selectCatchpointFirstStageInfo(ctx context.Context, q db.Queryable, round basics.Round) (catchpointFirstStageInfo, bool /*exists*/, error) {
+ var data []byte
+ f := func() error {
+ query := "SELECT info FROM catchpointfirststageinfo WHERE round=?"
+ err := q.QueryRowContext(ctx, query, round).Scan(&data)
+ if err == sql.ErrNoRows {
+ data = nil
+ return nil
+ }
+ return err
+ }
+ err := db.Retry(f)
+ if err != nil {
+ return catchpointFirstStageInfo{}, false, err
+ }
+
+ if data == nil {
+ return catchpointFirstStageInfo{}, false, nil
+ }
+
+ var res catchpointFirstStageInfo
+ err = protocol.Decode(data, &res)
+ if err != nil {
+ return catchpointFirstStageInfo{}, false, err
+ }
+
+ return res, true, nil
+}
+
+func selectOldCatchpointFirstStageInfoRounds(ctx context.Context, q db.Queryable, maxRound basics.Round) ([]basics.Round, error) {
+ var res []basics.Round
+
+ f := func() error {
+ query := "SELECT round FROM catchpointfirststageinfo WHERE round <= ?"
+ rows, err := q.QueryContext(ctx, query, maxRound)
+ if err != nil {
+ return err
+ }
+
+ // Clear `res` in case this function is repeated.
+ res = res[:0]
+ for rows.Next() {
+ var r basics.Round
+ err = rows.Scan(&r)
+ if err != nil {
+ return err
+ }
+ res = append(res, r)
+ }
+
+ return nil
+ }
+ err := db.Retry(f)
+ if err != nil {
+ return nil, err
+ }
+
+ return res, nil
+}
+
+func deleteOldCatchpointFirstStageInfo(ctx context.Context, e db.Executable, maxRoundToDelete basics.Round) error {
+ f := func() error {
+ query := "DELETE FROM catchpointfirststageinfo WHERE round <= ?"
+ _, err := e.ExecContext(ctx, query, maxRoundToDelete)
+ return err
+ }
+ return db.Retry(f)
+}
+
+func insertUnfinishedCatchpoint(ctx context.Context, e db.Executable, round basics.Round, blockHash crypto.Digest) error {
+ f := func() error {
+ query := "INSERT INTO unfinishedcatchpoints(round, blockhash) VALUES(?, ?)"
+ _, err := e.ExecContext(ctx, query, round, blockHash[:])
+ return err
+ }
+ return db.Retry(f)
+}
+
+type unfinishedCatchpointRecord struct {
+ round basics.Round
+ blockHash crypto.Digest
+}
+
+func selectUnfinishedCatchpoints(ctx context.Context, q db.Queryable) ([]unfinishedCatchpointRecord, error) {
+ var res []unfinishedCatchpointRecord
+
+ f := func() error {
+ query := "SELECT round, blockhash FROM unfinishedcatchpoints ORDER BY round"
+ rows, err := q.QueryContext(ctx, query)
+ if err != nil {
+ return err
+ }
+
+ // Clear `res` in case this function is repeated.
+ res = res[:0]
+ for rows.Next() {
+ var record unfinishedCatchpointRecord
+ var blockHash []byte
+ err = rows.Scan(&record.round, &blockHash)
+ if err != nil {
+ return err
+ }
+ copy(record.blockHash[:], blockHash)
+ res = append(res, record)
+ }
+
+ return nil
+ }
+ err := db.Retry(f)
+ if err != nil {
+ return nil, err
+ }
+
+ return res, nil
+}
+
+func deleteUnfinishedCatchpoint(ctx context.Context, e db.Executable, round basics.Round) error {
+ f := func() error {
+ query := "DELETE FROM unfinishedcatchpoints WHERE round = ?"
+ _, err := e.ExecContext(ctx, query, round)
+ return err
+ }
+ return db.Retry(f)
+}
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index ca4432b6a..a13547294 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -26,6 +26,7 @@ import (
"fmt"
"math/rand"
"os"
+ "reflect"
"sort"
"strconv"
"strings"
@@ -37,7 +38,9 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/algorand/go-algorand/crypto/merkletrie"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger/ledgercore"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
@@ -46,11 +49,11 @@ import (
"github.com/algorand/go-algorand/util/db"
)
-func accountsInitTest(tb testing.TB, tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool) {
- newDB, err := accountsInit(tx, initAccounts, proto)
+func accountsInitTest(tb testing.TB, tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) {
+ newDB, err := accountsInit(tx, initAccounts, config.Consensus[proto])
require.NoError(tb, err)
- err = accountsAddNormalizedBalance(tx, proto)
+ err = accountsAddNormalizedBalance(tx, config.Consensus[proto])
require.NoError(tb, err)
err = accountsCreateResourceTable(context.Background(), tx)
@@ -59,6 +62,27 @@ func accountsInitTest(tb testing.TB, tx *sql.Tx, initAccounts map[basics.Address
err = performResourceTableMigration(context.Background(), tx, nil)
require.NoError(tb, err)
+ err = accountsCreateOnlineAccountsTable(context.Background(), tx)
+ require.NoError(tb, err)
+
+ err = accountsCreateTxTailTable(context.Background(), tx)
+ require.NoError(tb, err)
+
+ err = performOnlineAccountsTableMigration(context.Background(), tx, nil, nil)
+ require.NoError(tb, err)
+
+ // since this is a test that starts from genesis, there is no tail that needs to be migrated.
+ // we'll pass a nil here in order to ensure we still call this method, although it would
+ // be a noop.
+ err = performTxTailTableMigration(context.Background(), nil, db.Accessor{})
+ require.NoError(tb, err)
+
+ err = accountsCreateOnlineRoundParamsTable(context.Background(), tx)
+ require.NoError(tb, err)
+
+ err = performOnlineRoundParamsTailMigration(context.Background(), tx, db.Accessor{}, true, proto)
+ require.NoError(tb, err)
+
return newDB
}
@@ -67,7 +91,7 @@ func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics.
require.NoError(t, err)
require.Equal(t, r, rnd)
- aq, err := accountsInitDbQueries(tx, tx)
+ aq, err := accountsInitDbQueries(tx)
require.NoError(t, err)
defer aq.close()
@@ -96,7 +120,7 @@ func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics.
require.NoError(t, err)
require.Equal(t, all, accts)
- totals, err := accountsTotals(tx, false)
+ totals, err := accountsTotals(context.Background(), tx, false)
require.NoError(t, err)
require.Equal(t, totalOnline, totals.Online.Money.Raw, "mismatching total online money")
require.Equal(t, totalOffline, totals.Offline.Money.Raw)
@@ -119,28 +143,28 @@ func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics.
}
}
- for i := 0; i < len(onlineAccounts); i++ {
- dbtop, err := accountsOnlineTop(tx, 0, uint64(i), proto)
- require.NoError(t, err)
- require.Equal(t, i, len(dbtop))
+ // Compute the top-N accounts ourselves
+ var testtop []ledgercore.OnlineAccount
+ for _, data := range onlineAccounts {
+ testtop = append(testtop, *data)
+ }
- // Compute the top-N accounts ourselves
- var testtop []ledgercore.OnlineAccount
- for _, data := range onlineAccounts {
- testtop = append(testtop, *data)
+ sort.Slice(testtop, func(i, j int) bool {
+ ibal := testtop[i].NormalizedOnlineBalance
+ jbal := testtop[j].NormalizedOnlineBalance
+ if ibal > jbal {
+ return true
+ }
+ if ibal < jbal {
+ return false
}
+ return bytes.Compare(testtop[i].Address[:], testtop[j].Address[:]) > 0
+ })
- sort.Slice(testtop, func(i, j int) bool {
- ibal := testtop[i].NormalizedOnlineBalance
- jbal := testtop[j].NormalizedOnlineBalance
- if ibal > jbal {
- return true
- }
- if ibal < jbal {
- return false
- }
- return bytes.Compare(testtop[i].Address[:], testtop[j].Address[:]) > 0
- })
+ for i := 0; i < len(onlineAccounts); i++ {
+ dbtop, err := accountsOnlineTop(tx, rnd, 0, uint64(i), proto)
+ require.NoError(t, err)
+ require.Equal(t, i, len(dbtop))
for j := 0; j < i; j++ {
_, ok := dbtop[testtop[j].Address]
@@ -148,7 +172,7 @@ func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics.
}
}
- top, err := accountsOnlineTop(tx, 0, uint64(len(onlineAccounts)+1), proto)
+ top, err := accountsOnlineTop(tx, rnd, 0, uint64(len(onlineAccounts)+1), proto)
require.NoError(t, err)
require.Equal(t, len(top), len(onlineAccounts))
}
@@ -167,7 +191,7 @@ func TestAccountDBInit(t *testing.T) {
defer tx.Rollback()
accts := ledgertesting.RandomAccounts(20, true)
- newDB := accountsInitTest(t, tx, accts, proto)
+ newDB := accountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion)
require.True(t, newDB)
checkAccounts(t, tx, 0, accts)
@@ -228,10 +252,14 @@ func TestAccountDBRound(t *testing.T) {
defer tx.Rollback()
accts := ledgertesting.RandomAccounts(20, true)
- accountsInitTest(t, tx, accts, proto)
+ accountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion)
checkAccounts(t, tx, 0, accts)
- totals, err := accountsTotals(tx, false)
+ totals, err := accountsTotals(context.Background(), tx, false)
require.NoError(t, err)
+ expectedOnlineRoundParams, endRound, err := accountsOnlineRoundParams(tx)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(expectedOnlineRoundParams))
+ require.Equal(t, 0, int(endRound))
// used to determine how many creatables element will be in the test per iteration
numElementsPerSegment := 10
@@ -242,9 +270,11 @@ func TestAccountDBRound(t *testing.T) {
expectedDbImage := make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable)
var baseAccounts lruAccounts
var baseResources lruResources
+ var baseOnlineAccounts lruOnlineAccounts
var newacctsTotals map[basics.Address]ledgercore.AccountData
baseAccounts.init(nil, 100, 80)
baseResources.init(nil, 100, 80)
+ baseOnlineAccounts.init(nil, 100, 80)
for i := 1; i < 10; i++ {
var updates ledgercore.AccountDeltas
updates, newacctsTotals, _ = ledgertesting.RandomDeltasFull(20, accts, 0, &lastCreatableID)
@@ -253,12 +283,17 @@ func TestAccountDBRound(t *testing.T) {
ctbsWithDeletes := randomCreatableSampling(i, ctbsList, randomCtbs,
expectedDbImage, numElementsPerSegment)
- updatesCnt := makeCompactAccountDeltas([]ledgercore.AccountDeltas{updates}, basics.Round(i), true, baseAccounts)
- resourceUpdatesCnt := makeCompactResourceDeltas([]ledgercore.AccountDeltas{updates}, basics.Round(i), true, baseAccounts, baseResources)
+ oldBase := i - 1
+ updatesCnt := makeCompactAccountDeltas([]ledgercore.AccountDeltas{updates}, basics.Round(oldBase), true, baseAccounts)
+ resourceUpdatesCnt := makeCompactResourceDeltas([]ledgercore.AccountDeltas{updates}, basics.Round(oldBase), true, baseAccounts, baseResources)
+ updatesOnlineCnt := makeCompactOnlineAccountDeltas([]ledgercore.AccountDeltas{updates}, basics.Round(oldBase), baseOnlineAccounts)
err = updatesCnt.accountsLoadOld(tx)
require.NoError(t, err)
+ err = updatesOnlineCnt.accountsLoadOld(tx)
+ require.NoError(t, err)
+
knownAddresses := make(map[basics.Address]int64)
for _, delta := range updatesCnt.deltas {
knownAddresses[delta.oldAcct.addr] = delta.oldAcct.rowid
@@ -269,6 +304,11 @@ func TestAccountDBRound(t *testing.T) {
err = accountsPutTotals(tx, totals, false)
require.NoError(t, err)
+ onlineRoundParams := ledgercore.OnlineRoundParamsData{RewardsLevel: totals.RewardsLevel, OnlineSupply: totals.Online.Money.Raw, CurrentProtocol: protocol.ConsensusCurrentVersion}
+ err = accountsPutOnlineRoundParams(tx, []ledgercore.OnlineRoundParamsData{onlineRoundParams}, basics.Round(i))
+ require.NoError(t, err)
+ expectedOnlineRoundParams = append(expectedOnlineRoundParams, onlineRoundParams)
+
updatedAccts, updatesResources, err := accountsNewRound(tx, updatesCnt, resourceUpdatesCnt, ctbsWithDeletes, proto, basics.Round(i))
require.NoError(t, err)
require.Equal(t, updatesCnt.len(), len(updatedAccts))
@@ -277,9 +317,17 @@ func TestAccountDBRound(t *testing.T) {
numResUpdates += len(rs)
}
require.Equal(t, resourceUpdatesCnt.len(), numResUpdates)
+
+ updatedOnlineAccts, err := onlineAccountsNewRound(tx, updatesOnlineCnt, proto, basics.Round(i))
+ require.NoError(t, err)
+
err = updateAccountsRound(tx, basics.Round(i))
require.NoError(t, err)
+ // TODO: calculate exact number of updates?
+ // newly created online accounts + accounts went offline + voting data/stake modifed accounts
+ require.NotEmpty(t, updatedOnlineAccts)
+
checkAccounts(t, tx, basics.Round(i), accts)
checkCreatables(t, tx, i, expectedDbImage)
}
@@ -291,10 +339,15 @@ func TestAccountDBRound(t *testing.T) {
}
expectedTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, 0, proto, nil, ledgercore.AccountTotals{})
- actualTotals, err := accountsTotals(tx, false)
+ actualTotals, err := accountsTotals(context.Background(), tx, false)
require.NoError(t, err)
require.Equal(t, expectedTotals, actualTotals)
+ actualOnlineRoundParams, endRound, err := accountsOnlineRoundParams(tx)
+ require.NoError(t, err)
+ require.Equal(t, expectedOnlineRoundParams, actualOnlineRoundParams)
+ require.Equal(t, 9, int(endRound))
+
// check LoadAllFullAccounts
loaded := make(map[basics.Address]basics.AccountData, len(accts))
acctCb := func(addr basics.Address, data basics.AccountData) {
@@ -352,7 +405,7 @@ func TestAccountDBInMemoryAcct(t *testing.T) {
defer tx.Rollback()
accts := ledgertesting.RandomAccounts(1, true)
- accountsInitTest(t, tx, accts, proto)
+ accountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion)
addr := ledgertesting.RandomAddress()
// lastCreatableID stores asset or app max used index to get rid of conflicts
@@ -413,8 +466,6 @@ func TestAccountDBInMemoryAcct(t *testing.T) {
func TestAccountStorageWithStateProofID(t *testing.T) {
partitiontest.PartitionTest(t)
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
-
dbs, _ := dbOpenTest(t, true)
setDbLogging(t, dbs)
defer dbs.Close()
@@ -424,14 +475,14 @@ func TestAccountStorageWithStateProofID(t *testing.T) {
defer tx.Rollback()
accts := ledgertesting.RandomAccounts(20, false)
- _ = accountsInitTest(t, tx, accts, proto)
+ _ = accountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion)
checkAccounts(t, tx, 0, accts)
require.True(t, allAccountsHaveStateProofPKs(accts))
}
func allAccountsHaveStateProofPKs(accts map[basics.Address]basics.AccountData) bool {
for _, data := range accts {
- if data.StateProofID.IsEmpty() {
+ if data.Status == basics.Online && data.StateProofID.IsEmpty() {
return false
}
}
@@ -565,7 +616,7 @@ func generateRandomTestingAccountBalances(numAccounts int) (updates map[basics.A
secrets := crypto.GenerateOneTimeSignatureSecrets(15, 500)
pubVrfKey, _ := crypto.VrfKeygenFromSeed([32]byte{0, 1, 2, 3})
var stateProofID merklesignature.Verifier
- crypto.RandBytes(stateProofID[:])
+ crypto.RandBytes(stateProofID.Commitment[:])
updates = make(map[basics.Address]basics.AccountData, numAccounts)
for i := 0; i < numAccounts; i++ {
@@ -577,7 +628,7 @@ func generateRandomTestingAccountBalances(numAccounts int) (updates map[basics.A
RewardedMicroAlgos: basics.MicroAlgos{Raw: 0x000ffffffffffffff / uint64(numAccounts)},
VoteID: secrets.OneTimeSignatureVerifier,
SelectionID: pubVrfKey,
- StateProofID: stateProofID,
+ StateProofID: stateProofID.Commitment,
VoteFirstValid: basics.Round(0x000ffffffffffffff),
VoteLastValid: basics.Round(0x000ffffffffffffff),
VoteKeyDilution: 0x000ffffffffffffff,
@@ -607,7 +658,7 @@ func generateRandomTestingAccountBalances(numAccounts int) (updates map[basics.A
return
}
-func benchmarkInitBalances(b testing.TB, numAccounts int, dbs db.Pair, proto config.ConsensusParams) (updates map[basics.Address]basics.AccountData) {
+func benchmarkInitBalances(b testing.TB, numAccounts int, dbs db.Pair, proto protocol.ConsensusVersion) (updates map[basics.Address]basics.AccountData) {
tx, err := dbs.Wdb.Handle.Begin()
require.NoError(b, err)
@@ -627,12 +678,11 @@ func cleanupTestDb(dbs db.Pair, dbName string, inMemory bool) {
}
func benchmarkReadingAllBalances(b *testing.B, inMemory bool) {
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
dbs, fn := dbOpenTest(b, inMemory)
setDbLogging(b, dbs)
defer cleanupTestDb(dbs, fn, inMemory)
- benchmarkInitBalances(b, b.N, dbs, proto)
+ benchmarkInitBalances(b, b.N, dbs, protocol.ConsensusCurrentVersion)
tx, err := dbs.Rdb.Handle.Begin()
require.NoError(b, err)
@@ -659,14 +709,13 @@ func BenchmarkReadingAllBalancesDisk(b *testing.B) {
}
func benchmarkReadingRandomBalances(b *testing.B, inMemory bool) {
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
dbs, fn := dbOpenTest(b, inMemory)
setDbLogging(b, dbs)
defer cleanupTestDb(dbs, fn, inMemory)
- accounts := benchmarkInitBalances(b, b.N, dbs, proto)
+ accounts := benchmarkInitBalances(b, b.N, dbs, protocol.ConsensusCurrentVersion)
- qs, err := accountsInitDbQueries(dbs.Rdb.Handle, dbs.Wdb.Handle)
+ qs, err := accountsInitDbQueries(dbs.Rdb.Handle)
require.NoError(b, err)
// read all the balances in the database, shuffled
@@ -698,14 +747,13 @@ func BenchmarkWritingRandomBalancesDisk(b *testing.B) {
batchCount := 1000
startupAcct := 5
initDatabase := func() (*sql.Tx, func(), error) {
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
dbs, fn := dbOpenTest(b, false)
setDbLogging(b, dbs)
cleanup := func() {
cleanupTestDb(dbs, fn, false)
}
- benchmarkInitBalances(b, startupAcct, dbs, proto)
+ benchmarkInitBalances(b, startupAcct, dbs, protocol.ConsensusCurrentVersion)
dbs.Wdb.SetSynchronousMode(context.Background(), db.SynchronousModeOff, false)
// insert 1M accounts data, in batches of 1000
@@ -833,10 +881,10 @@ func TestAccountsReencoding(t *testing.T) {
secrets := crypto.GenerateOneTimeSignatureSecrets(15, 500)
pubVrfKey, _ := crypto.VrfKeygenFromSeed([32]byte{0, 1, 2, 3})
var stateProofID merklesignature.Verifier
- crypto.RandBytes(stateProofID[:])
+ crypto.RandBytes(stateProofID.Commitment[:])
err := dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- accountsInitTest(t, tx, make(map[basics.Address]basics.AccountData), config.Consensus[protocol.ConsensusCurrentVersion])
+ accountsInitTest(t, tx, make(map[basics.Address]basics.AccountData), protocol.ConsensusCurrentVersion)
for _, oldAccData := range oldEncodedAccountsData {
addr := ledgertesting.RandomAddress()
@@ -854,7 +902,7 @@ func TestAccountsReencoding(t *testing.T) {
RewardedMicroAlgos: basics.MicroAlgos{Raw: 0x000ffffffffffffff},
VoteID: secrets.OneTimeSignatureVerifier,
SelectionID: pubVrfKey,
- StateProofID: stateProofID,
+ StateProofID: stateProofID.Commitment,
VoteFirstValid: basics.Round(0x000ffffffffffffff),
VoteLastValid: basics.Round(0x000ffffffffffffff),
VoteKeyDilution: 0x000ffffffffffffff,
@@ -914,11 +962,11 @@ func TestAccountsDbQueriesCreateClose(t *testing.T) {
defer dbs.Close()
err := dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- accountsInitTest(t, tx, make(map[basics.Address]basics.AccountData), config.Consensus[protocol.ConsensusCurrentVersion])
+ accountsInitTest(t, tx, make(map[basics.Address]basics.AccountData), protocol.ConsensusCurrentVersion)
return nil
})
require.NoError(t, err)
- qs, err := accountsInitDbQueries(dbs.Rdb.Handle, dbs.Wdb.Handle)
+ qs, err := accountsInitDbQueries(dbs.Rdb.Handle)
require.NoError(t, err)
require.NotNil(t, qs.listCreatablesStmt)
qs.close()
@@ -2108,7 +2156,55 @@ func TestBaseAccountDataIsEmpty(t *testing.T) {
}
structureTesting := func(t *testing.T) {
encoding, err := json.Marshal(&empty)
- expectedEncoding := `{"Status":0,"MicroAlgos":{"Raw":0},"RewardsBase":0,"RewardedMicroAlgos":{"Raw":0},"AuthAddr":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ","TotalAppSchemaNumUint":0,"TotalAppSchemaNumByteSlice":0,"TotalExtraAppPages":0,"TotalAssetParams":0,"TotalAssets":0,"TotalAppParams":0,"TotalAppLocalStates":0,"VoteID":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"SelectionID":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"VoteFirstValid":0,"VoteLastValid":0,"VoteKeyDilution":0,"StateProofID":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"UpdateRound":0}`
+ zeros32 := "0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0"
+ expectedEncoding := `{"Status":0,"MicroAlgos":{"Raw":0},"RewardsBase":0,"RewardedMicroAlgos":{"Raw":0},"AuthAddr":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ","TotalAppSchemaNumUint":0,"TotalAppSchemaNumByteSlice":0,"TotalExtraAppPages":0,"TotalAssetParams":0,"TotalAssets":0,"TotalAppParams":0,"TotalAppLocalStates":0,"VoteID":[` + zeros32 + `],"SelectionID":[` + zeros32 + `],"VoteFirstValid":0,"VoteLastValid":0,"VoteKeyDilution":0,"StateProofID":[` + zeros32 + `,` + zeros32 + `],"UpdateRound":0}`
+ require.NoError(t, err)
+ require.Equal(t, expectedEncoding, string(encoding))
+ }
+ t.Run("Positive", positiveTesting)
+ t.Run("Negative", negativeTesting)
+ t.Run("Structure", structureTesting)
+
+}
+
+func TestBaseOnlineAccountDataIsEmpty(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ positiveTesting := func(t *testing.T) {
+ var ba baseOnlineAccountData
+ require.True(t, ba.IsEmpty())
+ require.True(t, ba.IsVotingEmpty())
+ ba.MicroAlgos.Raw = 100
+ require.True(t, ba.IsVotingEmpty())
+ ba.RewardsBase = 200
+ require.True(t, ba.IsVotingEmpty())
+ }
+ var empty baseOnlineAccountData
+ negativeTesting := func(t *testing.T) {
+ for i := 0; i < 10; i++ {
+ randObj, _ := protocol.RandomizeObjectField(&baseOnlineAccountData{})
+ ba := randObj.(*baseOnlineAccountData)
+ if *ba == empty {
+ continue
+ }
+ require.False(t, ba.IsEmpty(), "base account : %v", ba)
+ break
+ }
+ {
+ var ba baseOnlineAccountData
+ ba.MicroAlgos.Raw = 100
+ require.False(t, ba.IsEmpty())
+ }
+ {
+ var ba baseOnlineAccountData
+ ba.RewardsBase = 200
+ require.False(t, ba.IsEmpty())
+ }
+ }
+ structureTesting := func(t *testing.T) {
+ encoding, err := json.Marshal(&empty)
+ zeros32 := "0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0"
+ expectedEncoding := `{"VoteID":[` + zeros32 + `],"SelectionID":[` + zeros32 + `],"VoteFirstValid":0,"VoteLastValid":0,"VoteKeyDilution":0,"StateProofID":[` + zeros32 + `,` + zeros32 + `],"MicroAlgos":{"Raw":0},"RewardsBase":0}`
require.NoError(t, err)
require.Equal(t, expectedEncoding, string(encoding))
}
@@ -2118,6 +2214,99 @@ func TestBaseAccountDataIsEmpty(t *testing.T) {
}
+func TestBaseOnlineAccountDataGettersSetters(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ addr := ledgertesting.RandomAddress()
+ data := ledgertesting.RandomAccountData(1)
+ data.Status = basics.Online
+ crypto.RandBytes(data.VoteID[:])
+ crypto.RandBytes(data.SelectionID[:])
+ crypto.RandBytes(data.StateProofID[:])
+ data.VoteFirstValid = basics.Round(crypto.RandUint64())
+ data.VoteLastValid = basics.Round(crypto.RandUint64()) // int64 is the max sqlite can store
+ data.VoteKeyDilution = crypto.RandUint64()
+
+ var ba baseOnlineAccountData
+ ad := ledgercore.ToAccountData(data)
+ ba.SetCoreAccountData(&ad)
+
+ require.Equal(t, data.MicroAlgos, ba.MicroAlgos)
+ require.Equal(t, data.RewardsBase, ba.RewardsBase)
+ require.Equal(t, data.VoteID, ba.VoteID)
+ require.Equal(t, data.SelectionID, ba.SelectionID)
+ require.Equal(t, data.VoteFirstValid, ba.VoteFirstValid)
+ require.Equal(t, data.VoteLastValid, ba.VoteLastValid)
+ require.Equal(t, data.VoteKeyDilution, ba.VoteKeyDilution)
+ require.Equal(t, data.StateProofID, ba.StateProofID)
+
+ normBalance := basics.NormalizedOnlineAccountBalance(basics.Online, data.RewardsBase, data.MicroAlgos, proto)
+ require.Equal(t, normBalance, ba.NormalizedOnlineBalance(proto))
+ oa := ba.GetOnlineAccount(addr, normBalance)
+
+ require.Equal(t, addr, oa.Address)
+ require.Equal(t, ba.MicroAlgos, oa.MicroAlgos)
+ require.Equal(t, ba.RewardsBase, oa.RewardsBase)
+ require.Equal(t, normBalance, oa.NormalizedOnlineBalance)
+ require.Equal(t, ba.VoteFirstValid, oa.VoteFirstValid)
+ require.Equal(t, ba.VoteLastValid, oa.VoteLastValid)
+ require.Equal(t, ba.StateProofID, oa.StateProofID)
+
+ rewardsLevel := uint64(1)
+ microAlgos, _, _ := basics.WithUpdatedRewards(
+ proto, basics.Online, oa.MicroAlgos, basics.MicroAlgos{}, ba.RewardsBase, rewardsLevel,
+ )
+ oad := ba.GetOnlineAccountData(proto, rewardsLevel)
+
+ require.Equal(t, microAlgos, oad.MicroAlgosWithRewards)
+ require.Equal(t, ba.VoteID, oad.VoteID)
+ require.Equal(t, ba.SelectionID, oad.SelectionID)
+ require.Equal(t, ba.StateProofID, oad.StateProofID)
+ require.Equal(t, ba.VoteFirstValid, oad.VoteFirstValid)
+ require.Equal(t, ba.VoteLastValid, oad.VoteLastValid)
+ require.Equal(t, ba.VoteKeyDilution, oad.VoteKeyDilution)
+}
+
+func TestBaseVotingDataGettersSetters(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ data := ledgertesting.RandomAccountData(1)
+ data.Status = basics.Online
+ crypto.RandBytes(data.VoteID[:])
+ crypto.RandBytes(data.SelectionID[:])
+ crypto.RandBytes(data.StateProofID[:])
+ data.VoteFirstValid = basics.Round(crypto.RandUint64())
+ data.VoteLastValid = basics.Round(crypto.RandUint64()) // int64 is the max sqlite can store
+ data.VoteKeyDilution = crypto.RandUint64()
+
+ var bv baseVotingData
+ require.True(t, bv.IsEmpty())
+
+ ad := ledgercore.ToAccountData(data)
+ bv.SetCoreAccountData(&ad)
+
+ require.False(t, bv.IsEmpty())
+ require.Equal(t, data.VoteID, bv.VoteID)
+ require.Equal(t, data.SelectionID, bv.SelectionID)
+ require.Equal(t, data.VoteFirstValid, bv.VoteFirstValid)
+ require.Equal(t, data.VoteLastValid, bv.VoteLastValid)
+ require.Equal(t, data.VoteKeyDilution, bv.VoteKeyDilution)
+ require.Equal(t, data.StateProofID, bv.StateProofID)
+}
+
+func TestBaseOnlineAccountDataReflect(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ require.Equal(t, 4, reflect.TypeOf(baseOnlineAccountData{}).NumField(), "update all getters and setters for baseOnlineAccountData and change the field count")
+}
+
+func TestBaseVotingDataReflect(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ require.Equal(t, 7, reflect.TypeOf(baseVotingData{}).NumField(), "update all getters and setters for baseVotingData and change the field count")
+}
+
func TestLookupAccountAddressFromAddressID(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -2131,7 +2320,7 @@ func TestLookupAccountAddressFromAddressID(t *testing.T) {
}
addrsids := make(map[basics.Address]int64)
err := dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- accountsInitTest(t, tx, make(map[basics.Address]basics.AccountData), config.Consensus[protocol.ConsensusCurrentVersion])
+ accountsInitTest(t, tx, make(map[basics.Address]basics.AccountData), protocol.ConsensusCurrentVersion)
for i := range addrs {
res, err := tx.ExecContext(ctx, "INSERT INTO accountbase (address, data) VALUES (?, ?)", addrs[i][:], []byte{12, 3, 4})
@@ -2728,3 +2917,1029 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) {
a.Equal(makeResourcesData(uint64(0)), upd[0].data)
}
}
+
+// TestAccountTopOnline ensures accountsOnlineTop return a right subset of accounts
+// from the history table.
+// Start with two online accounts A, B at round 1
+// At round 2 make A offline.
+// At round 3 make B offline and add a new online account C.
+//
+// addr | rnd | status
+// -----|-----|--------
+// A | 1 | 1
+// B | 1 | 1
+// A | 2 | 0
+// B | 3 | 0
+// C | 3 | 1
+//
+// Ensure
+// - for round 1 A and B returned
+// - for round 2 only B returned
+// - for round 3 only C returned
+// The test also checks accountsDbQueries.lookupOnline
+func TestAccountOnlineQueries(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ dbs, _ := dbOpenTest(t, true)
+ setDbLogging(t, dbs)
+ defer dbs.Close()
+
+ tx, err := dbs.Wdb.Handle.Begin()
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ var accts map[basics.Address]basics.AccountData
+ accountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion)
+ totals, err := accountsTotals(context.Background(), tx, false)
+ require.NoError(t, err)
+
+ var baseAccounts lruAccounts
+ var baseResources lruResources
+ var baseOnlineAccounts lruOnlineAccounts
+ baseAccounts.init(nil, 100, 80)
+ baseResources.init(nil, 100, 80)
+ baseOnlineAccounts.init(nil, 100, 80)
+
+ addrA := basics.Address(crypto.Hash([]byte("A")))
+ addrB := basics.Address(crypto.Hash([]byte("B")))
+ addrC := basics.Address(crypto.Hash([]byte("C")))
+
+ var voteIDA crypto.OneTimeSignatureVerifier
+ crypto.RandBytes(voteIDA[:])
+ var voteIDB crypto.OneTimeSignatureVerifier
+ crypto.RandBytes(voteIDB[:])
+ var voteIDC crypto.OneTimeSignatureVerifier
+ crypto.RandBytes(voteIDC[:])
+
+ dataA1 := ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{
+ MicroAlgos: basics.MicroAlgos{Raw: 100_000_000},
+ Status: basics.Online,
+ },
+ VotingData: ledgercore.VotingData{
+ VoteID: voteIDA,
+ },
+ }
+
+ dataB1 := ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{
+ MicroAlgos: basics.MicroAlgos{Raw: 200_000_000},
+ Status: basics.Online,
+ },
+ VotingData: ledgercore.VotingData{
+ VoteID: voteIDB,
+ },
+ }
+
+ dataC3 := ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{
+ MicroAlgos: basics.MicroAlgos{Raw: 300_000_000},
+ Status: basics.Online,
+ },
+ VotingData: ledgercore.VotingData{
+ VoteID: voteIDC,
+ },
+ }
+
+ dataA2 := dataA1
+ dataA2.Status = basics.Offline
+ dataA2.VoteID = crypto.OneTimeSignatureVerifier{}
+
+ dataB2 := dataB1
+ dataB2.Status = basics.Offline
+ dataB2.VoteID = crypto.OneTimeSignatureVerifier{}
+
+ delta1 := ledgercore.AccountDeltas{}
+ delta1.Upsert(addrA, dataA1)
+ delta1.Upsert(addrB, dataB1)
+
+ delta2 := ledgercore.AccountDeltas{}
+ delta2.Upsert(addrA, dataA2)
+
+ delta3 := ledgercore.AccountDeltas{}
+ delta3.Upsert(addrB, dataB2)
+ delta3.Upsert(addrC, dataC3)
+
+ addRound := func(rnd basics.Round, updates ledgercore.AccountDeltas) {
+ totals = ledgertesting.CalculateNewRoundAccountTotals(t, updates, 0, proto, accts, totals)
+ accts = applyPartialDeltas(accts, updates)
+
+ oldBase := rnd - 1
+ updatesCnt := makeCompactAccountDeltas([]ledgercore.AccountDeltas{updates}, oldBase, true, baseAccounts)
+ updatesOnlineCnt := makeCompactOnlineAccountDeltas([]ledgercore.AccountDeltas{updates}, oldBase, baseOnlineAccounts)
+
+ err = updatesCnt.accountsLoadOld(tx)
+ require.NoError(t, err)
+
+ err = updatesOnlineCnt.accountsLoadOld(tx)
+ require.NoError(t, err)
+
+ err = accountsPutTotals(tx, totals, false)
+ require.NoError(t, err)
+ updatedAccts, _, err := accountsNewRound(tx, updatesCnt, compactResourcesDeltas{}, map[basics.CreatableIndex]ledgercore.ModifiedCreatable{}, proto, rnd)
+ require.NoError(t, err)
+ require.Equal(t, updatesCnt.len(), len(updatedAccts))
+
+ updatedOnlineAccts, err := onlineAccountsNewRound(tx, updatesOnlineCnt, proto, rnd)
+ require.NoError(t, err)
+ require.NotEmpty(t, updatedOnlineAccts)
+
+ err = updateAccountsRound(tx, rnd)
+ require.NoError(t, err)
+ }
+
+ addRound(1, delta1)
+ addRound(2, delta2)
+ addRound(3, delta3)
+
+ queries, err := onlineAccountsInitDbQueries(tx)
+ require.NoError(t, err)
+
+ // check round 1
+ rnd := basics.Round(1)
+ online, err := accountsOnlineTop(tx, rnd, 0, 10, proto)
+ require.NoError(t, err)
+ require.Equal(t, 2, len(online))
+ require.NotContains(t, online, addrC)
+
+ onlineAcctA, ok := online[addrA]
+ require.True(t, ok)
+ require.NotNil(t, onlineAcctA)
+ require.Equal(t, addrA, onlineAcctA.Address)
+ require.Equal(t, dataA1.AccountBaseData.MicroAlgos, onlineAcctA.MicroAlgos)
+
+ onlineAcctB, ok := online[addrB]
+ require.True(t, ok)
+ require.NotNil(t, onlineAcctB)
+ require.Equal(t, addrB, onlineAcctB.Address)
+ require.Equal(t, dataB1.AccountBaseData.MicroAlgos, onlineAcctB.MicroAlgos)
+
+ paod, err := queries.lookupOnline(addrA, rnd)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(3), paod.round)
+ require.Equal(t, addrA, paod.addr)
+ require.Equal(t, dataA1.AccountBaseData.MicroAlgos, paod.accountData.MicroAlgos)
+ require.Equal(t, voteIDA, paod.accountData.VoteID)
+
+ paod, err = queries.lookupOnline(addrB, rnd)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(3), paod.round)
+ require.Equal(t, addrB, paod.addr)
+ require.Equal(t, dataB1.AccountBaseData.MicroAlgos, paod.accountData.MicroAlgos)
+ require.Equal(t, voteIDB, paod.accountData.VoteID)
+
+ paod, err = queries.lookupOnline(addrC, rnd)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(3), paod.round)
+ require.Equal(t, addrC, paod.addr)
+ require.Empty(t, paod.accountData)
+
+ // check round 2
+ rnd = basics.Round(2)
+ online, err = accountsOnlineTop(tx, rnd, 0, 10, proto)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(online))
+ require.NotContains(t, online, addrA)
+ require.NotContains(t, online, addrC)
+
+ onlineAcctB, ok = online[addrB]
+ require.True(t, ok)
+ require.NotNil(t, onlineAcctB)
+ require.Equal(t, addrB, onlineAcctB.Address)
+ require.Equal(t, dataB1.AccountBaseData.MicroAlgos, onlineAcctB.MicroAlgos)
+
+ paod, err = queries.lookupOnline(addrA, rnd)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(3), paod.round)
+ require.Equal(t, addrA, paod.addr)
+ require.Empty(t, paod.accountData)
+
+ paod, err = queries.lookupOnline(addrB, rnd)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(3), paod.round)
+ require.Equal(t, addrB, paod.addr)
+ require.Equal(t, dataB1.AccountBaseData.MicroAlgos, paod.accountData.MicroAlgos)
+ require.Equal(t, voteIDB, paod.accountData.VoteID)
+
+ paod, err = queries.lookupOnline(addrC, rnd)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(3), paod.round)
+ require.Equal(t, addrC, paod.addr)
+ require.Empty(t, paod.accountData)
+
+ // check round 3
+ rnd = basics.Round(3)
+ online, err = accountsOnlineTop(tx, rnd, 0, 10, proto)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(online))
+ require.NotContains(t, online, addrA)
+ require.NotContains(t, online, addrB)
+
+ onlineAcctC, ok := online[addrC]
+ require.True(t, ok)
+ require.NotNil(t, onlineAcctC)
+ require.Equal(t, addrC, onlineAcctC.Address)
+ require.Equal(t, dataC3.AccountBaseData.MicroAlgos, onlineAcctC.MicroAlgos)
+
+ paod, err = queries.lookupOnline(addrA, rnd)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(3), paod.round)
+ require.Equal(t, addrA, paod.addr)
+ require.Empty(t, paod.accountData)
+
+ paod, err = queries.lookupOnline(addrB, rnd)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(3), paod.round)
+ require.Equal(t, addrB, paod.addr)
+ require.Empty(t, paod.accountData)
+
+ paod, err = queries.lookupOnline(addrC, rnd)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(3), paod.round)
+ require.Equal(t, addrC, paod.addr)
+ require.Equal(t, dataC3.AccountBaseData.MicroAlgos, paod.accountData.MicroAlgos)
+ require.Equal(t, voteIDC, paod.accountData.VoteID)
+
+ paods, err := onlineAccountsAll(tx, 0)
+ require.NoError(t, err)
+ require.Equal(t, 5, len(paods))
+
+ // expect:
+ //
+ // addr | rnd | status
+ // -----|-----|--------
+ // B | 1 | 1
+ // B | 3 | 0
+ // C | 3 | 1
+ // A | 1 | 1
+ // A | 2 | 0
+
+ checkAddrB := func() {
+ require.Equal(t, int64(2), paods[0].rowid)
+ require.Equal(t, basics.Round(1), paods[0].updRound)
+ require.Equal(t, addrB, paods[0].addr)
+ require.Equal(t, int64(4), paods[1].rowid)
+ require.Equal(t, basics.Round(3), paods[1].updRound)
+ require.Equal(t, addrB, paods[1].addr)
+ }
+
+ checkAddrC := func() {
+ require.Equal(t, int64(5), paods[2].rowid)
+ require.Equal(t, basics.Round(3), paods[2].updRound)
+ require.Equal(t, addrC, paods[2].addr)
+ }
+
+ checkAddrA := func() {
+ require.Equal(t, int64(1), paods[3].rowid)
+ require.Equal(t, basics.Round(1), paods[3].updRound)
+ require.Equal(t, addrA, paods[3].addr)
+ require.Equal(t, int64(3), paods[4].rowid)
+ require.Equal(t, basics.Round(2), paods[4].updRound)
+ require.Equal(t, addrA, paods[4].addr)
+ }
+
+ checkAddrB()
+ checkAddrC()
+ checkAddrA()
+
+ paods, err = onlineAccountsAll(tx, 3)
+ require.NoError(t, err)
+ require.Equal(t, 5, len(paods))
+ checkAddrB()
+ checkAddrC()
+ checkAddrA()
+
+ paods, err = onlineAccountsAll(tx, 2)
+ require.NoError(t, err)
+ require.Equal(t, 3, len(paods))
+ checkAddrB()
+ checkAddrC()
+
+ paods, err = onlineAccountsAll(tx, 1)
+ require.NoError(t, err)
+ require.Equal(t, 2, len(paods))
+ checkAddrB()
+
+ paods, rnd, err = queries.lookupOnlineHistory(addrA)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(3), rnd)
+ require.Equal(t, 2, len(paods))
+ require.Equal(t, int64(1), paods[0].rowid)
+ require.Equal(t, basics.Round(1), paods[0].updRound)
+ require.Equal(t, int64(3), paods[1].rowid)
+ require.Equal(t, basics.Round(2), paods[1].updRound)
+
+ paods, rnd, err = queries.lookupOnlineHistory(addrB)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(3), rnd)
+ require.Equal(t, 2, len(paods))
+ require.Equal(t, int64(2), paods[0].rowid)
+ require.Equal(t, basics.Round(1), paods[0].updRound)
+ require.Equal(t, int64(4), paods[1].rowid)
+ require.Equal(t, basics.Round(3), paods[1].updRound)
+
+ paods, rnd, err = queries.lookupOnlineHistory(addrC)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(3), rnd)
+ require.Equal(t, 1, len(paods))
+ require.Equal(t, int64(5), paods[0].rowid)
+ require.Equal(t, basics.Round(3), paods[0].updRound)
+}
+
+type mockOnlineAccountsWriter struct {
+ rowid int64
+}
+
+func (w *mockOnlineAccountsWriter) insertOnlineAccount(addr basics.Address, normBalance uint64, data baseOnlineAccountData, updRound uint64, voteLastValid uint64) (rowid int64, err error) {
+ w.rowid++
+ return w.rowid, nil
+}
+
+func (w *mockOnlineAccountsWriter) close() {}
+
+func TestAccountOnlineAccountsNewRound(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ writer := &mockOnlineAccountsWriter{rowid: 100}
+
+ updates := compactOnlineAccountDeltas{}
+ addrA := ledgertesting.RandomAddress()
+ addrB := ledgertesting.RandomAddress()
+ addrC := ledgertesting.RandomAddress()
+ addrD := ledgertesting.RandomAddress()
+ addrE := ledgertesting.RandomAddress()
+
+ // acct A is empty
+ deltaA := onlineAccountDelta{
+ address: addrA,
+ }
+ // acct B is new and offline
+ deltaB := onlineAccountDelta{
+ address: addrB,
+ newAcct: []baseOnlineAccountData{{
+ MicroAlgos: basics.MicroAlgos{Raw: 200_000_000},
+ }},
+ updRound: []uint64{1},
+ newStatus: []basics.Status{basics.Offline},
+ }
+ // acct C is new and online
+ deltaC := onlineAccountDelta{
+ address: addrC,
+ newAcct: []baseOnlineAccountData{{
+ MicroAlgos: basics.MicroAlgos{Raw: 300_000_000},
+ baseVotingData: baseVotingData{VoteFirstValid: 500},
+ }},
+ newStatus: []basics.Status{basics.Online},
+ updRound: []uint64{2},
+ }
+ // acct D is old and went offline
+ deltaD := onlineAccountDelta{
+ address: addrD,
+ oldAcct: persistedOnlineAccountData{
+ addr: addrD,
+ accountData: baseOnlineAccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: 400_000_000},
+ baseVotingData: baseVotingData{VoteFirstValid: 500},
+ },
+ rowid: 1,
+ },
+ newAcct: []baseOnlineAccountData{{
+ MicroAlgos: basics.MicroAlgos{Raw: 400_000_000},
+ }},
+ newStatus: []basics.Status{basics.Offline},
+ updRound: []uint64{3},
+ }
+
+ // acct E is old online
+ deltaE := onlineAccountDelta{
+ address: addrE,
+ oldAcct: persistedOnlineAccountData{
+ addr: addrE,
+ accountData: baseOnlineAccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: 500_000_000},
+ baseVotingData: baseVotingData{VoteFirstValid: 500},
+ },
+ rowid: 2,
+ },
+ newAcct: []baseOnlineAccountData{{
+ MicroAlgos: basics.MicroAlgos{Raw: 500_000_000},
+ baseVotingData: baseVotingData{VoteFirstValid: 600},
+ }},
+ newStatus: []basics.Status{basics.Online},
+ updRound: []uint64{4},
+ }
+
+ updates.deltas = append(updates.deltas, deltaA, deltaB, deltaC, deltaD, deltaE)
+ lastUpdateRound := basics.Round(1)
+ updated, err := onlineAccountsNewRoundImpl(writer, updates, proto, lastUpdateRound)
+ require.NoError(t, err)
+
+ require.Len(t, updated, 3)
+ require.Equal(t, updated[0].addr, addrC)
+ require.Equal(t, updated[1].addr, addrD)
+ require.Equal(t, updated[2].addr, addrE)
+
+ // check errors: new online with empty voting data
+ deltaC.newStatus[0] = basics.Online
+ deltaC.newAcct[0].VoteFirstValid = 0
+ updates.deltas = []onlineAccountDelta{deltaC}
+ _, err = onlineAccountsNewRoundImpl(writer, updates, proto, lastUpdateRound)
+ require.Error(t, err)
+
+ // check errors: new non-online with non-empty voting data
+ deltaB.newStatus[0] = basics.Offline
+ deltaB.newAcct[0].VoteFirstValid = 1
+ updates.deltas = []onlineAccountDelta{deltaB}
+ _, err = onlineAccountsNewRoundImpl(writer, updates, proto, lastUpdateRound)
+ require.Error(t, err)
+
+ // check errors: new online with empty voting data
+ deltaD.newStatus[0] = basics.Online
+ updates.deltas = []onlineAccountDelta{deltaD}
+ _, err = onlineAccountsNewRoundImpl(writer, updates, proto, lastUpdateRound)
+ require.Error(t, err)
+}
+
+func TestAccountOnlineAccountsNewRoundFlip(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ writer := &mockOnlineAccountsWriter{rowid: 100}
+
+ updates := compactOnlineAccountDeltas{}
+ addrA := ledgertesting.RandomAddress()
+ addrB := ledgertesting.RandomAddress()
+ addrC := ledgertesting.RandomAddress()
+
+ // acct A is new, offline and then online
+ deltaA := onlineAccountDelta{
+ address: addrA,
+ newAcct: []baseOnlineAccountData{
+ {
+ MicroAlgos: basics.MicroAlgos{Raw: 100_000_000},
+ },
+ {
+ MicroAlgos: basics.MicroAlgos{Raw: 100_000_000},
+ baseVotingData: baseVotingData{VoteFirstValid: 100},
+ },
+ },
+ updRound: []uint64{1, 2},
+ newStatus: []basics.Status{basics.Offline, basics.Online},
+ }
+ // acct B is new and online and then offline
+ deltaB := onlineAccountDelta{
+ address: addrB,
+ newAcct: []baseOnlineAccountData{
+ {
+ MicroAlgos: basics.MicroAlgos{Raw: 200_000_000},
+ baseVotingData: baseVotingData{VoteFirstValid: 200},
+ },
+ {
+ MicroAlgos: basics.MicroAlgos{Raw: 200_000_000},
+ },
+ },
+ updRound: []uint64{3, 4},
+ newStatus: []basics.Status{basics.Online, basics.Offline},
+ }
+ // acct C is old online, then online and then offline
+ deltaC := onlineAccountDelta{
+ address: addrC,
+ oldAcct: persistedOnlineAccountData{
+ addr: addrC,
+ accountData: baseOnlineAccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: 300_000_000},
+ baseVotingData: baseVotingData{VoteFirstValid: 300},
+ },
+ rowid: 1,
+ },
+ newAcct: []baseOnlineAccountData{
+ {
+ MicroAlgos: basics.MicroAlgos{Raw: 300_000_000},
+ baseVotingData: baseVotingData{VoteFirstValid: 301},
+ },
+ {
+ MicroAlgos: basics.MicroAlgos{Raw: 300_000_000},
+ },
+ },
+ newStatus: []basics.Status{basics.Online, basics.Offline},
+ updRound: []uint64{5, 6},
+ }
+
+ updates.deltas = append(updates.deltas, deltaA, deltaB, deltaC)
+ lastUpdateRound := basics.Round(1)
+ updated, err := onlineAccountsNewRoundImpl(writer, updates, proto, lastUpdateRound)
+ require.NoError(t, err)
+
+ require.Len(t, updated, 5)
+ require.Equal(t, updated[0].addr, addrA)
+ require.Equal(t, updated[1].addr, addrB)
+ require.Equal(t, updated[2].addr, addrB)
+ require.Equal(t, updated[3].addr, addrC)
+ require.Equal(t, updated[4].addr, addrC)
+}
+
+func TestAccountOnlineRoundParams(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ dbs, _ := dbOpenTest(t, true)
+ setDbLogging(t, dbs)
+ defer dbs.Close()
+
+ tx, err := dbs.Wdb.Handle.Begin()
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ var accts map[basics.Address]basics.AccountData
+ accountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion)
+
+ // entry i is for round i+1 since db initialized with entry for round 0
+ const maxRounds = 40 // any number
+ onlineRoundParams := make([]ledgercore.OnlineRoundParamsData, maxRounds)
+ for i := range onlineRoundParams {
+ onlineRoundParams[i].OnlineSupply = uint64(i + 1)
+ onlineRoundParams[i].CurrentProtocol = protocol.ConsensusCurrentVersion
+ onlineRoundParams[i].RewardsLevel = uint64(i + 1)
+ }
+
+ err = accountsPutOnlineRoundParams(tx, onlineRoundParams, 1)
+ require.NoError(t, err)
+
+ dbOnlineRoundParams, endRound, err := accountsOnlineRoundParams(tx)
+ require.NoError(t, err)
+ require.Equal(t, maxRounds+1, len(dbOnlineRoundParams)) // +1 comes from init state
+ require.Equal(t, onlineRoundParams, dbOnlineRoundParams[1:])
+ require.Equal(t, maxRounds, int(endRound))
+
+ err = accountsPruneOnlineRoundParams(tx, 10)
+ require.NoError(t, err)
+
+ dbOnlineRoundParams, endRound, err = accountsOnlineRoundParams(tx)
+ require.NoError(t, err)
+ require.Equal(t, onlineRoundParams[9:], dbOnlineRoundParams)
+ require.Equal(t, maxRounds, int(endRound))
+}
+
+func TestRowidsToChunkedArgs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ res := rowidsToChunkedArgs([]int64{1})
+ require.Equal(t, 1, cap(res))
+ require.Equal(t, 1, len(res))
+ require.Equal(t, 1, cap(res[0]))
+ require.Equal(t, 1, len(res[0]))
+ require.Equal(t, []interface{}{int64(1)}, res[0])
+
+ input := make([]int64, 999)
+ for i := 0; i < len(input); i++ {
+ input[i] = int64(i)
+ }
+ res = rowidsToChunkedArgs(input)
+ require.Equal(t, 1, cap(res))
+ require.Equal(t, 1, len(res))
+ require.Equal(t, 999, cap(res[0]))
+ require.Equal(t, 999, len(res[0]))
+ for i := 0; i < len(input); i++ {
+ require.Equal(t, interface{}(int64(i)), res[0][i])
+ }
+
+ input = make([]int64, 1001)
+ for i := 0; i < len(input); i++ {
+ input[i] = int64(i)
+ }
+ res = rowidsToChunkedArgs(input)
+ require.Equal(t, 2, cap(res))
+ require.Equal(t, 2, len(res))
+ require.Equal(t, 999, cap(res[0]))
+ require.Equal(t, 999, len(res[0]))
+ require.Equal(t, 2, cap(res[1]))
+ require.Equal(t, 2, len(res[1]))
+ for i := 0; i < 999; i++ {
+ require.Equal(t, interface{}(int64(i)), res[0][i])
+ }
+ j := 0
+ for i := 999; i < len(input); i++ {
+ require.Equal(t, interface{}(int64(i)), res[1][j])
+ j++
+ }
+
+ input = make([]int64, 2*999)
+ for i := 0; i < len(input); i++ {
+ input[i] = int64(i)
+ }
+ res = rowidsToChunkedArgs(input)
+ require.Equal(t, 2, cap(res))
+ require.Equal(t, 2, len(res))
+ require.Equal(t, 999, cap(res[0]))
+ require.Equal(t, 999, len(res[0]))
+ require.Equal(t, 999, cap(res[1]))
+ require.Equal(t, 999, len(res[1]))
+ for i := 0; i < 999; i++ {
+ require.Equal(t, interface{}(int64(i)), res[0][i])
+ }
+ j = 0
+ for i := 999; i < len(input); i++ {
+ require.Equal(t, interface{}(int64(i)), res[1][j])
+ j++
+ }
+}
+
+// TestAccountDBTxTailLoad checks txtailNewRound and loadTxTail delete and load right data
+func TestAccountDBTxTailLoad(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ const inMem = true
+ dbs, _ := dbOpenTest(t, inMem)
+ setDbLogging(t, dbs)
+ defer dbs.Close()
+
+ tx, err := dbs.Wdb.Handle.Begin()
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ err = accountsCreateTxTailTable(context.Background(), tx)
+ require.NoError(t, err)
+
+ // insert 1500 rounds and retain past 1001
+ startRound := basics.Round(1)
+ endRound := basics.Round(1500)
+ roundData := make([][]byte, 1500)
+ const retainSize = 1001
+ for i := startRound; i <= endRound; i++ {
+ data := txTailRound{Hdr: bookkeeping.BlockHeader{TimeStamp: int64(i)}}
+ roundData[i-1] = protocol.Encode(&data)
+ }
+ forgetBefore := (endRound + 1).SubSaturate(retainSize)
+ err = txtailNewRound(context.Background(), tx, startRound, roundData, forgetBefore)
+ require.NoError(t, err)
+
+ data, _, baseRound, err := loadTxTail(context.Background(), tx, endRound)
+ require.NoError(t, err)
+ require.Len(t, data, retainSize)
+ require.Equal(t, basics.Round(endRound-retainSize+1), baseRound) // 500...1500
+
+ for i, entry := range data {
+ require.Equal(t, int64(i+int(baseRound)), entry.Hdr.TimeStamp)
+ }
+}
+
+// TestOnlineAccountsDeletion checks the onlineAccountsDelete preseves online accounts entries
+// and deleted only expired offline and online rows
+// Round 1 2 3 4 5 6 7
+// Acct A On Off On
+// Acct B On On
+// Expectations:
+// onlineAccountsDelete(1): A online
+// onlineAccountsDelete(2): A online
+// onlineAccountsDelete(3): A offline, B online
+// etc
+func TestOnlineAccountsDeletion(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ dbs, _ := dbOpenTest(t, true)
+ setDbLogging(t, dbs)
+ defer dbs.Close()
+
+ tx, err := dbs.Wdb.Handle.Begin()
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ var accts map[basics.Address]basics.AccountData
+ accountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion)
+
+ updates := compactOnlineAccountDeltas{}
+ addrA := ledgertesting.RandomAddress()
+ addrB := ledgertesting.RandomAddress()
+
+ deltaA := onlineAccountDelta{
+ address: addrA,
+ newAcct: []baseOnlineAccountData{
+ {
+ MicroAlgos: basics.MicroAlgos{Raw: 100_000_000},
+ baseVotingData: baseVotingData{VoteFirstValid: 100},
+ },
+ {
+ MicroAlgos: basics.MicroAlgos{Raw: 100_000_000},
+ },
+ {
+ MicroAlgos: basics.MicroAlgos{Raw: 100_000_000},
+ baseVotingData: baseVotingData{VoteFirstValid: 600},
+ },
+ },
+ updRound: []uint64{1, 3, 6},
+ newStatus: []basics.Status{basics.Online, basics.Offline, basics.Online},
+ }
+ // acct B is new and online and then offline
+ deltaB := onlineAccountDelta{
+ address: addrB,
+ newAcct: []baseOnlineAccountData{
+ {
+ MicroAlgos: basics.MicroAlgos{Raw: 200_000_000},
+ baseVotingData: baseVotingData{VoteFirstValid: 300},
+ },
+ {
+ MicroAlgos: basics.MicroAlgos{Raw: 200_000_000},
+ baseVotingData: baseVotingData{VoteFirstValid: 700},
+ },
+ },
+ updRound: []uint64{3, 7},
+ newStatus: []basics.Status{basics.Online, basics.Online},
+ }
+
+ updates.deltas = append(updates.deltas, deltaA, deltaB)
+ writer, err := makeOnlineAccountsSQLWriter(tx, updates.len() > 0)
+ if err != nil {
+ return
+ }
+ defer writer.close()
+
+ lastUpdateRound := basics.Round(10)
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ updated, err := onlineAccountsNewRoundImpl(writer, updates, proto, lastUpdateRound)
+ require.NoError(t, err)
+ require.Len(t, updated, 5)
+
+ queries, err := onlineAccountsInitDbQueries(tx)
+ require.NoError(t, err)
+
+ var count int64
+ var history []persistedOnlineAccountData
+ var validThrough basics.Round
+ for _, rnd := range []basics.Round{1, 2, 3} {
+ err = onlineAccountsDelete(tx, rnd)
+ require.NoError(t, err)
+
+ err = tx.QueryRow("SELECT COUNT(1) FROM onlineaccounts").Scan(&count)
+ require.NoError(t, err)
+ require.Equal(t, int64(5), count)
+
+ history, validThrough, err = queries.lookupOnlineHistory(addrA)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(0), validThrough) // not set
+ require.Len(t, history, 3)
+ history, validThrough, err = queries.lookupOnlineHistory(addrB)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(0), validThrough)
+ require.Len(t, history, 2)
+ }
+
+ for _, rnd := range []basics.Round{4, 5, 6, 7} {
+ err = onlineAccountsDelete(tx, rnd)
+ require.NoError(t, err)
+
+ err = tx.QueryRow("SELECT COUNT(1) FROM onlineaccounts").Scan(&count)
+ require.NoError(t, err)
+ require.Equal(t, int64(3), count)
+
+ history, validThrough, err = queries.lookupOnlineHistory(addrA)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(0), validThrough)
+ require.Len(t, history, 1)
+ history, validThrough, err = queries.lookupOnlineHistory(addrB)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(0), validThrough)
+ require.Len(t, history, 2)
+ }
+
+ for _, rnd := range []basics.Round{8, 9} {
+ err = onlineAccountsDelete(tx, rnd)
+ require.NoError(t, err)
+
+ err = tx.QueryRow("SELECT COUNT(1) FROM onlineaccounts").Scan(&count)
+ require.NoError(t, err)
+ require.Equal(t, int64(2), count)
+
+ history, validThrough, err = queries.lookupOnlineHistory(addrA)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(0), validThrough)
+ require.Len(t, history, 1)
+ history, validThrough, err = queries.lookupOnlineHistory(addrB)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(0), validThrough)
+ require.Len(t, history, 1)
+ }
+}
+
+// Test functions operating on catchpointfirststageinfo table.
+func TestCatchpointFirstStageInfoTable(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ dbs, _ := dbOpenTest(t, true)
+ defer dbs.Close()
+
+ ctx := context.Background()
+
+ err := accountsCreateCatchpointFirstStageInfoTable(ctx, dbs.Wdb.Handle)
+ require.NoError(t, err)
+
+ for _, round := range []basics.Round{4, 6, 8} {
+ info := catchpointFirstStageInfo{
+ TotalAccounts: uint64(round) * 10,
+ }
+ err = insertOrReplaceCatchpointFirstStageInfo(ctx, dbs.Wdb.Handle, round, &info)
+ require.NoError(t, err)
+ }
+
+ for _, round := range []basics.Round{4, 6, 8} {
+ info, exists, err := selectCatchpointFirstStageInfo(ctx, dbs.Rdb.Handle, round)
+ require.NoError(t, err)
+ require.True(t, exists)
+
+ infoExpected := catchpointFirstStageInfo{
+ TotalAccounts: uint64(round) * 10,
+ }
+ require.Equal(t, infoExpected, info)
+ }
+
+ _, exists, err := selectCatchpointFirstStageInfo(ctx, dbs.Rdb.Handle, 7)
+ require.NoError(t, err)
+ require.False(t, exists)
+
+ rounds, err := selectOldCatchpointFirstStageInfoRounds(ctx, dbs.Rdb.Handle, 6)
+ require.NoError(t, err)
+ require.Equal(t, []basics.Round{4, 6}, rounds)
+
+ err = deleteOldCatchpointFirstStageInfo(ctx, dbs.Wdb.Handle, 6)
+ require.NoError(t, err)
+
+ rounds, err = selectOldCatchpointFirstStageInfoRounds(ctx, dbs.Rdb.Handle, 9)
+ require.NoError(t, err)
+ require.Equal(t, []basics.Round{8}, rounds)
+}
+
+func TestUnfinishedCatchpointsTable(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ dbs, _ := dbOpenTest(t, true)
+ defer dbs.Close()
+
+ err := accountsCreateUnfinishedCatchpointsTable(
+ context.Background(), dbs.Wdb.Handle)
+ require.NoError(t, err)
+
+ var d3 crypto.Digest
+ rand.Read(d3[:])
+ err = insertUnfinishedCatchpoint(context.Background(), dbs.Wdb.Handle, 3, d3)
+ require.NoError(t, err)
+
+ var d5 crypto.Digest
+ rand.Read(d5[:])
+ err = insertUnfinishedCatchpoint(context.Background(), dbs.Wdb.Handle, 5, d5)
+ require.NoError(t, err)
+
+ ret, err := selectUnfinishedCatchpoints(context.Background(), dbs.Rdb.Handle)
+ require.NoError(t, err)
+ expected := []unfinishedCatchpointRecord{
+ {
+ round: 3,
+ blockHash: d3,
+ },
+ {
+ round: 5,
+ blockHash: d5,
+ },
+ }
+ require.Equal(t, expected, ret)
+
+ err = deleteUnfinishedCatchpoint(context.Background(), dbs.Wdb.Handle, 3)
+ require.NoError(t, err)
+
+ ret, err = selectUnfinishedCatchpoints(context.Background(), dbs.Rdb.Handle)
+ require.NoError(t, err)
+ expected = []unfinishedCatchpointRecord{
+ {
+ round: 5,
+ blockHash: d5,
+ },
+ }
+ require.Equal(t, expected, ret)
+}
+
+func TestRemoveOfflineStateProofID(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ accts := ledgertesting.RandomAccounts(20, true)
+ expectedAccts := make(map[basics.Address]basics.AccountData)
+ for addr, acct := range accts {
+ rand.Read(acct.StateProofID[:])
+ accts[addr] = acct
+
+ expectedAcct := acct
+ if acct.Status != basics.Online {
+ expectedAcct.StateProofID = merklesignature.Commitment{}
+ }
+ expectedAccts[addr] = expectedAcct
+
+ }
+
+ buildDB := func(accounts map[basics.Address]basics.AccountData) (db.Pair, *sql.Tx) {
+ dbs, _ := dbOpenTest(t, true)
+ setDbLogging(t, dbs)
+
+ tx, err := dbs.Wdb.Handle.Begin()
+ require.NoError(t, err)
+
+ // this is the same seq as accountsInitTest makes but it stops
+ // before the online accounts table creation to generate a trie and commit it
+ _, err = accountsInit(tx, accounts, config.Consensus[protocol.ConsensusCurrentVersion])
+ require.NoError(t, err)
+
+ err = accountsAddNormalizedBalance(tx, config.Consensus[protocol.ConsensusCurrentVersion])
+ require.NoError(t, err)
+
+ err = accountsCreateResourceTable(context.Background(), tx)
+ require.NoError(t, err)
+
+ err = performResourceTableMigration(context.Background(), tx, nil)
+ require.NoError(t, err)
+
+ return dbs, tx
+ }
+
+ dbs, tx := buildDB(accts)
+ defer dbs.Close()
+ defer tx.Rollback()
+
+ // make second copy of DB to prepare exepected/fixed merkle trie
+ expectedDBs, expectedTx := buildDB(expectedAccts)
+ defer expectedDBs.Close()
+ defer expectedTx.Rollback()
+
+ // create account hashes
+ computeRootHash := func(tx *sql.Tx, expected bool) (crypto.Digest, error) {
+ rows, err := tx.Query("SELECT address, data FROM accountbase")
+ require.NoError(t, err)
+ defer rows.Close()
+
+ mc, err := MakeMerkleCommitter(tx, false)
+ require.NoError(t, err)
+ trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig)
+ require.NoError(t, err)
+
+ var addr basics.Address
+ for rows.Next() {
+ var addrbuf []byte
+ var encodedAcctData []byte
+ err = rows.Scan(&addrbuf, &encodedAcctData)
+ require.NoError(t, err)
+ copy(addr[:], addrbuf)
+ var ba baseAccountData
+ err = protocol.Decode(encodedAcctData, &ba)
+ require.NoError(t, err)
+ if expected && ba.Status != basics.Online {
+ require.Equal(t, merklesignature.Commitment{}, ba.StateProofID)
+ }
+ addHash := accountHashBuilderV6(addr, &ba, encodedAcctData)
+ added, err := trie.Add(addHash)
+ require.NoError(t, err)
+ require.True(t, added)
+ }
+ _, err = trie.Evict(true)
+ require.NoError(t, err)
+ return trie.RootHash()
+ }
+ oldRoot, err := computeRootHash(tx, false)
+ require.NoError(t, err)
+ require.NotEmpty(t, oldRoot)
+
+ expectedRoot, err := computeRootHash(expectedTx, true)
+ require.NoError(t, err)
+ require.NotEmpty(t, expectedRoot)
+
+ err = accountsCreateOnlineAccountsTable(context.Background(), tx)
+ require.NoError(t, err)
+ err = performOnlineAccountsTableMigration(context.Background(), tx, nil, nil)
+ require.NoError(t, err)
+
+ // get the new hash and ensure it does not match to the old one (data migrated)
+ mc, err := MakeMerkleCommitter(tx, false)
+ require.NoError(t, err)
+ trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig)
+ require.NoError(t, err)
+
+ newRoot, err := trie.RootHash()
+ require.NoError(t, err)
+ require.NotEmpty(t, newRoot)
+
+ require.NotEqual(t, oldRoot, newRoot)
+ require.Equal(t, expectedRoot, newRoot)
+
+ rows, err := tx.Query("SELECT addrid, data FROM accountbase")
+ require.NoError(t, err)
+ defer rows.Close()
+
+ for rows.Next() {
+ var addrid sql.NullInt64
+ var encodedAcctData []byte
+ err = rows.Scan(&addrid, &encodedAcctData)
+ require.NoError(t, err)
+ var ba baseAccountData
+ err = protocol.Decode(encodedAcctData, &ba)
+ require.NoError(t, err)
+ if ba.Status != basics.Online {
+ require.True(t, ba.StateProofID.IsEmpty())
+ }
+ }
+}
diff --git a/ledger/acctonline.go b/ledger/acctonline.go
new file mode 100644
index 000000000..8462d7b6b
--- /dev/null
+++ b/ledger/acctonline.go
@@ -0,0 +1,914 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "container/heap"
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/algorand/go-deadlock"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/util/db"
+ "github.com/algorand/go-algorand/util/metrics"
+)
+
+type modifiedOnlineAccount struct {
+ // data stores the most recent ledgercore.AccountData for this modified
+ // account.
+ data ledgercore.AccountData
+
+ // ndelta keeps track of how many times this account appears in
+ // accountUpdates.deltas. This is used to evict modifiedAccount
+ // entries when all changes to an account have been reflected in
+ // the account DB, and no outstanding modifications remain.
+ ndeltas int
+}
+
+// cachedOnlineAccount is a light-weight version of persistedOnlineAccountData suitable for in-memory caching
+//msgp:ignore cachedOnlineAccount
+type cachedOnlineAccount struct {
+ baseOnlineAccountData
+ updRound basics.Round
+}
+
+// onlineAccounts tracks history of online accounts
+type onlineAccounts struct {
+ // Connection to the database.
+ dbs db.Pair
+
+ // Prepared SQL statements for fast accounts DB lookups.
+ accountsq *onlineAccountsDbQueries
+
+ // cachedDBRoundOnline is always exactly tracker DB round (and therefore, onlineAccountsRound()),
+ // cached to use in lookup functions
+ cachedDBRoundOnline basics.Round
+
+ // deltas stores updates for every round after dbRound.
+ deltas []ledgercore.AccountDeltas
+
+ // accounts stores the most recent account state for every
+ // address that appears in deltas.
+ accounts map[basics.Address]modifiedOnlineAccount
+
+ // onlineRoundParamsData stores onlineMoney, rewards from rounds
+ // dbRound + 1 - maxLookback to current round, where maxLookback is max(proto.MaxBalLookback, votersLookback)
+ // It behaves as delta storage and a cache.
+ onlineRoundParamsData []ledgercore.OnlineRoundParamsData
+
+ // log copied from ledger
+ log logging.Logger
+
+ // ledger is the source ledger, which is used to synchronize
+ // the rounds at which we need to flush the balances to disk
+ // in favor of the catchpoint to be generated.
+ ledger ledgerForTracker
+
+ // deltasAccum stores the accumulated deltas for every round starting dbRound-1.
+ deltasAccum []int
+
+ // accountsMu is the synchronization mutex for accessing the various non-static variables.
+ accountsMu deadlock.RWMutex
+
+ // accountsReadCond used to synchronize read access to the internal data structures.
+ accountsReadCond *sync.Cond
+
+ // voters keeps track of Merkle trees of online accounts, used for compact certificates.
+ voters votersTracker
+
+ // baseAccounts stores the most recently used accounts, at exactly dbRound
+ baseOnlineAccounts lruOnlineAccounts
+
+ // onlineAccountsCache contains up to onlineAccountsCacheMaxSize accounts with their complete history
+ // for the range [Lastest - MaxBalLookback - X, Latest - lookback], where X = [0, commit range]
+ // and alway containing an entry for Lastest - MaxBalLookback + 1 if some account is cached.
+ // The invariant is held by
+ // 1) loading a full history when new accounts get added
+ // 2) adding online accounts state changes when flushing to disk
+ // 3) pruning the history by removing older than Lastest - MaxBalLookback non-online entries
+ onlineAccountsCache onlineAccountsCache
+
+ // maxAcctLookback sets the minimim deltas size to keep in memory
+ acctLookback uint64
+}
+
+// initialize initializes the accountUpdates structure
+func (ao *onlineAccounts) initialize(cfg config.Local) {
+ ao.accountsReadCond = sync.NewCond(ao.accountsMu.RLocker())
+ ao.acctLookback = cfg.MaxAcctLookback
+}
+
+// loadFromDisk is the 2nd level initialization, and is required before the onlineAccounts becomes functional
+// The close function is expected to be call in pair with loadFromDisk
+func (ao *onlineAccounts) loadFromDisk(l ledgerForTracker, lastBalancesRound basics.Round) error {
+ ao.accountsMu.Lock()
+ defer ao.accountsMu.Unlock()
+
+ ao.cachedDBRoundOnline = lastBalancesRound
+ ao.ledger = l
+ err := ao.initializeFromDisk(l, lastBalancesRound)
+ if err != nil {
+ return err
+ }
+
+ err = ao.voters.loadFromDisk(l, ao, lastBalancesRound)
+ if err != nil {
+ err = fmt.Errorf("voters tracker failed to loadFromDisk : %w", err)
+ }
+ return err
+}
+
+// initializeFromDisk performs the atomic operation of loading the accounts data information from disk
+// and preparing the onlineAccounts for operation.
+func (ao *onlineAccounts) initializeFromDisk(l ledgerForTracker, lastBalancesRound basics.Round) (err error) {
+ ao.dbs = l.trackerDB()
+ ao.log = l.trackerLog()
+
+ err = ao.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ var err0 error
+ var endRound basics.Round
+ ao.onlineRoundParamsData, endRound, err0 = accountsOnlineRoundParams(tx)
+ if err0 != nil {
+ return err0
+ }
+ if endRound != ao.cachedDBRoundOnline {
+ return fmt.Errorf("last onlineroundparams round %d does not match dbround %d", endRound, ao.cachedDBRoundOnline)
+ }
+
+ onlineAccounts, err0 := onlineAccountsAll(tx, onlineAccountsCacheMaxSize)
+ if err0 != nil {
+ return err0
+ }
+ ao.onlineAccountsCache.init(onlineAccounts, onlineAccountsCacheMaxSize)
+
+ return nil
+ })
+ if err != nil {
+ return
+ }
+
+ ao.accountsq, err = onlineAccountsInitDbQueries(ao.dbs.Rdb.Handle)
+ if err != nil {
+ return
+ }
+
+ ao.deltas = nil
+ ao.accounts = make(map[basics.Address]modifiedOnlineAccount)
+ ao.deltasAccum = []int{0}
+
+ ao.baseOnlineAccounts.init(ao.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold)
+ return
+}
+
+// latest returns the latest round
+func (ao *onlineAccounts) latest() basics.Round {
+ return ao.cachedDBRoundOnline + basics.Round(len(ao.deltas))
+}
+
+// close closes the accountUpdates, waiting for all the child go-routine to complete
+func (ao *onlineAccounts) close() {
+ if ao.accountsq != nil {
+ ao.accountsq.close()
+ ao.accountsq = nil
+ }
+
+ ao.voters.close()
+
+ ao.baseOnlineAccounts.prune(0)
+}
+
+// newBlock is the accountUpdates implementation of the ledgerTracker interface. This is the "external" facing function
+// which invokes the internal implementation after taking the lock.
+func (ao *onlineAccounts) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
+ ao.accountsMu.Lock()
+ ao.newBlockImpl(blk, delta)
+ ao.accountsMu.Unlock()
+ ao.accountsReadCond.Broadcast()
+}
+
+// newBlockImpl is the accountUpdates implementation of the ledgerTracker interface. This is the "internal" facing function
+// which assumes that no lock need to be taken.
+func (ao *onlineAccounts) newBlockImpl(blk bookkeeping.Block, delta ledgercore.StateDelta) {
+ rnd := blk.Round()
+
+ if rnd <= ao.latest() {
+ // Duplicate, ignore.
+ return
+ }
+
+ if rnd != ao.latest()+1 {
+ ao.log.Panicf("onlineAccounts: newBlockImpl %d too far in the future, dbRound %d, deltas %d", rnd, ao.cachedDBRoundOnline, len(ao.deltas))
+ }
+ ao.deltas = append(ao.deltas, delta.Accts)
+ ao.deltasAccum = append(ao.deltasAccum, delta.Accts.Len()+ao.deltasAccum[len(ao.deltasAccum)-1])
+
+ ao.baseOnlineAccounts.flushPendingWrites()
+
+ for i := 0; i < delta.Accts.Len(); i++ {
+ addr, data := delta.Accts.GetByIdx(i)
+ macct := ao.accounts[addr]
+ macct.ndeltas++
+ macct.data = data
+ ao.accounts[addr] = macct
+ }
+
+ ao.onlineRoundParamsData = append(ao.onlineRoundParamsData, ledgercore.OnlineRoundParamsData{
+ OnlineSupply: delta.Totals.Online.Money.Raw,
+ RewardsLevel: delta.Totals.RewardsLevel,
+ CurrentProtocol: blk.CurrentProtocol,
+ })
+
+ // calling prune would drop old entries from the base accounts.
+ newBaseAccountSize := (len(ao.accounts) + 1) + baseAccountsPendingAccountsBufferSize
+ ao.baseOnlineAccounts.prune(newBaseAccountSize)
+
+ ao.voters.newBlock(blk.BlockHeader)
+
+}
+
+// committedUpTo implements the ledgerTracker interface for accountUpdates.
+// The method informs the tracker that committedRound and all it's previous rounds have
+// been committed to the block database. The method returns what is the oldest round
+// number that can be removed from the blocks database as well as the lookback that this
+// tracker maintains.
+func (ao *onlineAccounts) committedUpTo(committedRound basics.Round) (retRound, lookback basics.Round) {
+ ao.accountsMu.RLock()
+ defer ao.accountsMu.RUnlock()
+
+ retRound = basics.Round(0)
+ lookback = basics.Round(ao.acctLookback)
+ if committedRound < lookback {
+ return
+ }
+
+ retRound = ao.cachedDBRoundOnline
+ lowestRound := ao.voters.lowestRound(ao.cachedDBRoundOnline)
+ if lowestRound > 0 && lowestRound < retRound {
+ retRound = lowestRound
+ }
+ return
+}
+
+// produceCommittingTask enqueues committing the balances for round committedRound-lookback.
+// The deferred committing is done so that we could calculate the historical balances lookback rounds back.
+// Since we don't want to hold off the tracker's mutex for too long, we'll defer the database persistence of this
+// operation to a syncer goroutine. The one caveat is that when storing a catchpoint round, we would want to
+// wait until the catchpoint creation is done, so that the persistence of the catchpoint file would have an
+// uninterrupted view of the balances at a given point of time.
+func (ao *onlineAccounts) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
+ var offset uint64
+ ao.accountsMu.RLock()
+ defer ao.accountsMu.RUnlock()
+
+ // repeat logic from account updates
+ // TODO: after clean up removing 320 rounds lookback
+ if committedRound < dcr.lookback {
+ return nil
+ }
+
+ newBase := committedRound - dcr.lookback
+ if newBase <= dbRound {
+ // Already forgotten
+ return nil
+ }
+
+ if newBase > dbRound+basics.Round(len(ao.deltas)) {
+ ao.log.Panicf("produceCommittingTask: block %d too far in the future, lookback %d, dbRound %d (cached %d), deltas %d", committedRound, dcr.lookback, dbRound, ao.cachedDBRoundOnline, len(ao.deltas))
+ }
+
+ lowestRound := ao.voters.lowestRound(newBase)
+
+ offset = uint64(newBase - dbRound)
+ offset = ao.consecutiveVersion(offset)
+
+ // synchronize base and offset with account updates
+ if offset < dcr.offset {
+ dcr.offset = offset
+ }
+ dcr.oldBase = dbRound
+ dcr.lowestRound = lowestRound
+ return dcr
+}
+
+func (ao *onlineAccounts) consecutiveVersion(offset uint64) uint64 {
+ // Index that corresponds to the data at dbRound,
+ startIndex := len(ao.onlineRoundParamsData) - len(ao.deltas) - 1
+ // check if this update chunk spans across multiple consensus versions. If so, break it so that each update would tackle only a single
+ // consensus version.
+ // startIndex + 1 is the first delta's data, startIndex+int(offset) is the las delta's index from the commit range
+ if ao.onlineRoundParamsData[startIndex+1].CurrentProtocol != ao.onlineRoundParamsData[startIndex+int(offset)].CurrentProtocol {
+ // find the tip point.
+ tipPoint := sort.Search(int(offset), func(i int) bool {
+ // we're going to search here for version inequality, with the assumption that consensus versions won't repeat.
+ // that allow us to support [ver1, ver1, ..., ver2, ver2, ..., ver3, ver3] but not [ver1, ver1, ..., ver2, ver2, ..., ver1, ver3].
+ return ao.onlineRoundParamsData[startIndex+1].CurrentProtocol != ao.onlineRoundParamsData[startIndex+1+i].CurrentProtocol
+ })
+ // no need to handle the case of "no found", or tipPoint==int(offset), since we already know that it's there.
+ offset = uint64(tipPoint)
+ }
+ return offset
+}
+
+func (ao *onlineAccounts) handleUnorderedCommit(dcc *deferredCommitContext) {
+}
+
+func (ao *onlineAccounts) maxBalLookback() uint64 {
+ lastProtoVersion := ao.onlineRoundParamsData[len(ao.onlineRoundParamsData)-1].CurrentProtocol
+ return config.Consensus[lastProtoVersion].MaxBalLookback
+}
+
+// prepareCommit prepares data to write to the database a "chunk" of rounds, and update the cached dbRound accordingly.
+func (ao *onlineAccounts) prepareCommit(dcc *deferredCommitContext) error {
+ offset := dcc.offset
+
+ ao.accountsMu.RLock()
+ defer ao.accountsMu.RUnlock()
+
+ // create a copy of the deltas, round totals and protos for the range we're going to flush.
+ deltas := make([]ledgercore.AccountDeltas, offset)
+ copy(deltas, ao.deltas[:offset])
+
+ // verify version correctness : all the entries in the au.versions[1:offset+1] should have the *same* version, and the committedUpTo should be enforcing that.
+ // Index that corresponds to the oldest round still in deltas
+ startIndex := len(ao.onlineRoundParamsData) - len(ao.deltas) - 1
+ if ao.onlineRoundParamsData[startIndex+1].CurrentProtocol != ao.onlineRoundParamsData[startIndex+int(offset)].CurrentProtocol {
+ ao.accountsMu.RUnlock()
+
+ // in scheduleCommit, we expect that this function to update the catchpointWriting when
+ // it's on a catchpoint round and the node is configured to generate catchpoints. Doing this in a deferred function
+ // here would prevent us from "forgetting" to update this variable later on.
+ // The same is repeated in commitRound on errors.
+ if dcc.catchpointFirstStage && dcc.enableGeneratingCatchpointFiles {
+ atomic.StoreInt32(dcc.catchpointDataWriting, 0)
+ }
+ return fmt.Errorf("attempted to commit series of rounds with non-uniform consensus versions")
+ }
+
+ // compact all the deltas - when we're trying to persist multiple rounds, we might have the same account
+ // being updated multiple times. When that happen, we can safely omit the intermediate updates.
+ dcc.compactOnlineAccountDeltas = makeCompactOnlineAccountDeltas(deltas, dcc.oldBase, ao.baseOnlineAccounts)
+
+ dcc.genesisProto = ao.ledger.GenesisProto()
+
+ start, err := ao.roundParamsOffset(dcc.oldBase)
+ if err != nil {
+ return err
+ }
+ end, err := ao.roundParamsOffset(dcc.newBase)
+ if err != nil {
+ return err
+ }
+ // write for rounds oldbase+1 up to and including newbase
+ dcc.onlineRoundParams = ao.onlineRoundParamsData[start+1 : end+1]
+
+ maxOnlineLookback := basics.Round(ao.maxBalLookback())
+ dcc.onlineAccountsForgetBefore = (dcc.newBase + 1).SubSaturate(maxOnlineLookback)
+ if dcc.lowestRound > 0 && dcc.lowestRound < dcc.onlineAccountsForgetBefore {
+ // extend history as needed
+ dcc.onlineAccountsForgetBefore = dcc.lowestRound
+ }
+
+ return nil
+}
+
+// commitRound closure is called within the same transaction for all trackers
+// it receives current offset and dbRound
+func (ao *onlineAccounts) commitRound(ctx context.Context, tx *sql.Tx, dcc *deferredCommitContext) (err error) {
+ offset := dcc.offset
+ dbRound := dcc.oldBase
+
+ _, err = db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(accountsUpdatePerRoundHighWatermark*time.Duration(offset)))
+ if err != nil {
+ return err
+ }
+
+ err = dcc.compactOnlineAccountDeltas.accountsLoadOld(tx)
+ if err != nil {
+ return err
+ }
+
+ // the updates of the actual account data is done last since the accountsNewRound would modify the compactDeltas old values
+ // so that we can update the base account back.
+ dcc.updatedPersistedOnlineAccounts, err = onlineAccountsNewRound(tx, dcc.compactOnlineAccountDeltas, dcc.genesisProto, dbRound+basics.Round(offset))
+ if err != nil {
+ return err
+ }
+
+ err = onlineAccountsDelete(tx, dcc.onlineAccountsForgetBefore)
+ if err != nil {
+ return err
+ }
+
+ err = accountsPutOnlineRoundParams(tx, dcc.onlineRoundParams, dcc.oldBase+1)
+ if err != nil {
+ return err
+ }
+
+ // delete all entries all older than maxBalLookback (or votersLookback) rounds ago
+ err = accountsPruneOnlineRoundParams(tx, dcc.onlineAccountsForgetBefore)
+
+ return
+}
+
+func (ao *onlineAccounts) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+ offset := dcc.offset
+ newBase := dcc.newBase
+
+ ao.accountsMu.Lock()
+ // Drop reference counts to modified accounts, and evict them
+ // from in-memory cache when no references remain.
+ for i := 0; i < dcc.compactOnlineAccountDeltas.len(); i++ {
+ acctUpdate := dcc.compactOnlineAccountDeltas.getByIdx(i)
+ cnt := acctUpdate.nOnlineAcctDeltas
+ macct, ok := ao.accounts[acctUpdate.address]
+ if !ok {
+ ao.log.Panicf("inconsistency: flushed %d changes to %s, but not in au.accounts", cnt, acctUpdate.address)
+ }
+
+ if cnt > macct.ndeltas {
+ ao.log.Panicf("inconsistency: flushed %d changes to %s, but au.accounts had %d", cnt, acctUpdate.address, macct.ndeltas)
+ } else if cnt == macct.ndeltas {
+ delete(ao.accounts, acctUpdate.address)
+ } else {
+ macct.ndeltas -= cnt
+ ao.accounts[acctUpdate.address] = macct
+ }
+ }
+
+ for _, persistedAcct := range dcc.updatedPersistedOnlineAccounts {
+ ao.baseOnlineAccounts.write(persistedAcct)
+ ao.onlineAccountsCache.writeFrontIfExist(
+ persistedAcct.addr,
+ cachedOnlineAccount{
+ baseOnlineAccountData: persistedAcct.accountData,
+ updRound: persistedAcct.updRound,
+ })
+ }
+
+ // clear the backing array to let GC collect data
+ // see the comment in acctupdates.go
+ const deltasClearThreshold = 500
+ if offset > deltasClearThreshold {
+ for i := uint64(0); i < offset; i++ {
+ ao.deltas[i] = ledgercore.AccountDeltas{}
+ }
+ }
+
+ ao.deltas = ao.deltas[offset:]
+ ao.deltasAccum = ao.deltasAccum[offset:]
+ ao.cachedDBRoundOnline = newBase
+
+ // onlineRoundParamsData does not require extended history since it is not used in top online accounts
+ maxOnlineLookback := int(ao.maxBalLookback()) + len(ao.deltas)
+ if len(ao.onlineRoundParamsData) > maxOnlineLookback {
+ ao.onlineRoundParamsData = ao.onlineRoundParamsData[len(ao.onlineRoundParamsData)-maxOnlineLookback:]
+ }
+
+ // online accounts defines deletion round as
+ // dcc.onlineAccountsForgetBefore = (dcc.newBase + 1).SubSaturate(maxOnlineLookback)
+ // maxOnlineLookback can be greater than proto.MaxBalLookback because of voters
+ // the cache is not used by top accounts (voters) so keep up to proto.MaxBalLookback rounds back
+ forgetBefore := (newBase + 1).SubSaturate(basics.Round(ao.maxBalLookback()))
+ ao.onlineAccountsCache.prune(forgetBefore)
+
+ ao.accountsMu.Unlock()
+
+ ao.accountsReadCond.Broadcast()
+}
+
+func (ao *onlineAccounts) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
+}
+
+// onlineTotals return the total online balance for the given round.
+func (ao *onlineAccounts) onlineTotals(rnd basics.Round) (basics.MicroAlgos, error) {
+ ao.accountsMu.RLock()
+ defer ao.accountsMu.RUnlock()
+ return ao.onlineTotalsImpl(rnd)
+}
+
+// onlineTotalsEx return the total online balance for the given round for extended rounds range
+// by looking into DB
+func (ao *onlineAccounts) onlineTotalsEx(rnd basics.Round) (basics.MicroAlgos, error) {
+ ao.accountsMu.RLock()
+ totalsOnline, err := ao.onlineTotalsImpl(rnd)
+ ao.accountsMu.RUnlock()
+ if err == nil {
+ return totalsOnline, err
+ }
+
+ var roundOffsetError *RoundOffsetError
+ if !errors.As(err, &roundOffsetError) {
+ ao.log.Errorf("onlineTotalsImpl error: %w", err)
+ }
+
+ totalsOnline, err = ao.accountsq.lookupOnlineTotalsHistory(rnd)
+ return totalsOnline, err
+}
+
+// onlineTotalsImpl returns the online totals of all accounts at the end of round rnd.
+func (ao *onlineAccounts) onlineTotalsImpl(rnd basics.Round) (basics.MicroAlgos, error) {
+ offset, err := ao.roundParamsOffset(rnd)
+ if err != nil {
+ ao.log.Warnf("onlineAccounts failed to fetch online totals for rnd: %d", rnd)
+ return basics.MicroAlgos{}, err
+ }
+
+ onlineRoundParams := ao.onlineRoundParamsData[offset]
+ return basics.MicroAlgos{Raw: onlineRoundParams.OnlineSupply}, nil
+}
+
+// LookupOnlineAccountData returns the online account data for a given address at a given round.
+func (ao *onlineAccounts) LookupOnlineAccountData(rnd basics.Round, addr basics.Address) (data basics.OnlineAccountData, err error) {
+ oad, err := ao.lookupOnlineAccountData(rnd, addr)
+ if err != nil {
+ ao.log.Warnf("onlineAccounts failed to fetch online account data for rnd: %d, addr: %v", rnd, addr)
+ return
+ }
+
+ data.MicroAlgosWithRewards = oad.MicroAlgosWithRewards
+ data.VotingData.VoteID = oad.VotingData.VoteID
+ data.VotingData.SelectionID = oad.VotingData.SelectionID
+ data.VotingData.StateProofID = oad.VotingData.StateProofID
+ data.VotingData.VoteFirstValid = oad.VotingData.VoteFirstValid
+ data.VotingData.VoteLastValid = oad.VotingData.VoteLastValid
+ data.VotingData.VoteKeyDilution = oad.VotingData.VoteKeyDilution
+
+ return
+}
+
+// roundOffset calculates the offset of the given round compared to the current dbRound. Requires that the lock would be taken.
+func (ao *onlineAccounts) roundOffset(rnd basics.Round) (offset uint64, err error) {
+ if rnd < ao.cachedDBRoundOnline {
+ err = &RoundOffsetError{
+ round: rnd,
+ dbRound: ao.cachedDBRoundOnline,
+ }
+ return
+ }
+
+ off := uint64(rnd - ao.cachedDBRoundOnline)
+ if off > uint64(len(ao.deltas)) {
+ err = fmt.Errorf("round %d too high: dbRound %d, deltas %d", rnd, ao.cachedDBRoundOnline, len(ao.deltas))
+ return
+ }
+
+ return off, nil
+}
+
+// roundParamsOffset calculates the offset of the given round compared to the onlineRoundParams cache. Requires that the lock would be taken.
+func (ao *onlineAccounts) roundParamsOffset(rnd basics.Round) (offset uint64, err error) {
+ // the invariant is that the last element of ao.onlineRoundParamsData is for round ao.latest()
+ startRound := ao.latest() + 1 - basics.Round(len(ao.onlineRoundParamsData))
+ if rnd < startRound {
+ err = &RoundOffsetError{
+ round: rnd,
+ dbRound: startRound,
+ }
+ return
+ }
+
+ off := uint64(rnd - startRound)
+ if off >= uint64(len(ao.onlineRoundParamsData)) {
+ err = fmt.Errorf("round %d too high: dbRound %d, onlineRoundParamsData %d", rnd, startRound, len(ao.onlineRoundParamsData))
+ return
+ }
+
+ return off, nil
+}
+
+// lookupOnlineAccountData returns the online account data for a given address at a given round.
+func (ao *onlineAccounts) lookupOnlineAccountData(rnd basics.Round, addr basics.Address) (ledgercore.OnlineAccountData, error) {
+ needUnlock := false
+ defer func() {
+ if needUnlock {
+ ao.accountsMu.RUnlock()
+ }
+ }()
+ var err error
+
+ var offset uint64
+ var paramsOffset uint64
+ var rewardsProto config.ConsensusParams
+ var rewardsLevel uint64
+ var persistedData persistedOnlineAccountData
+
+ // the loop serves retrying logic if the database advanced while
+ // the function was analyzing deltas or caches.
+ // a similar approach is used in other lookup- methods in acctupdates as well.
+ for {
+ ao.accountsMu.RLock()
+ needUnlock = true
+ currentDbRound := ao.cachedDBRoundOnline
+ currentDeltaLen := len(ao.deltas)
+ inHistory := false
+ offset, err = ao.roundOffset(rnd)
+ if err != nil {
+ var roundOffsetError *RoundOffsetError
+ if !errors.As(err, &roundOffsetError) {
+ return ledgercore.OnlineAccountData{}, err
+ }
+ // the round number cannot be found in deltas, it is in history
+ inHistory = true
+ err = nil
+ }
+ paramsOffset, err = ao.roundParamsOffset(rnd)
+ if err != nil {
+ return ledgercore.OnlineAccountData{}, err
+ }
+
+ rewardsProto = config.Consensus[ao.onlineRoundParamsData[paramsOffset].CurrentProtocol]
+ rewardsLevel = ao.onlineRoundParamsData[paramsOffset].RewardsLevel
+
+ // check if we've had this address modified in the past rounds. ( i.e. if it's in the deltas )
+ if !inHistory {
+ macct, indeltas := ao.accounts[addr]
+ if indeltas {
+ // Check if this is the most recent round, in which case, we can
+ // use a cache of the most recent account state.
+ if offset == uint64(len(ao.deltas)) {
+ return macct.data.OnlineAccountData(rewardsProto, rewardsLevel), nil
+ }
+ // the account appears in the deltas, but we don't know if it appears in the
+ // delta range of [0..offset], so we'll need to check :
+ // Traverse the deltas backwards to ensure that later updates take
+ // priority if present.
+ // Note the element at offset is handled above.
+ for offset > 0 {
+ offset--
+ d, ok := ao.deltas[offset].GetData(addr)
+ if ok {
+ return d.OnlineAccountData(rewardsProto, rewardsLevel), nil
+ }
+ }
+ }
+ }
+
+ if macct, has := ao.onlineAccountsCache.read(addr, rnd); has {
+ return macct.GetOnlineAccountData(rewardsProto, rewardsLevel), nil
+ }
+
+ ao.accountsMu.RUnlock()
+ needUnlock = false
+
+ // No updates of this account in the in-memory deltas; use on-disk DB.
+ // As an optimization, we avoid creating
+ // a separate transaction here, and directly use a prepared SQL query
+ // against the database.
+ persistedData, err = ao.accountsq.lookupOnline(addr, rnd)
+ if err != nil || persistedData.rowid == 0 {
+ // no such online account, return empty
+ return ledgercore.OnlineAccountData{}, err
+ }
+ // Now we load the entire history of this account to fill the onlineAccountsCache, so that the
+ // next lookup for this online account will not hit the on-disk DB.
+ //
+ // lookupOnlineHistory fetches the account DB round from the acctrounds table (validThrough) to
+ // distinguish between different cases involving the last-observed value of ao.cachedDBRoundOnline.
+ // 1. Updates to ao.onlineAccountsCache happen with ao.accountsMu taken below, as well as in postCommit()
+ // 2. If we started reading the history (lookupOnlineHistory)
+ // 1. before commitRound or while it is running => OK, read what is in DB and then add new entries in postCommit
+ // * if commitRound deletes some history after, the cache has additional entries and updRound comparison gets a right value
+ // 2. after commitRound but before postCommit => OK, read full history, ignore the update from postCommit in writeFront's updRound comparison
+ // 3. after postCommit => OK, postCommit does not add new entry with writeFrontIfExist, but here all the full history is loaded
+ persistedDataHistory, validThrough, err := ao.accountsq.lookupOnlineHistory(addr)
+ if err != nil || len(persistedDataHistory) == 0 {
+ return ledgercore.OnlineAccountData{}, err
+ }
+ // 3. After we finished reading the history (lookupOnlineHistory), either
+ // 1. The DB round has not advanced (validThrough == currentDbRound) => OK
+ // 2. after commitRound but before postCommit (currentDbRound >= ao.cachedDBRoundOnline && currentDeltaLen == len(ao.deltas)) => OK
+ // the cache gets populated and postCommit updates the new entry
+ // 3. after commitRound and after postCommit => problem
+ // postCommit does not add a new entry, but the cache that would get constructed would miss the latest entry, retry
+ // In order to resolve this lookupOnlineHistory returns dbRound value (as validThrough) and determine what happened
+ // So handle cases 3.1 and 3.2 here, and 3.3 below
+ ao.accountsMu.Lock()
+ if validThrough == currentDbRound || currentDbRound >= ao.cachedDBRoundOnline && currentDeltaLen == len(ao.deltas) {
+ // not advanced or postCommit not called yet, write to the cache and return the value
+ ao.onlineAccountsCache.clear(addr)
+ if ao.onlineAccountsCache.full() {
+ ao.log.Info("onlineAccountsCache full, cannot insert")
+ } else {
+ for _, data := range persistedDataHistory {
+ written := ao.onlineAccountsCache.writeFront(
+ data.addr,
+ cachedOnlineAccount{
+ baseOnlineAccountData: data.accountData,
+ updRound: data.updRound,
+ })
+ if !written {
+ ao.accountsMu.Unlock()
+ err = fmt.Errorf("failed to write history of acct %s for round %d into online accounts cache", data.addr.String(), data.updRound)
+ return ledgercore.OnlineAccountData{}, err
+ }
+ }
+ ao.log.Info("inserted new item to onlineAccountsCache")
+ }
+ ao.accountsMu.Unlock()
+ return persistedData.accountData.GetOnlineAccountData(rewardsProto, rewardsLevel), nil
+ }
+ // case 3.3: retry (for loop iterates and queries again)
+ ao.accountsMu.Unlock()
+
+ if validThrough < currentDbRound {
+ ao.log.Errorf("onlineAccounts.lookupOnlineAccountData: database round %d is behind in-memory round %d", validThrough, currentDbRound)
+ return ledgercore.OnlineAccountData{}, &StaleDatabaseRoundError{databaseRound: validThrough, memoryRound: currentDbRound}
+ }
+ }
+}
+
+// TopOnlineAccounts returns the top n online accounts, sorted by their normalized
+// balance and address, whose voting keys are valid in voteRnd.
+// The second return value represents the total stake that is online for round == rnd, but will
+// not participate in round == voteRnd.
+// See the normalization description in AccountData.NormalizedOnlineBalance().
+// The return value of totalOnlineStake represents the total stake that is online for voteRnd: it is an approximation since voteRnd did not yet occur.
+func (ao *onlineAccounts) TopOnlineAccounts(rnd basics.Round, voteRnd basics.Round, n uint64) (topOnlineAccounts []*ledgercore.OnlineAccount, totalOnlineStake basics.MicroAlgos, err error) {
+ genesisProto := ao.ledger.GenesisProto()
+ ao.accountsMu.RLock()
+ for {
+ currentDbRound := ao.cachedDBRoundOnline
+ currentDeltaLen := len(ao.deltas)
+ offset, err := ao.roundOffset(rnd)
+ inMemory := true
+ if err != nil {
+ var roundOffsetError *RoundOffsetError
+ if !errors.As(err, &roundOffsetError) {
+ ao.accountsMu.RUnlock()
+ return nil, basics.MicroAlgos{}, err
+ }
+ // the round number cannot be found in deltas, it is in history
+ inMemory = false
+ err = nil
+ }
+
+ modifiedAccounts := make(map[basics.Address]*ledgercore.OnlineAccount)
+ // Online accounts that will not be valid in voteRnd. Used to calculate their total stake,
+ // to be removed from the total online stake if required (lower the upper bound of total online stake in voteRnd).
+ invalidOnlineAccounts := make(map[basics.Address]*ledgercore.OnlineAccount)
+ if inMemory {
+ // Determine how many accounts have been modified in-memory,
+ // so that we obtain enough top accounts from disk (accountdb).
+ // If the *onlineAccount is nil, that means the account is offline
+ // as of the most recent change to that account, or its vote key
+ // is not valid in voteRnd. Otherwise, the *onlineAccount is the
+ // representation of the most recent state of the account, and it
+ // is online and can vote in voteRnd.
+ for o := uint64(0); o < offset; o++ {
+ for i := 0; i < ao.deltas[o].Len(); i++ {
+ addr, d := ao.deltas[o].GetByIdx(i)
+ if d.Status != basics.Online {
+ modifiedAccounts[addr] = nil
+ continue
+ }
+
+ if !(d.VoteFirstValid <= voteRnd && voteRnd <= d.VoteLastValid) {
+ modifiedAccounts[addr] = nil
+ invalidOnlineAccounts[addr] = accountDataToOnline(addr, &d, genesisProto)
+ continue
+ }
+
+ modifiedAccounts[addr] = accountDataToOnline(addr, &d, genesisProto)
+ }
+ }
+ }
+
+ ao.accountsMu.RUnlock()
+
+ // Build up a set of candidate accounts. Start by loading the
+ // top N + len(modifiedAccounts) accounts from disk (accountdb).
+ // This ensures that, even if the worst case if all in-memory
+ // changes are deleting the top accounts in accountdb, we still
+ // will have top N left.
+ //
+ // Keep asking for more accounts until we get the desired number,
+ // or there are no more accounts left.
+ candidates := make(map[basics.Address]*ledgercore.OnlineAccount)
+ batchOffset := uint64(0)
+ batchSize := uint64(1024)
+ var dbRound basics.Round
+ for uint64(len(candidates)) < n+uint64(len(modifiedAccounts)) {
+ var accts map[basics.Address]*ledgercore.OnlineAccount
+ start := time.Now()
+ ledgerAccountsonlinetopCount.Inc(nil)
+ err = ao.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ accts, err = accountsOnlineTop(tx, rnd, batchOffset, batchSize, genesisProto)
+ if err != nil {
+ return
+ }
+ dbRound, err = accountsRound(tx)
+ return
+ })
+ ledgerAccountsonlinetopMicros.AddMicrosecondsSince(start, nil)
+ if err != nil {
+ return nil, basics.MicroAlgos{}, err
+ }
+
+ if dbRound != currentDbRound {
+ break
+ }
+
+ for addr, data := range accts {
+ if !(data.VoteFirstValid <= voteRnd && voteRnd <= data.VoteLastValid) {
+ // If already exists it originated from the deltas, meaning its data is more recent
+ if _, ok := invalidOnlineAccounts[addr]; !ok {
+ invalidOnlineAccounts[addr] = data
+ }
+ continue
+ }
+ candidates[addr] = data
+ }
+
+ // If we got fewer than batchSize accounts, there are no
+ // more accounts to look at.
+ if uint64(len(accts)) < batchSize {
+ break
+ }
+
+ batchOffset += batchSize
+ }
+ // If dbRound has advanced beyond the last read of ao.cachedDBRoundOnline, postCommmit has
+ // occurred since then, so wait until deltas is consistent with dbRound and try again.
+ // dbRound will be zero if all the information needed was already found in deltas, so no DB
+ // query was made, and it is safe to let through and return.
+ if dbRound > currentDbRound && dbRound != basics.Round(0) {
+ // database round doesn't match the last au.dbRound we sampled.
+ ao.accountsMu.RLock()
+ for currentDbRound >= ao.cachedDBRoundOnline && currentDeltaLen == len(ao.deltas) {
+ ao.accountsReadCond.Wait()
+ }
+ continue
+ }
+ if dbRound < currentDbRound && dbRound != basics.Round(0) {
+ ao.log.Errorf("onlineAccounts.onlineTop: database round %d is behind in-memory round %d", dbRound, currentDbRound)
+ return nil, basics.MicroAlgos{}, &StaleDatabaseRoundError{databaseRound: dbRound, memoryRound: currentDbRound}
+ }
+
+ // Now update the candidates based on the in-memory deltas.
+ for addr, oa := range modifiedAccounts {
+ if oa == nil {
+ delete(candidates, addr)
+ } else {
+ candidates[addr] = oa
+ }
+ }
+
+ // Get the top N accounts from the candidate set, by inserting all of
+ // the accounts into a heap and then pulling out N elements from the
+ // heap.
+ topHeap := &onlineTopHeap{
+ accts: nil,
+ }
+
+ for _, data := range candidates {
+ heap.Push(topHeap, data)
+ }
+
+ for topHeap.Len() > 0 && uint64(len(topOnlineAccounts)) < n {
+ acct := heap.Pop(topHeap).(*ledgercore.OnlineAccount)
+ topOnlineAccounts = append(topOnlineAccounts, acct)
+ }
+
+ totalOnlineStake, err = ao.onlineTotalsEx(rnd)
+ if err != nil {
+ return nil, basics.MicroAlgos{}, err
+ }
+ ot := basics.OverflowTracker{}
+ for _, oa := range invalidOnlineAccounts {
+ totalOnlineStake = ot.SubA(totalOnlineStake, oa.MicroAlgos)
+ if ot.Overflowed {
+ return nil, basics.MicroAlgos{}, fmt.Errorf("TopOnlineAccounts: overflow in stakeOfflineInVoteRound")
+ }
+ }
+
+ return topOnlineAccounts, totalOnlineStake, nil
+ }
+}
+
+var ledgerAccountsonlinetopCount = metrics.NewCounter("ledger_accountsonlinetop_count", "calls")
+var ledgerAccountsonlinetopMicros = metrics.NewCounter("ledger_accountsonlinetop_micros", "µs spent")
diff --git a/ledger/acctonline_test.go b/ledger/acctonline_test.go
new file mode 100644
index 000000000..c1d14b43e
--- /dev/null
+++ b/ledger/acctonline_test.go
@@ -0,0 +1,1691 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func commitSync(t *testing.T, oa *onlineAccounts, ml *mockLedgerForTracker, rnd basics.Round) {
+ _, maxLookback := oa.committedUpTo(rnd)
+ dcc := &deferredCommitContext{
+ deferredCommitRange: deferredCommitRange{
+ lookback: maxLookback,
+ },
+ }
+ cdr := ml.trackers.produceCommittingTask(rnd, ml.trackers.dbRound, &dcc.deferredCommitRange)
+ if cdr != nil {
+ func() {
+ dcc.deferredCommitRange = *cdr
+ ml.trackers.accountsWriting.Add(1)
+
+ // do not take any locks since all operations are synchronous
+ newBase := basics.Round(dcc.offset) + dcc.oldBase
+ dcc.newBase = newBase
+ err := ml.trackers.commitRound(dcc)
+ require.NoError(t, err)
+ }()
+ }
+}
+
+// commitSyncPartial does not call postCommit
+func commitSyncPartial(t *testing.T, oa *onlineAccounts, ml *mockLedgerForTracker, rnd basics.Round) *deferredCommitContext {
+ _, maxLookback := oa.committedUpTo(rnd)
+ dcc := &deferredCommitContext{
+ deferredCommitRange: deferredCommitRange{
+ lookback: maxLookback,
+ },
+ }
+ cdr := ml.trackers.produceCommittingTask(rnd, ml.trackers.dbRound, &dcc.deferredCommitRange)
+ if cdr != nil {
+ func() {
+ dcc.deferredCommitRange = *cdr
+ ml.trackers.accountsWriting.Add(1)
+
+ // do not take any locks since all operations are synchronous
+ newBase := basics.Round(dcc.offset) + dcc.oldBase
+ dcc.newBase = newBase
+ dcc.flushTime = time.Now()
+
+ for _, lt := range ml.trackers.trackers {
+ err := lt.prepareCommit(dcc)
+ require.NoError(t, err)
+ }
+ err := ml.trackers.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ for _, lt := range ml.trackers.trackers {
+ err0 := lt.commitRound(ctx, tx, dcc)
+ if err0 != nil {
+ return err0
+ }
+ }
+
+ return updateAccountsRound(tx, newBase)
+ })
+ require.NoError(t, err)
+ }()
+ }
+
+ return dcc
+}
+
+func commitSyncPartialComplete(t *testing.T, oa *onlineAccounts, ml *mockLedgerForTracker, dcc *deferredCommitContext) {
+ defer ml.trackers.accountsWriting.Done()
+
+ ml.trackers.dbRound = dcc.newBase
+ for _, lt := range ml.trackers.trackers {
+ lt.postCommit(ml.trackers.ctx, dcc)
+ }
+ ml.trackers.lastFlushTime = dcc.flushTime
+
+ for _, lt := range ml.trackers.trackers {
+ lt.postCommitUnlocked(ml.trackers.ctx, dcc)
+ }
+}
+
+func newBlock(t *testing.T, ml *mockLedgerForTracker, testProtocolVersion protocol.ConsensusVersion, protoParams config.ConsensusParams, rnd basics.Round, base map[basics.Address]basics.AccountData, updates ledgercore.AccountDeltas, prevTotals ledgercore.AccountTotals) (newTotals ledgercore.AccountTotals) {
+ rewardLevel := uint64(0)
+ newTotals = ledgertesting.CalculateNewRoundAccountTotals(t, updates, rewardLevel, protoParams, base, prevTotals)
+
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(rnd),
+ },
+ }
+ blk.RewardsLevel = rewardLevel
+ blk.CurrentProtocol = testProtocolVersion
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
+ delta.Accts.MergeAccounts(updates)
+ delta.Totals = newTotals
+
+ ml.trackers.newBlock(blk, delta)
+
+ return newTotals
+}
+
+// TestAcctOnline checks the online accounts tracker correctly stores accont change history
+// 1. Start with 1000 online accounts
+// 2. Every round set one of them offline
+// 3. Ensure the DB and the base cache are up to date (report them offline)
+// 4. Ensure expiration works
+func TestAcctOnline(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ const seedLookback = 2
+ const seedInteval = 3
+ const maxBalLookback = 2 * seedLookback * seedInteval
+
+ const numAccts = maxBalLookback * 20
+ allAccts := make([]basics.BalanceRecord, numAccts)
+ genesisAccts := []map[basics.Address]basics.AccountData{{}}
+ genesisAccts[0] = make(map[basics.Address]basics.AccountData, numAccts)
+ for i := 0; i < numAccts; i++ {
+ allAccts[i] = basics.BalanceRecord{
+ Addr: ledgertesting.RandomAddress(),
+ AccountData: ledgertesting.RandomOnlineAccountData(0),
+ }
+ genesisAccts[0][allAccts[i].Addr] = allAccts[i].AccountData
+ }
+
+ addSinkAndPoolAccounts(genesisAccts)
+
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestAcctOnline")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.MaxBalLookback = maxBalLookback
+ protoParams.SeedLookback = seedLookback
+ protoParams.SeedRefreshInterval = seedInteval
+ config.Consensus[testProtocolVersion] = protoParams
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ ml := makeMockLedgerForTracker(t, true, 1, testProtocolVersion, genesisAccts)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ maxDeltaLookback := conf.MaxAcctLookback
+
+ au, oa := newAcctUpdates(t, ml, conf)
+ defer oa.close()
+
+ _, totals, err := au.LatestTotals()
+ require.NoError(t, err)
+
+ for _, bal := range allAccts {
+ data, err := oa.accountsq.lookupOnline(bal.Addr, 0)
+ require.NoError(t, err)
+ require.Equal(t, bal.Addr, data.addr)
+ require.Equal(t, basics.Round(0), data.round)
+ require.Equal(t, bal.AccountData.MicroAlgos, data.accountData.MicroAlgos)
+ require.Equal(t, bal.AccountData.RewardsBase, data.accountData.RewardsBase)
+ require.Equal(t, bal.AccountData.VoteFirstValid, data.accountData.VoteFirstValid)
+ require.Equal(t, bal.AccountData.VoteLastValid, data.accountData.VoteLastValid)
+
+ oad, err := oa.lookupOnlineAccountData(0, bal.Addr)
+ require.NoError(t, err)
+ require.NotEmpty(t, oad)
+ }
+
+ // online accounts tracker requires maxDeltaLookback block to start persisting
+ numPersistedAccounts := numAccts - maxDeltaLookback*2
+ targetRound := basics.Round(maxDeltaLookback + numPersistedAccounts)
+ for i := basics.Round(1); i <= targetRound; i++ {
+ var updates ledgercore.AccountDeltas
+ acctIdx := int(i) - 1
+
+ updates.Upsert(allAccts[acctIdx].Addr, ledgercore.AccountData{AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline}, VotingData: ledgercore.VotingData{}})
+
+ base := genesisAccts[i-1]
+ newAccts := applyPartialDeltas(base, updates)
+ genesisAccts = append(genesisAccts, newAccts)
+
+ // prepare block
+ totals = newBlock(t, ml, testProtocolVersion, protoParams, i, base, updates, totals)
+
+ // commit changes synchroniously
+ commitSync(t, oa, ml, i)
+
+ // check the table data and the cache
+ // data gets committed after maxDeltaLookback
+ if i > basics.Round(maxDeltaLookback) {
+ rnd := i - basics.Round(maxDeltaLookback)
+ acctIdx := int(rnd) - 1
+ bal := allAccts[acctIdx]
+ data, err := oa.accountsq.lookupOnline(bal.Addr, rnd)
+ require.NoError(t, err)
+ require.Equal(t, bal.Addr, data.addr)
+ require.NotEmpty(t, data.rowid)
+ require.Equal(t, oa.cachedDBRoundOnline, data.round)
+ require.Empty(t, data.accountData)
+
+ data, has := oa.baseOnlineAccounts.read(bal.Addr)
+ require.True(t, has)
+ require.NotEmpty(t, data.rowid)
+ require.Empty(t, data.accountData)
+
+ oad, err := oa.lookupOnlineAccountData(rnd, bal.Addr)
+ require.NoError(t, err)
+ require.Empty(t, oad)
+
+ // check the prev original row is still there
+ data, err = oa.accountsq.lookupOnline(bal.Addr, rnd-1)
+ require.NoError(t, err)
+ require.Equal(t, bal.Addr, data.addr)
+ require.NotEmpty(t, data.rowid)
+ require.Equal(t, oa.cachedDBRoundOnline, data.round)
+ require.NotEmpty(t, data.accountData)
+ }
+
+ // check data gets expired and removed from the DB
+ // account 0 is set to Offline at round 1
+ // and set expired at X = 1 + MaxBalLookback (= 13)
+ // actual removal happens when X is committed i.e. at round X + maxDeltaLookback (= 21)
+ if i > basics.Round(maxBalLookback+maxDeltaLookback) {
+ rnd := i - basics.Round(maxBalLookback+maxDeltaLookback)
+ acctIdx := int(rnd) - 1
+ bal := allAccts[acctIdx]
+ data, err := oa.accountsq.lookupOnline(bal.Addr, rnd)
+ require.NoError(t, err)
+ require.Equal(t, bal.Addr, data.addr)
+ require.Empty(t, data.rowid)
+ require.Equal(t, oa.cachedDBRoundOnline, data.round)
+ require.Empty(t, data.accountData)
+
+ data, has := oa.baseOnlineAccounts.read(bal.Addr)
+ require.True(t, has)
+ require.NotEmpty(t, data.rowid) // TODO: FIXME: set rowid to empty for these items
+ require.Empty(t, data.accountData)
+
+ // committed round i => dbRound = i - maxDeltaLookback (= 13 for the account 0)
+ // dbRound - maxBalLookback (= 1) is the "set offline" round for account 0
+ // lookup should correctly return empty data round dbRound - maxBalLookback + 1 (simulate the latest +1)
+ oad, err := oa.lookupOnlineAccountData(rnd+1, bal.Addr)
+ require.NoError(t, err)
+ require.Empty(t, oad)
+
+ // check next account
+ // for the account 1, it set to Offline at round 2
+ // and set expired at X = 2 + MaxBalLookback (= 14)
+ nextAcctIdx := acctIdx + 1
+ if nextAcctIdx < int(targetRound) {
+ bal := allAccts[nextAcctIdx]
+ data, err := oa.accountsq.lookupOnline(bal.Addr, rnd)
+ require.NoError(t, err)
+ require.Equal(t, bal.Addr, data.addr)
+ require.NotEmpty(t, data.rowid)
+ require.Equal(t, oa.cachedDBRoundOnline, data.round)
+ require.NotEmpty(t, data.accountData)
+
+ // the most recent value is empty because the account is scheduled for removal
+ data, has := oa.baseOnlineAccounts.read(bal.Addr)
+ require.True(t, has)
+ require.NotEmpty(t, data.rowid) // TODO: FIXME: set rowid to empty for these items
+ require.Empty(t, data.accountData)
+
+ // account 1 went offline at round 2 => it offline at requested round 1+1=2
+ oad, err := oa.lookupOnlineAccountData(rnd+1, bal.Addr)
+ require.NoError(t, err)
+ require.Empty(t, oad)
+ }
+ // check next next account
+ // for the account 2, it set to Offline at round 3
+ // at round 1 + 1 = 2 it online and should te correctly retrieved from DB and lookup
+ nextNextAcctIdx := nextAcctIdx + 1
+ if nextNextAcctIdx < int(targetRound) {
+ bal := allAccts[nextNextAcctIdx]
+ data, err := oa.accountsq.lookupOnline(bal.Addr, rnd)
+ require.NoError(t, err)
+ require.Equal(t, bal.Addr, data.addr)
+ require.NotEmpty(t, data.rowid)
+ require.Equal(t, oa.cachedDBRoundOnline, data.round)
+ require.NotEmpty(t, data.accountData)
+
+ // the most recent value is empty because the account is scheduled for removal
+ data, has := oa.baseOnlineAccounts.read(bal.Addr)
+ require.True(t, has)
+ require.NotEmpty(t, data.rowid) // TODO: FIXME: set rowid to empty for these items
+ require.Empty(t, data.accountData)
+
+ // account 2 went offline at round 3 => it online at requested round 1+1=2
+ oad, err := oa.lookupOnlineAccountData(rnd+1, bal.Addr)
+ require.NoError(t, err)
+ require.NotEmpty(t, oad)
+ }
+ }
+ }
+
+ // ensure rounds
+ require.Equal(t, targetRound, au.latest())
+ require.Equal(t, basics.Round(numPersistedAccounts), oa.cachedDBRoundOnline)
+
+ // at this point we should have maxBalLookback last accounts of numPersistedAccounts
+ // to be in the DB and in the cache and not yet removed
+ for i := numPersistedAccounts - maxBalLookback; i < numPersistedAccounts; i++ {
+ bal := allAccts[i]
+ // we expire account i at round i+1
+ data, err := oa.accountsq.lookupOnline(bal.Addr, basics.Round(i+1))
+ require.NoError(t, err)
+ require.Equal(t, bal.Addr, data.addr)
+ require.NotEmpty(t, data.rowid)
+ require.Equal(t, oa.cachedDBRoundOnline, data.round)
+ require.Empty(t, data.accountData)
+
+ data, has := oa.baseOnlineAccounts.read(bal.Addr)
+ require.True(t, has)
+ require.NotEmpty(t, data.rowid)
+ require.Empty(t, data.accountData)
+
+ oad, err := oa.lookupOnlineAccountData(basics.Round(i+1), bal.Addr)
+ require.NoError(t, err)
+ require.Empty(t, oad)
+
+ // ensure the online entry is still in the DB for the round i
+ data, err = oa.accountsq.lookupOnline(bal.Addr, basics.Round(i))
+ require.NoError(t, err)
+ require.Equal(t, bal.Addr, data.addr)
+ require.NotEmpty(t, data.rowid)
+ require.Equal(t, oa.cachedDBRoundOnline, data.round)
+ require.NotEmpty(t, data.accountData)
+ }
+
+ // check maxDeltaLookback accounts in in-memory deltas, check it
+ for i := numPersistedAccounts; i < numPersistedAccounts+maxDeltaLookback; i++ {
+ bal := allAccts[i]
+ oad, err := oa.lookupOnlineAccountData(basics.Round(i+1), bal.Addr)
+ require.NoError(t, err)
+ require.Empty(t, oad)
+
+ // the table has old values b/c not committed yet
+ data, err := oa.accountsq.lookupOnline(bal.Addr, basics.Round(i))
+ require.NoError(t, err)
+ require.Equal(t, bal.Addr, data.addr)
+ require.NotEmpty(t, data.rowid)
+ require.Equal(t, oa.cachedDBRoundOnline, data.round)
+ require.NotEmpty(t, data.accountData)
+
+ // the base cache also does not have such entires
+ data, has := oa.baseOnlineAccounts.read(bal.Addr)
+ require.False(t, has)
+ require.Empty(t, data)
+ }
+
+ // not take some account and modify its stake.
+ // ensure it has the valid entries in both deltas and history
+ start := targetRound + 1
+ end := start + basics.Round(maxDeltaLookback+10)
+ mutAccount := allAccts[start]
+ ad := ledgercore.ToAccountData(mutAccount.AccountData)
+ const delta = 1000
+ for i := start; i <= end; i++ {
+ newAD := ad.AccountBaseData
+ newAD.MicroAlgos.Raw += uint64(i-start+1) * delta
+ var updates ledgercore.AccountDeltas
+ updates.Upsert(
+ mutAccount.Addr,
+ ledgercore.AccountData{
+ AccountBaseData: newAD,
+ VotingData: ad.VotingData,
+ },
+ )
+
+ base := genesisAccts[i-1]
+ newAccts := applyPartialDeltas(base, updates)
+ genesisAccts = append(genesisAccts, newAccts)
+
+ // prepare block
+ totals = newBlock(t, ml, testProtocolVersion, protoParams, i, base, updates, totals)
+
+ // flush all old deltas
+ if uint64(i-start+1) == maxDeltaLookback {
+ commitSync(t, oa, ml, i)
+ }
+ }
+ // flush the mutAccount
+ commitSync(t, oa, ml, end)
+
+ for i := start; i <= end; i++ {
+ oad, err := oa.lookupOnlineAccountData(basics.Round(i), mutAccount.Addr)
+ require.NoError(t, err)
+ // rewardLevel is zero => MicroAlgos == MicroAlgosWithRewards
+ expected := ad.AccountBaseData.MicroAlgos.Raw + uint64(i-start+1)*delta
+ require.Equal(t, expected, oad.MicroAlgosWithRewards.Raw)
+ }
+}
+
+// TestAcctOnlineCache toggles accounts from being online to offline and verifies
+// that the db and cache have the correct data
+func TestAcctOnlineCache(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ const numAccts = 5
+ const maxBalLookback = 3 * numAccts
+
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestAcctOnline")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.MaxBalLookback = maxBalLookback
+ config.Consensus[testProtocolVersion] = protoParams
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ for _, val := range []uint64{4, 8} {
+ t.Run(fmt.Sprintf("lookback=%d", val), func(t *testing.T) {
+ allAccts := make([]basics.BalanceRecord, numAccts)
+ genesisAccts := []map[basics.Address]basics.AccountData{{}}
+ genesisAccts[0] = make(map[basics.Address]basics.AccountData, numAccts+1)
+ for i := 0; i < numAccts; i++ {
+ allAccts[i] = basics.BalanceRecord{
+ Addr: ledgertesting.RandomAddress(),
+ AccountData: ledgertesting.RandomOnlineAccountData(0),
+ }
+ genesisAccts[0][allAccts[i].Addr] = allAccts[i].AccountData
+ }
+
+ addrA := ledgertesting.RandomAddress()
+ acctA := ledgertesting.RandomOnlineAccountData(0)
+ genesisAccts[0][addrA] = acctA
+
+ addSinkAndPoolAccounts(genesisAccts)
+
+ ml := makeMockLedgerForTracker(t, true, 1, testProtocolVersion, genesisAccts)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ conf.MaxAcctLookback = val
+ maxDeltaLookback := conf.MaxAcctLookback
+
+ au, oa := newAcctUpdates(t, ml, conf)
+ defer oa.close()
+
+ _, totals, err := au.LatestTotals()
+ require.NoError(t, err)
+
+ // check cache was initialized with db state
+ for _, bal := range allAccts {
+ oad, has := oa.onlineAccountsCache.read(bal.Addr, 0)
+ require.True(t, has)
+ require.NotEmpty(t, oad)
+ }
+
+ // online accounts tracker requires maxDeltaLookback block to start persisting
+ targetRound := basics.Round(maxDeltaLookback * numAccts * 2)
+ for i := basics.Round(1); i <= targetRound; i++ {
+ var updates ledgercore.AccountDeltas
+ acctIdx := (int(i) - 1) % numAccts
+
+ // put all accts online, then all offline, one each round
+ if (int(i)-1)%(numAccts*2) >= numAccts {
+ updates.Upsert(allAccts[acctIdx].Addr, ledgercore.AccountData{AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline}, VotingData: ledgercore.VotingData{}})
+ } else {
+ updates.Upsert(allAccts[acctIdx].Addr, ledgercore.ToAccountData(allAccts[acctIdx].AccountData))
+ }
+
+ // set acctA online for each round
+ updates.Upsert(addrA, ledgercore.AccountData{AccountBaseData: ledgercore.AccountBaseData{Status: basics.Online}, VotingData: ledgercore.VotingData{VoteLastValid: basics.Round(100 * i)}})
+
+ base := genesisAccts[i-1]
+ newAccts := applyPartialDeltas(base, updates)
+ genesisAccts = append(genesisAccts, newAccts)
+
+ // prepare block
+ totals = newBlock(t, ml, testProtocolVersion, protoParams, i, base, updates, totals)
+
+ // commit changes synchroniously
+ commitSync(t, oa, ml, i)
+
+ // check the table data and the cache
+ // data gets committed after maxDeltaLookback
+ if i > basics.Round(maxDeltaLookback) {
+ rnd := i - basics.Round(maxDeltaLookback)
+ acctIdx := (int(rnd) - 1) % numAccts
+ bal := allAccts[acctIdx]
+ data, err := oa.accountsq.lookupOnline(bal.Addr, rnd)
+ require.NoError(t, err)
+ require.Equal(t, bal.Addr, data.addr)
+ require.NotEmpty(t, data.rowid)
+ require.Equal(t, oa.cachedDBRoundOnline, data.round)
+ if (rnd-1)%(numAccts*2) >= numAccts {
+ require.Empty(t, data.accountData)
+ } else {
+ require.NotEmpty(t, data.accountData)
+ }
+
+ cachedData, has := oa.onlineAccountsCache.read(bal.Addr, rnd)
+ require.True(t, has)
+ if (rnd-1)%(numAccts*2) >= numAccts {
+ require.Empty(t, cachedData.baseOnlineAccountData)
+ } else {
+ require.NotEmpty(t, cachedData.baseOnlineAccountData)
+ }
+
+ oad, err := oa.lookupOnlineAccountData(rnd, bal.Addr)
+ require.NoError(t, err)
+ if (rnd-1)%(numAccts*2) >= numAccts {
+ require.Empty(t, oad)
+ } else {
+ require.NotEmpty(t, oad)
+ }
+ }
+
+ // check data still persisted in cache and db
+ if i > basics.Round(maxBalLookback+maxDeltaLookback) {
+ rnd := i - basics.Round(maxBalLookback+maxDeltaLookback)
+ acctIdx := (int(rnd) - 1) % numAccts
+ bal := allAccts[acctIdx]
+ data, err := oa.accountsq.lookupOnline(bal.Addr, rnd)
+ require.NoError(t, err)
+ require.Equal(t, bal.Addr, data.addr)
+ require.Equal(t, oa.cachedDBRoundOnline, data.round)
+ if (rnd-1)%(numAccts*2) >= numAccts {
+ require.Empty(t, data.accountData)
+ require.Empty(t, data.rowid)
+ } else {
+ require.NotEmpty(t, data.rowid)
+ require.NotEmpty(t, data.accountData)
+ }
+
+ cachedData, has := oa.onlineAccountsCache.read(bal.Addr, rnd)
+ require.True(t, has)
+ if (rnd-1)%(numAccts*2) >= numAccts {
+ require.Empty(t, cachedData.baseOnlineAccountData)
+ } else {
+ require.NotEmpty(t, cachedData.baseOnlineAccountData)
+ }
+
+ // committed round i => dbRound = i - maxDeltaLookback
+ // lookup should correctly return data for earlist round dbRound - maxBalLookback + 1
+ oad, err := oa.lookupOnlineAccountData(rnd+1, bal.Addr)
+ require.NoError(t, err)
+ if (rnd-1)%(numAccts*2) >= numAccts {
+ require.Empty(t, oad)
+ } else {
+ require.NotEmpty(t, oad)
+ }
+ }
+ }
+
+ require.Equal(t, targetRound-basics.Round(maxDeltaLookback), oa.cachedDBRoundOnline)
+ res, validThrough, err := oa.accountsq.lookupOnlineHistory(addrA)
+ require.NoError(t, err)
+ require.Equal(t, oa.cachedDBRoundOnline, validThrough)
+ // +1 because of deletion before X, and not checking acct state at X
+ require.Equal(t, int(maxBalLookback)+1, len(res))
+ // ensure the cache length corresponds to DB
+ require.Equal(t, len(res), oa.onlineAccountsCache.accounts[addrA].Len())
+ for _, entry := range res {
+ cached, has := oa.onlineAccountsCache.read(addrA, entry.updRound)
+ require.True(t, has)
+ require.Equal(t, entry.updRound, cached.updRound)
+ require.Equal(t, entry.accountData.VoteLastValid, cached.VoteLastValid)
+ }
+
+ // ensure correct behavior after deleting cache
+ acctIdx := (int(targetRound) - 1) % numAccts
+ bal := allAccts[acctIdx]
+ delete(oa.onlineAccountsCache.accounts, bal.Addr)
+ // the account acctIdx was modified:
+ // at round targetRound - 0*numAccts and set offline (see the loop above)
+ // at round targetRound - 1*numAccts it was set online
+ // at round targetRound - 2*numAccts it was set offline...
+ // find the oldest round in DB that is online and not deleted yet
+ // 1. thus must be even cycles back
+ // 2. this should be some cycles back from persisting round that is targetRound - maxDeltaLookback
+ candidate := targetRound - basics.Round(maxDeltaLookback) - maxBalLookback
+ cycle := (targetRound - candidate) / numAccts
+ oldRound := candidate - candidate%numAccts
+ if cycle%4 != 0 {
+ oldRound += numAccts
+ }
+ expectedRound := oldRound
+ minLookupRound := targetRound - basics.Round(maxBalLookback+maxDeltaLookback) + 1
+ if oldRound < minLookupRound {
+ // if below than the min round online accounts support than adjust
+ oldRound = minLookupRound
+ }
+
+ // cache should be repopulated on this command
+ oa.lookupOnlineAccountData(oldRound, bal.Addr)
+ cachedData, has := oa.onlineAccountsCache.read(bal.Addr, oldRound)
+ require.True(t, has)
+ require.Equal(t, expectedRound, cachedData.updRound)
+ require.NotEmpty(t, cachedData.baseOnlineAccountData)
+
+ // cache should contain data for new rounds
+ // (the last entry should be offline)
+ // check at targetRound - 10 because that is the latest round written to db
+ newRound := targetRound - basics.Round(10)
+ cachedData, has = oa.onlineAccountsCache.read(bal.Addr, newRound)
+ require.True(t, has)
+ require.Equal(t, newRound, cachedData.updRound)
+ require.Empty(t, cachedData.baseOnlineAccountData)
+
+ })
+ }
+}
+
+// TestAcctOnlineRoundParamsOffset checks that roundParamsOffset return the correct indices.
+func TestAcctOnlineRoundParamsOffset(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ao := onlineAccounts{}
+
+ ao.cachedDBRoundOnline = 0
+ ao.deltas = make([]ledgercore.AccountDeltas, 10)
+ ao.onlineRoundParamsData = make([]ledgercore.OnlineRoundParamsData, 11)
+ offset, err := ao.roundParamsOffset(basics.Round(6))
+ require.NoError(t, err)
+ require.Equal(t, uint64(6), offset)
+
+ ao.cachedDBRoundOnline = 3 // latest = 3 + 10 = 13
+ ao.deltas = make([]ledgercore.AccountDeltas, 10)
+ ao.onlineRoundParamsData = make([]ledgercore.OnlineRoundParamsData, 11)
+ offset, err = ao.roundParamsOffset(basics.Round(6))
+ require.NoError(t, err)
+ require.Equal(t, uint64(3), offset)
+
+ ao.cachedDBRoundOnline = 7 // latest = 9
+ ao.deltas = make([]ledgercore.AccountDeltas, 2)
+ ao.onlineRoundParamsData = make([]ledgercore.OnlineRoundParamsData, 10)
+ offset, err = ao.roundParamsOffset(basics.Round(5))
+ require.NoError(t, err)
+ require.Equal(t, uint64(5), offset)
+
+ ao.cachedDBRoundOnline = 7 // latest = 9
+ ao.deltas = make([]ledgercore.AccountDeltas, 2)
+ ao.onlineRoundParamsData = make([]ledgercore.OnlineRoundParamsData, 7)
+ offset, err = ao.roundParamsOffset(basics.Round(5))
+ require.NoError(t, err)
+ require.Equal(t, uint64(2), offset)
+
+ ao.cachedDBRoundOnline = 400
+ ao.deltas = make([]ledgercore.AccountDeltas, 10)
+ ao.onlineRoundParamsData = make([]ledgercore.OnlineRoundParamsData, 331)
+ offset, err = ao.roundParamsOffset(basics.Round(100))
+ require.NoError(t, err)
+ require.Equal(t, uint64(20), offset)
+
+ ao.cachedDBRoundOnline = 400
+ ao.deltas = make([]ledgercore.AccountDeltas, 10)
+ ao.onlineRoundParamsData = make([]ledgercore.OnlineRoundParamsData, 331)
+ offset, err = ao.roundParamsOffset(basics.Round(6))
+ require.Error(t, err)
+ require.Zero(t, offset)
+
+ ao.cachedDBRoundOnline = 400
+ ao.deltas = nil
+ ao.onlineRoundParamsData = make([]ledgercore.OnlineRoundParamsData, 1)
+ offset, err = ao.roundParamsOffset(basics.Round(400))
+ require.NoError(t, err)
+ require.Equal(t, uint64(0), offset)
+
+ ao.cachedDBRoundOnline = 400
+ ao.deltas = nil
+ ao.onlineRoundParamsData = nil
+ offset, err = ao.roundParamsOffset(basics.Round(400))
+ require.Error(t, err)
+ require.Zero(t, offset)
+}
+
+// TestAcctOnlineRoundParamsCache tests that the ao.onlineRoundParamsData cache and
+// the onlineRoundParamsData db are synced and contain the right data after a series
+// of new blocks are added to the ledger. Also ensure that these data structures are
+// trimmed properly to hold only proto.MaxBalLookback entries.
+func TestAcctOnlineRoundParamsCache(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ const maxBalLookback = 100
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.MaxBalLookback = maxBalLookback
+ testProtocolVersion1 := protocol.ConsensusVersion("test-protocol-TestAcctOnline1")
+ config.Consensus[testProtocolVersion1] = protoParams
+ testProtocolVersion2 := protocol.ConsensusVersion("test-protocol-TestAcctOnline2")
+ config.Consensus[testProtocolVersion2] = protoParams
+ testProtocolVersion3 := protocol.ConsensusVersion("test-protocol-TestAcctOnline3")
+ config.Consensus[testProtocolVersion3] = protoParams
+ defer func() {
+ delete(config.Consensus, testProtocolVersion1)
+ delete(config.Consensus, testProtocolVersion2)
+ delete(config.Consensus, testProtocolVersion3)
+ }()
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+
+ addSinkAndPoolAccounts(accts)
+
+ ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion1, accts)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ au, ao := newAcctUpdates(t, ml, conf)
+ defer au.close()
+ defer ao.close()
+
+ // cover 10 genesis blocks
+ rewardLevel := uint64(0)
+ for i := 1; i < 10; i++ {
+ accts = append(accts, accts[0])
+ }
+
+ allTotals := make(map[basics.Round]ledgercore.AccountTotals)
+
+ start := basics.Round(10)
+ end := basics.Round(2*maxBalLookback + 15)
+ for i := start; i < end; i++ {
+ consensusVersion := testProtocolVersion1
+ if i > basics.Round(maxBalLookback) {
+ consensusVersion = testProtocolVersion2
+ }
+ if i > 2*basics.Round(maxBalLookback) {
+ consensusVersion = testProtocolVersion3
+ }
+ rewardLevelDelta := crypto.RandUint64() % 3
+ rewardLevel += rewardLevelDelta
+ var updates ledgercore.AccountDeltas
+ var totals map[basics.Address]ledgercore.AccountData
+ base := accts[i-1]
+ updates, totals = ledgertesting.RandomDeltasBalanced(1, base, rewardLevel)
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
+ require.NoError(t, err)
+
+ newPool := totals[testPoolAddr]
+ newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta
+ updates.Upsert(testPoolAddr, newPool)
+ totals[testPoolAddr] = newPool
+ newAccts := applyPartialDeltas(base, updates)
+
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(i),
+ },
+ }
+ blk.RewardsLevel = rewardLevel
+ blk.CurrentProtocol = consensusVersion
+
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
+ delta.Accts.MergeAccounts(updates)
+
+ delta.Totals = accumulateTotals(t, consensusVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
+ allTotals[i] = delta.Totals
+ ml.trackers.newBlock(blk, delta)
+ accts = append(accts, newAccts)
+
+ if i > basics.Round(maxBalLookback) && i%10 == 0 {
+ onlineTotal, err := ao.onlineTotals(i - basics.Round(maxBalLookback))
+ require.NoError(t, err)
+ require.Equal(t, allTotals[i-basics.Round(maxBalLookback)].Online.Money, onlineTotal)
+ expectedConsensusVersion := testProtocolVersion1
+ if i > 2*basics.Round(maxBalLookback) {
+ expectedConsensusVersion = testProtocolVersion2
+ }
+ roundParamsOffset, err := ao.roundParamsOffset(i - basics.Round(maxBalLookback))
+ require.NoError(t, err)
+ require.Equal(t, expectedConsensusVersion, ao.onlineRoundParamsData[roundParamsOffset].CurrentProtocol)
+ expectedConsensusVersion = testProtocolVersion2
+ if i > 2*basics.Round(maxBalLookback) {
+ expectedConsensusVersion = testProtocolVersion3
+ }
+ roundParamsOffset, err = ao.roundParamsOffset(i)
+ require.NoError(t, err)
+ require.Equal(t, expectedConsensusVersion, ao.onlineRoundParamsData[roundParamsOffset].CurrentProtocol)
+ }
+ }
+
+ ml.trackers.lastFlushTime = time.Time{}
+
+ ml.trackers.committedUpTo(2*basics.Round(maxBalLookback) + 14)
+ ml.trackers.waitAccountsWriting()
+
+ var dbOnlineRoundParams []ledgercore.OnlineRoundParamsData
+ var endRound basics.Round
+ err := ao.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ dbOnlineRoundParams, endRound, err = accountsOnlineRoundParams(tx)
+ return err
+ })
+ require.NoError(t, err)
+ require.Equal(t, ao.cachedDBRoundOnline, endRound)
+ require.Equal(t, ao.onlineRoundParamsData[:basics.Round(maxBalLookback)], dbOnlineRoundParams)
+
+ for i := ml.Latest() - basics.Round(maxBalLookback); i < ml.Latest(); i++ {
+ onlineTotal, err := ao.onlineTotals(i)
+ require.NoError(t, err)
+ require.Equal(t, allTotals[i].Online.Money, onlineTotal)
+ }
+}
+
+// TestAcctOnlineCacheDBSync checks if lookup happens in between db commit and the cache update
+// the online account tracker returns correct data
+func TestAcctOnlineCacheDBSync(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ const seedLookback = 2
+ const seedInteval = 3
+ const maxBalLookback = 2 * seedLookback * seedInteval
+
+ const numAccts = maxBalLookback * 20
+ allAccts := make([]basics.BalanceRecord, numAccts)
+ genesisAccts := []map[basics.Address]basics.AccountData{{}}
+ genesisAccts[0] = make(map[basics.Address]basics.AccountData, numAccts)
+ for i := 0; i < numAccts; i++ {
+ allAccts[i] = basics.BalanceRecord{
+ Addr: ledgertesting.RandomAddress(),
+ AccountData: ledgertesting.RandomOnlineAccountData(0),
+ }
+ genesisAccts[0][allAccts[i].Addr] = allAccts[i].AccountData
+ }
+
+ addSinkAndPoolAccounts(genesisAccts)
+
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestAcctOnlineCacheDBSync")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.MaxBalLookback = maxBalLookback
+ protoParams.SeedLookback = seedLookback
+ protoParams.SeedRefreshInterval = seedInteval
+ config.Consensus[testProtocolVersion] = protoParams
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ addrA := allAccts[0].Addr
+
+ copyGenesisAccts := func() []map[basics.Address]basics.AccountData {
+ accounts := []map[basics.Address]basics.AccountData{{}}
+ accounts[0] = make(map[basics.Address]basics.AccountData, numAccts)
+ for addr, ad := range genesisAccts[0] {
+ accounts[0][addr] = ad
+ }
+ return accounts
+ }
+
+ // test 1: large deltas, have addrA offline in deltas, ensure it works
+ t.Run("large-delta-go-offline", func(t *testing.T) {
+ ml := makeMockLedgerForTracker(t, true, 1, testProtocolVersion, genesisAccts)
+ defer ml.Close()
+ conf := config.GetDefaultLocal()
+ conf.MaxAcctLookback = maxBalLookback
+
+ au, oa := newAcctUpdates(t, ml, conf)
+ defer oa.close()
+ _, totals, err := au.LatestTotals()
+ require.NoError(t, err)
+
+ var updates ledgercore.AccountDeltas
+ updates.Upsert(addrA, ledgercore.AccountData{AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline}, VotingData: ledgercore.VotingData{}})
+
+ // copy genesisAccts for the test
+ accounts := copyGenesisAccts()
+ base := accounts[0]
+ newAccts := applyPartialDeltas(base, updates)
+ accounts = append(accounts, newAccts)
+
+ // prepare block
+ totals = newBlock(t, ml, testProtocolVersion, protoParams, 1, base, updates, totals)
+ // commit changes synchroniously
+ commitSync(t, oa, ml, 1)
+
+ // add maxBalLookback empty blocks
+ for i := 2; i <= maxBalLookback; i++ {
+ var updates ledgercore.AccountDeltas
+ base := accounts[i-1]
+ totals = newBlock(t, ml, testProtocolVersion, protoParams, basics.Round(i), base, updates, totals)
+ accounts = append(accounts, newAccts)
+ commitSync(t, oa, ml, basics.Round(i))
+ }
+ // ensure addrA is in deltas
+ macct, has := oa.accounts[addrA]
+ require.True(t, has)
+ require.Equal(t, 1, macct.ndeltas)
+ // and the cache has the prev value
+ cachedData, has := oa.onlineAccountsCache.read(addrA, 1)
+ require.True(t, has)
+ require.NotEmpty(t, cachedData.VoteLastValid)
+ // lookup and check the value returned is offline
+ data, err := oa.lookupOnlineAccountData(1, addrA)
+ require.NoError(t, err)
+ require.Empty(t, data.VotingData.VoteLastValid)
+
+ // commit the next block
+ // and simulate lookup in between committing the db and updating the cache
+ updates = ledgercore.AccountDeltas{}
+ rnd := maxBalLookback + 1
+ base = accounts[rnd-1]
+ totals = newBlock(t, ml, testProtocolVersion, protoParams, basics.Round(rnd), base, updates, totals)
+ dcc := commitSyncPartial(t, oa, ml, basics.Round(rnd))
+ // defer in order to recover from ml.trackers.accountsWriting.Wait()
+ defer func() {
+ // complete the commit and check lookup again
+ commitSyncPartialComplete(t, oa, ml, dcc)
+ _, has = oa.accounts[addrA]
+ require.False(t, has)
+ cachedData, has = oa.onlineAccountsCache.read(addrA, 1)
+ require.True(t, has)
+ require.Empty(t, cachedData.VoteLastValid)
+ data, err = oa.lookupOnlineAccountData(1, addrA)
+ require.NoError(t, err)
+ require.Empty(t, data.VotingData.VoteLastValid)
+ }()
+
+ // ensure the data still in deltas, not in the cache and lookupOnlineAccountData still return a correct value
+ macct, has = oa.accounts[addrA]
+ require.True(t, has)
+ require.Equal(t, 1, macct.ndeltas)
+ cachedData, has = oa.onlineAccountsCache.read(addrA, 1)
+ require.True(t, has)
+ require.NotEmpty(t, cachedData.VoteLastValid)
+ data, err = oa.lookupOnlineAccountData(1, addrA)
+ require.NoError(t, err)
+ require.Empty(t, data.VotingData.VoteLastValid)
+ })
+
+ // test 2: small deltas, have addrA offline in DB and in the cache, ensure it works
+ t.Run("small-delta-go-offline", func(t *testing.T) {
+ ml := makeMockLedgerForTracker(t, true, 1, testProtocolVersion, genesisAccts)
+ defer ml.Close()
+ conf := config.GetDefaultLocal()
+ conf.MaxAcctLookback = 4
+
+ au, oa := newAcctUpdates(t, ml, conf)
+ defer oa.close()
+ _, totals, err := au.LatestTotals()
+ require.NoError(t, err)
+
+ var updates ledgercore.AccountDeltas
+ updates.Upsert(addrA, ledgercore.AccountData{AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline}, VotingData: ledgercore.VotingData{}})
+
+ // copy genesisAccts for the test
+ accounts := copyGenesisAccts()
+ base := accounts[0]
+ newAccts := applyPartialDeltas(base, updates)
+ accounts = append(accounts, newAccts)
+
+ // prepare block
+ totals = newBlock(t, ml, testProtocolVersion, protoParams, 1, base, updates, totals)
+ // commit changes synchroniously
+ commitSync(t, oa, ml, 1)
+
+ // add maxBalLookback empty blocks
+ for i := 2; i <= maxBalLookback; i++ {
+ var updates ledgercore.AccountDeltas
+ base := accounts[i-1]
+ totals = newBlock(t, ml, testProtocolVersion, protoParams, basics.Round(i), base, updates, totals)
+ accounts = append(accounts, newAccts)
+ commitSync(t, oa, ml, basics.Round(i))
+ }
+ // ensure addrA not in deltas, in the cache and lookupOnlineAccountData returns a correct value
+ _, has := oa.accounts[addrA]
+ require.False(t, has)
+ cachedData, has := oa.onlineAccountsCache.read(addrA, 1)
+ require.True(t, has)
+ require.Empty(t, cachedData.VoteLastValid)
+ data, err := oa.lookupOnlineAccountData(1, addrA)
+ require.NoError(t, err)
+ require.Empty(t, data.VotingData.VoteLastValid)
+ })
+
+ // test 3: max deltas size = 1 => all deltas committed but not written to the cache
+ // addrA does offline, both online and offline entries gets removed from the DB but the cache
+ // must returns a correct value
+ t.Run("no-delta-go-offline-delete", func(t *testing.T) {
+ ml := makeMockLedgerForTracker(t, true, 1, testProtocolVersion, genesisAccts)
+ defer ml.Close()
+ conf := config.GetDefaultLocal()
+ const maxDeltaLookback = 0
+ conf.MaxAcctLookback = maxDeltaLookback
+
+ au, oa := newAcctUpdates(t, ml, conf)
+ defer oa.close()
+ _, totals, err := au.LatestTotals()
+ require.NoError(t, err)
+
+ addrB := ledgertesting.RandomAddress()
+ var updates ledgercore.AccountDeltas
+ updates.Upsert(addrA, ledgercore.AccountData{AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline}, VotingData: ledgercore.VotingData{}})
+ updates.Upsert(addrB, ledgercore.AccountData{AccountBaseData: ledgercore.AccountBaseData{Status: basics.Online}, VotingData: ledgercore.VotingData{VoteLastValid: 10000}})
+
+ // copy genesisAccts for the test
+ accounts := copyGenesisAccts()
+ base := accounts[0]
+ newAccts := applyPartialDeltas(base, updates)
+ accounts = append(accounts, newAccts)
+
+ // prepare block
+ totals = newBlock(t, ml, testProtocolVersion, protoParams, 1, base, updates, totals)
+ // commit changes synchroniously
+ commitSync(t, oa, ml, 1)
+
+ // add maxDeltaLookback empty blocks
+ for i := 2; i <= maxBalLookback; i++ {
+ var updates ledgercore.AccountDeltas
+ base := accounts[i-1]
+ totals = newBlock(t, ml, testProtocolVersion, protoParams, basics.Round(i), base, updates, totals)
+ accounts = append(accounts, newAccts)
+ commitSync(t, oa, ml, basics.Round(i))
+ }
+ // ensure addrA not in deltas, in the cache and lookupOnlineAccountData returns a correct value
+ _, has := oa.accounts[addrA]
+ require.False(t, has)
+ cachedData, has := oa.onlineAccountsCache.read(addrA, 1)
+ require.True(t, has)
+ require.Empty(t, cachedData.VoteLastValid)
+ data, err := oa.lookupOnlineAccountData(1, addrA)
+ require.NoError(t, err)
+ require.Empty(t, data.VotingData.VoteLastValid)
+ // ensure offline entry is in DB as well
+ pad, err := oa.accountsq.lookupOnline(addrA, 1)
+ require.NoError(t, err)
+ require.Equal(t, addrA, pad.addr)
+ require.NotEmpty(t, pad.rowid)
+ require.Empty(t, pad.accountData.VoteLastValid)
+
+ // commit a block to get these entries removed
+ // ensure the DB entry gone, the cache has it and lookupOnlineAccountData works as expected
+ updates = ledgercore.AccountDeltas{}
+ rnd := maxBalLookback + 1
+ base = accounts[rnd-1]
+ totals = newBlock(t, ml, testProtocolVersion, protoParams, basics.Round(rnd), base, updates, totals)
+ dcc := commitSyncPartial(t, oa, ml, basics.Round(rnd))
+ // defer in order to recover from ml.trackers.accountsWriting.Wait()
+ defer func() {
+ // complete the commit and check lookup again
+ commitSyncPartialComplete(t, oa, ml, dcc)
+ _, has = oa.accounts[addrA]
+ require.False(t, has)
+ cachedData, has = oa.onlineAccountsCache.read(addrA, 1)
+ require.False(t, has)
+ require.Empty(t, cachedData.VoteLastValid)
+ // round 1 is out of max history
+ data, err = oa.lookupOnlineAccountData(1, addrA)
+ require.Error(t, err)
+ data, err = oa.lookupOnlineAccountData(2, addrA)
+ require.NoError(t, err)
+ require.Empty(t, data.VotingData.VoteLastValid)
+
+ _, has = oa.onlineAccountsCache.read(addrB, 1)
+ require.True(t, has) // full history loaded when looked up addrB prev time
+ _, err = oa.lookupOnlineAccountData(1, addrB)
+ require.Error(t, err)
+ pad, err = oa.accountsq.lookupOnline(addrB, 1)
+ require.NoError(t, err)
+ require.Equal(t, addrB, pad.addr)
+ require.NotEmpty(t, pad.rowid)
+ require.NotEmpty(t, pad.accountData.VoteLastValid)
+ }()
+
+ // ensure the data not in deltas, in the cache and lookupOnlineAccountData still return a correct value
+ _, has = oa.accounts[addrA]
+ require.False(t, has)
+ cachedData, has = oa.onlineAccountsCache.read(addrA, 1)
+ require.True(t, has)
+ require.Empty(t, cachedData.VoteLastValid)
+ data, err = oa.lookupOnlineAccountData(1, addrA)
+ require.NoError(t, err)
+ require.Empty(t, data.VotingData.VoteLastValid)
+ pad, err = oa.accountsq.lookupOnline(addrA, 1)
+ require.NoError(t, err)
+ require.Equal(t, addrA, pad.addr)
+ require.Empty(t, pad.rowid)
+ require.Empty(t, pad.accountData.VoteLastValid)
+
+ _, has = oa.accounts[addrB]
+ require.False(t, has)
+ cachedData, has = oa.onlineAccountsCache.read(addrB, 1)
+ require.False(t, has) // cache miss, we do not write into the cache non-complete history after updates
+ require.Empty(t, cachedData.VoteLastValid)
+
+ data, err = oa.lookupOnlineAccountData(1, addrB)
+ require.NoError(t, err)
+ require.NotEmpty(t, data.VotingData.VoteLastValid)
+
+ pad, err = oa.accountsq.lookupOnline(addrB, 1)
+ require.NoError(t, err)
+ require.Equal(t, addrB, pad.addr)
+ require.NotEmpty(t, pad.rowid)
+ require.NotEmpty(t, pad.accountData.VoteLastValid)
+ })
+}
+
+func TestAcctOnlineVotersLongerHistory(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ const seedLookback = 3
+ const seedInteval = 4
+ const maxBalLookback = 2 * seedLookback * seedInteval
+ const stateProofRounds = maxBalLookback / 2 // have it less than maxBalLookback but greater than default deltas size (8)
+ const stateProofVotersLookback = 2
+
+ const numAccts = maxBalLookback * 5
+ genesisAccts := []map[basics.Address]basics.AccountData{{}}
+ genesisAccts[0] = make(map[basics.Address]basics.AccountData, numAccts)
+ var addrA basics.Address
+ for i := 0; i < numAccts; i++ {
+ addr := ledgertesting.RandomAddress()
+ genesisAccts[0][addr] = ledgertesting.RandomOnlineAccountData(0)
+ if addrA.IsZero() {
+ addrA = addr
+ }
+ }
+
+ addSinkAndPoolAccounts(genesisAccts)
+
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestAcctOnlineCacheDBSync")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.MaxBalLookback = maxBalLookback
+ protoParams.SeedLookback = seedLookback
+ protoParams.SeedRefreshInterval = seedInteval
+ protoParams.StateProofInterval = stateProofRounds
+ protoParams.StateProofVotersLookback = stateProofVotersLookback
+ config.Consensus[testProtocolVersion] = protoParams
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ ml := makeMockLedgerForTracker(t, true, 1, testProtocolVersion, genesisAccts)
+ defer ml.Close()
+ conf := config.GetDefaultLocal()
+
+ au, oa := newAcctUpdates(t, ml, conf)
+ defer oa.close()
+ _, totals, err := au.LatestTotals()
+ require.NoError(t, err)
+
+ // add maxBalLookback empty blocks
+ maxBlocks := maxBalLookback * 5
+ for i := 1; i <= maxBlocks; i++ {
+ var updates ledgercore.AccountDeltas
+ updates.Upsert(addrA, ledgercore.AccountData{AccountBaseData: ledgercore.AccountBaseData{Status: basics.Online}, VotingData: ledgercore.VotingData{VoteLastValid: basics.Round(100 * i)}})
+ base := genesisAccts[i-1]
+ newAccts := applyPartialDeltas(base, updates)
+ totals = newBlock(t, ml, testProtocolVersion, protoParams, basics.Round(i), base, updates, totals)
+ genesisAccts = append(genesisAccts, newAccts)
+ commitSync(t, oa, ml, basics.Round(i))
+ }
+ require.Len(t, oa.deltas, int(conf.MaxAcctLookback))
+ require.Equal(t, basics.Round(maxBlocks-int(conf.MaxAcctLookback)), oa.cachedDBRoundOnline)
+ // voters stalls after the first interval
+ lowest := oa.voters.lowestRound(oa.cachedDBRoundOnline)
+ require.Equal(t, basics.Round(stateProofRounds-stateProofVotersLookback), lowest)
+ require.Equal(t, maxBlocks/stateProofRounds, len(oa.voters.votersForRoundCache))
+ retain, lookback := oa.committedUpTo(oa.latest())
+ require.Equal(t, lowest, retain)
+ require.Equal(t, conf.MaxAcctLookback, uint64(lookback))
+
+ // onlineRoundParamsData does not store more than maxBalLookback + deltas even if voters stall
+ require.Equal(t, uint64(len(oa.onlineRoundParamsData)), maxBalLookback+conf.MaxAcctLookback)
+
+ // DB has all the required history tho
+ var dbOnlineRoundParams []ledgercore.OnlineRoundParamsData
+ var endRound basics.Round
+ err = oa.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ dbOnlineRoundParams, endRound, err = accountsOnlineRoundParams(tx)
+ return err
+ })
+
+ require.NoError(t, err)
+ require.Equal(t, oa.latest()-basics.Round(conf.MaxAcctLookback), endRound)
+ require.Equal(t, maxBlocks-int(lowest)-int(conf.MaxAcctLookback)+1, len(dbOnlineRoundParams))
+
+ _, err = oa.onlineTotalsEx(lowest)
+ require.NoError(t, err)
+
+ _, err = oa.onlineTotalsEx(lowest - 1)
+ require.ErrorIs(t, err, sql.ErrNoRows)
+
+ // ensure the cache size for addrA does not have more entries than maxBalLookback + 1
+ // +1 comes from the deletion before X without checking account state at X
+ require.Equal(t, maxBalLookback+1, oa.onlineAccountsCache.accounts[addrA].Len())
+}
+
+// compareTopAccounts makes sure that accounts returned from OnlineTop function are sorted and contains the online accounts on the test
+func compareTopAccounts(a *require.Assertions, testingResult []*ledgercore.OnlineAccount, expectedAccountsBalances []basics.BalanceRecord) {
+ isSorted := sort.SliceIsSorted(testingResult, func(i, j int) bool {
+ return testingResult[i].NormalizedOnlineBalance > testingResult[j].NormalizedOnlineBalance
+ })
+ a.Equal(true, isSorted)
+
+ var onlineAccoutsFromTests []*ledgercore.OnlineAccount
+ for i := 0; i < len(expectedAccountsBalances); i++ {
+ if expectedAccountsBalances[i].Status != basics.Online {
+ continue
+ }
+ onlineAccoutsFromTests = append(onlineAccoutsFromTests, &ledgercore.OnlineAccount{
+ Address: expectedAccountsBalances[i].Addr,
+ MicroAlgos: expectedAccountsBalances[i].MicroAlgos,
+ RewardsBase: 0,
+ NormalizedOnlineBalance: expectedAccountsBalances[i].NormalizedOnlineBalance(config.Consensus[protocol.ConsensusCurrentVersion]),
+ VoteFirstValid: expectedAccountsBalances[i].VoteFirstValid,
+ VoteLastValid: expectedAccountsBalances[i].VoteLastValid})
+ }
+
+ sort.Slice(onlineAccoutsFromTests[:], func(i, j int) bool {
+ return onlineAccoutsFromTests[i].MicroAlgos.Raw > onlineAccoutsFromTests[j].MicroAlgos.Raw
+ })
+
+ for i := 0; i < len(testingResult); i++ {
+ a.Equal(*onlineAccoutsFromTests[i], *testingResult[i])
+ }
+
+}
+
+func addSinkAndPoolAccounts(genesisAccts []map[basics.Address]basics.AccountData) {
+ pooldata := basics.AccountData{}
+ pooldata.MicroAlgos.Raw = 100 * 1000 * 1000 * 1000 * 1000
+ pooldata.Status = basics.NotParticipating
+ genesisAccts[0][testPoolAddr] = pooldata
+
+ sinkdata := basics.AccountData{}
+ sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ sinkdata.Status = basics.NotParticipating
+ genesisAccts[0][testSinkAddr] = sinkdata
+}
+
+func newBlockWithUpdates(genesisAccts []map[basics.Address]basics.AccountData, updates ledgercore.AccountDeltas, prevTotals ledgercore.AccountTotals, t *testing.T, ml *mockLedgerForTracker, round int, oa *onlineAccounts) ledgercore.AccountTotals {
+ base := genesisAccts[0]
+ newTotals := newBlock(t, ml, protocol.ConsensusCurrentVersion, config.Consensus[protocol.ConsensusCurrentVersion], basics.Round(round), base, updates, prevTotals)
+ commitSync(t, oa, ml, basics.Round(round))
+ return newTotals
+}
+
+func TestAcctOnlineTop(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ algops := MicroAlgoOperations{a: a}
+
+ const numAccts = 20
+ allAccts := make([]basics.BalanceRecord, numAccts)
+ genesisAccts := []map[basics.Address]basics.AccountData{{}}
+ genesisAccts[0] = make(map[basics.Address]basics.AccountData, numAccts)
+ i := 0
+ for ; i < numAccts/2; i++ {
+ allAccts[i] = basics.BalanceRecord{
+ Addr: ledgertesting.RandomAddress(),
+ AccountData: basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: uint64(i + 1)},
+ Status: basics.Offline,
+ RewardsBase: 0},
+ }
+ genesisAccts[0][allAccts[i].Addr] = allAccts[i].AccountData
+ }
+ for ; i < numAccts-1; i++ {
+ allAccts[i] = basics.BalanceRecord{
+ Addr: ledgertesting.RandomAddress(),
+ AccountData: basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: uint64(i + 1)},
+ Status: basics.Online,
+ VoteLastValid: 1000,
+ VoteFirstValid: 0,
+ RewardsBase: 0},
+ }
+ genesisAccts[0][allAccts[i].Addr] = allAccts[i].AccountData
+ }
+ // offline account with high balance
+ allAccts[i] = basics.BalanceRecord{
+ Addr: ledgertesting.RandomAddress(),
+ AccountData: basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: uint64(100000)},
+ Status: basics.Offline,
+ RewardsBase: 0},
+ }
+ genesisAccts[0][allAccts[i].Addr] = allAccts[i].AccountData
+ addSinkAndPoolAccounts(genesisAccts)
+
+ ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, genesisAccts)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ au, oa := newAcctUpdates(t, ml, conf)
+ defer oa.close()
+ initialOnlineTotals, err := oa.onlineTotals(0)
+ a.NoError(err)
+ top := compareOnlineTotals(a, oa, 0, 0, 5, initialOnlineTotals, initialOnlineTotals)
+ compareTopAccounts(a, top, allAccts)
+
+ _, totals, err := au.LatestTotals()
+ a.NoError(err)
+
+ // mark one of the top N accounts as offline - we expect that it will be removed form the top N
+ var updates ledgercore.AccountDeltas
+ ac := allAccts[numAccts-3]
+ updates.Upsert(ac.Addr, ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline, MicroAlgos: ac.MicroAlgos}, VotingData: ledgercore.VotingData{}})
+ totals = newBlockWithUpdates(genesisAccts, updates, totals, t, ml, 1, oa)
+ accountToBeUpdated := ac
+ accountToBeUpdated.Status = basics.Offline
+ allAccts[numAccts-3] = accountToBeUpdated
+
+ updatedOnlineStake := algops.Sub(initialOnlineTotals, ac.MicroAlgos)
+ top = compareOnlineTotals(a, oa, 1, 1, 5, updatedOnlineStake, updatedOnlineStake)
+ compareTopAccounts(a, top, allAccts)
+
+ // update an account to have expired keys
+ updates = ledgercore.AccountDeltas{}
+ updates.Upsert(allAccts[numAccts-2].Addr, ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{Status: basics.Online, MicroAlgos: allAccts[numAccts-2].MicroAlgos},
+ VotingData: ledgercore.VotingData{
+ VoteFirstValid: 0,
+ VoteLastValid: 1,
+ }})
+ totals = newBlockWithUpdates(genesisAccts, updates, totals, t, ml, 2, oa)
+ // we expect the previous account to be removed from the top N accounts since its keys are expired.
+ // remove it from the expected allAccts slice by marking it as offline
+ accountToBeUpdated = allAccts[numAccts-2]
+ accountToBeUpdated.Status = basics.Offline
+ allAccts[numAccts-2] = accountToBeUpdated
+
+ notValidAccountStake := accountToBeUpdated.MicroAlgos
+ voteRndExpectedOnlineStake := algops.Sub(updatedOnlineStake, notValidAccountStake)
+ top = compareOnlineTotals(a, oa, 2, 2, 5, updatedOnlineStake, voteRndExpectedOnlineStake)
+ compareTopAccounts(a, top, allAccts)
+
+ // mark an account with high stake as online - it should be pushed to the top of the list
+ updates.Upsert(allAccts[numAccts-1].Addr, ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{Status: basics.Online, MicroAlgos: allAccts[numAccts-1].MicroAlgos},
+ VotingData: ledgercore.VotingData{VoteLastValid: basics.Round(1000)}})
+ totals = newBlockWithUpdates(genesisAccts, updates, totals, t, ml, 3, oa)
+ accountToBeUpdated = allAccts[numAccts-1]
+ accountToBeUpdated.Status = basics.Online
+ accountToBeUpdated.MicroAlgos = allAccts[numAccts-1].MicroAlgos
+ accountToBeUpdated.VoteLastValid = basics.Round(1000)
+ allAccts[numAccts-1] = accountToBeUpdated
+
+ updatedOnlineStake = algops.Add(updatedOnlineStake, accountToBeUpdated.MicroAlgos)
+ voteRndExpectedOnlineStake = algops.Add(voteRndExpectedOnlineStake, accountToBeUpdated.MicroAlgos)
+ top = compareOnlineTotals(a, oa, 3, 3, 5, updatedOnlineStake, voteRndExpectedOnlineStake)
+ compareTopAccounts(a, top, allAccts)
+
+ a.Equal(top[0].Address, allAccts[numAccts-1].Addr)
+}
+
+func TestAcctOnlineTopInBatches(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ const numAccts = 2048
+ allAccts := make([]basics.BalanceRecord, numAccts)
+ genesisAccts := []map[basics.Address]basics.AccountData{{}}
+ genesisAccts[0] = make(map[basics.Address]basics.AccountData, numAccts)
+
+ for i := 0; i < numAccts; i++ {
+ allAccts[i] = basics.BalanceRecord{
+ Addr: ledgertesting.RandomAddress(),
+ AccountData: basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: uint64(i + 1)},
+ Status: basics.Online,
+ VoteLastValid: 1000,
+ VoteFirstValid: 0,
+ RewardsBase: 0},
+ }
+ genesisAccts[0][allAccts[i].Addr] = allAccts[i].AccountData
+ }
+ addSinkAndPoolAccounts(genesisAccts)
+
+ ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, genesisAccts)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ _, oa := newAcctUpdates(t, ml, conf)
+ defer oa.close()
+
+ top, _, err := oa.TopOnlineAccounts(0, 0, 2048)
+ a.NoError(err)
+ compareTopAccounts(a, top, allAccts)
+}
+
+func TestAcctOnlineTopBetweenCommitAndPostCommit(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ const numAccts = 20
+ allAccts := make([]basics.BalanceRecord, numAccts)
+ genesisAccts := []map[basics.Address]basics.AccountData{{}}
+ genesisAccts[0] = make(map[basics.Address]basics.AccountData, numAccts)
+
+ for i := 0; i < numAccts; i++ {
+ allAccts[i] = basics.BalanceRecord{
+ Addr: ledgertesting.RandomAddress(),
+ AccountData: basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: uint64(i + 1)},
+ Status: basics.Online,
+ VoteLastValid: 1000,
+ VoteFirstValid: 0,
+ RewardsBase: 0},
+ }
+ genesisAccts[0][allAccts[i].Addr] = allAccts[i].AccountData
+ }
+ addSinkAndPoolAccounts(genesisAccts)
+
+ ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, genesisAccts)
+ defer ml.Close()
+
+ stallingTracker := &blockingTracker{
+ postCommitUnlockedEntryLock: make(chan struct{}),
+ postCommitUnlockedReleaseLock: make(chan struct{}),
+ postCommitEntryLock: make(chan struct{}),
+ postCommitReleaseLock: make(chan struct{}),
+ alwaysLock: false,
+ shouldLockPostCommit: false,
+ }
+
+ conf := config.GetDefaultLocal()
+ au, oa := newAcctUpdates(t, ml, conf)
+ defer oa.close()
+ ml.trackers.trackers = append([]ledgerTracker{stallingTracker}, ml.trackers.trackers...)
+
+ top, _, err := oa.TopOnlineAccounts(0, 0, 5)
+ a.NoError(err)
+ compareTopAccounts(a, top, allAccts)
+
+ _, totals, err := au.LatestTotals()
+ require.NoError(t, err)
+
+ // apply some rounds so the db round will make progress (not be 0) - i.e since the max lookback in memory is 8. deltas
+ // will get committed at round 9
+ i := 1
+ for ; i < 10; i++ {
+ var updates ledgercore.AccountDeltas
+ updates.Upsert(allAccts[numAccts-1].Addr, ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline}, VotingData: ledgercore.VotingData{}})
+ newBlockWithUpdates(genesisAccts, updates, totals, t, ml, i, oa)
+ }
+
+ stallingTracker.shouldLockPostCommit = true
+
+ updateAccountsRoutine := func() {
+ var updates ledgercore.AccountDeltas
+ updates.Upsert(allAccts[numAccts-1].Addr, ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline}, VotingData: ledgercore.VotingData{}})
+ newBlockWithUpdates(genesisAccts, updates, totals, t, ml, i, oa)
+ }
+
+ // This go routine will trigger a commit producer. we added a special blockingTracker that will case our
+ // onlineAccoutsTracker to be "stuck" between commit and Post commit .
+ // thus, when we call onlineTop - it should wait for the post commit to happen.
+ // in a different go routine we will wait 2 sec and release the commit.
+ go updateAccountsRoutine()
+
+ select {
+ case <-stallingTracker.postCommitEntryLock:
+ go func() {
+ time.Sleep(2 * time.Second)
+ stallingTracker.postCommitReleaseLock <- struct{}{}
+ }()
+ top, _, err = oa.TopOnlineAccounts(2, 2, 5)
+ a.NoError(err)
+
+ accountToBeUpdated := allAccts[numAccts-1]
+ accountToBeUpdated.Status = basics.Offline
+ allAccts[numAccts-1] = accountToBeUpdated
+
+ compareTopAccounts(a, top, allAccts)
+ case <-time.After(1 * time.Minute):
+ a.FailNow("timedout while waiting for post commit")
+ }
+}
+
+func TestAcctOnlineTopDBBehindMemRound(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ const numAccts = 20
+ allAccts := make([]basics.BalanceRecord, numAccts)
+ genesisAccts := []map[basics.Address]basics.AccountData{{}}
+ genesisAccts[0] = make(map[basics.Address]basics.AccountData, numAccts)
+
+ for i := 0; i < numAccts; i++ {
+ allAccts[i] = basics.BalanceRecord{
+ Addr: ledgertesting.RandomAddress(),
+ AccountData: basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: uint64(i + 1)},
+ Status: basics.Online,
+ VoteLastValid: 1000,
+ VoteFirstValid: 0,
+ RewardsBase: 0},
+ }
+ genesisAccts[0][allAccts[i].Addr] = allAccts[i].AccountData
+ }
+ addSinkAndPoolAccounts(genesisAccts)
+
+ ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, genesisAccts)
+ defer ml.Close()
+
+ stallingTracker := &blockingTracker{
+ postCommitUnlockedEntryLock: make(chan struct{}),
+ postCommitUnlockedReleaseLock: make(chan struct{}),
+ postCommitEntryLock: make(chan struct{}),
+ postCommitReleaseLock: make(chan struct{}),
+ alwaysLock: false,
+ shouldLockPostCommit: false,
+ }
+
+ conf := config.GetDefaultLocal()
+ au, oa := newAcctUpdates(t, ml, conf)
+ defer oa.close()
+ ml.trackers.trackers = append([]ledgerTracker{stallingTracker}, ml.trackers.trackers...)
+
+ top, _, err := oa.TopOnlineAccounts(0, 0, 5)
+ a.NoError(err)
+ compareTopAccounts(a, top, allAccts)
+
+ _, totals, err := au.LatestTotals()
+ require.NoError(t, err)
+
+ // apply some rounds so the db round will make progress (not be 0) - i.e since the max lookback in memory is 8. deltas
+ // will get committed at round 9
+ i := 1
+ for ; i < 10; i++ {
+ var updates ledgercore.AccountDeltas
+ updates.Upsert(allAccts[numAccts-1].Addr, ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline}, VotingData: ledgercore.VotingData{}})
+ newBlockWithUpdates(genesisAccts, updates, totals, t, ml, i, oa)
+ }
+
+ stallingTracker.shouldLockPostCommit = true
+
+ updateAccountsRoutine := func() {
+ var updates ledgercore.AccountDeltas
+ updates.Upsert(allAccts[numAccts-1].Addr, ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline}, VotingData: ledgercore.VotingData{}})
+ newBlockWithUpdates(genesisAccts, updates, totals, t, ml, i, oa)
+ }
+
+ // This go routine will trigger a commit producer. we added a special blockingTracker that will case our
+ // onlineAccoutsTracker to be "stuck" between commit and Post commit .
+ // thus, when we call onlineTop - it should wait for the post commit to happen.
+ // in a different go routine we will wait 2 sec and release the commit.
+ go updateAccountsRoutine()
+
+ select {
+ case <-stallingTracker.postCommitEntryLock:
+ go func() {
+ time.Sleep(2 * time.Second)
+ // tweak the database to move backwards
+ err = oa.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ _, err = tx.Exec("update acctrounds set rnd = 1 WHERE id='acctbase' ")
+ return
+ })
+ stallingTracker.postCommitReleaseLock <- struct{}{}
+ }()
+ _, _, err = oa.TopOnlineAccounts(2, 2, 5)
+ a.Error(err)
+ a.Contains(err.Error(), "is behind in-memory round")
+
+ case <-time.After(1 * time.Minute):
+ a.FailNow("timedout while waiting for post commit")
+ }
+}
+
+func TestAcctOnlineTop_ChangeOnlineStake(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ algops := MicroAlgoOperations{a: a}
+
+ const numAccts = 20
+ allAccts := make([]basics.BalanceRecord, numAccts)
+ genesisAccts := []map[basics.Address]basics.AccountData{{}}
+ genesisAccts[0] = make(map[basics.Address]basics.AccountData, numAccts)
+ for i := 0; i < numAccts-1; i++ {
+ allAccts[i] = basics.BalanceRecord{
+ Addr: ledgertesting.RandomAddress(),
+ AccountData: basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: uint64(i + 1)},
+ Status: basics.Online,
+ VoteLastValid: 1000,
+ VoteFirstValid: 0,
+ RewardsBase: 0},
+ }
+ genesisAccts[0][allAccts[i].Addr] = allAccts[i].AccountData
+ }
+ // Online but only valid until round 1
+ allAccts[numAccts-1] = basics.BalanceRecord{
+ Addr: ledgertesting.RandomAddress(),
+ AccountData: basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: uint64(numAccts)},
+ Status: basics.Online,
+ VoteLastValid: 1,
+ VoteFirstValid: 0,
+ RewardsBase: 0},
+ }
+ genesisAccts[0][allAccts[numAccts-1].Addr] = allAccts[numAccts-1].AccountData
+ acctInvalidFromRnd2 := allAccts[numAccts-1]
+
+ addSinkAndPoolAccounts(genesisAccts)
+
+ ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, genesisAccts)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ au, oa := newAcctUpdates(t, ml, conf)
+ defer oa.close()
+
+ _, totals, err := au.LatestTotals()
+ a.NoError(err)
+
+ // Add 20 blocks (> max lookback) to test both the database and deltas
+ for i := 1; i <= 20; i++ {
+ var updates ledgercore.AccountDeltas
+ if i == 15 { // round 15 should be in deltas (memory)
+ // turn account `i` offline
+ updates.Upsert(allAccts[i].Addr, ledgercore.AccountData{AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline, MicroAlgos: allAccts[i].MicroAlgos}, VotingData: ledgercore.VotingData{}})
+ }
+ if i == 18 {
+ updates.Upsert(allAccts[i].Addr, ledgercore.AccountData{AccountBaseData: ledgercore.AccountBaseData{Status: basics.Online, MicroAlgos: allAccts[i].MicroAlgos}, VotingData: ledgercore.VotingData{VoteLastValid: basics.Round(18)}})
+ } // else: insert empty block
+ totals = newBlockWithUpdates(genesisAccts, updates, totals, t, ml, i, oa)
+ }
+
+ initialOnlineStake, err := oa.onlineTotals(0)
+ a.NoError(err)
+ rnd15TotalOnlineStake := algops.Sub(initialOnlineStake, allAccts[15].MicroAlgos) // 15 is offline
+
+ // Case 1: sanity check
+ top := compareOnlineTotals(a, oa, 0, 1, 5, initialOnlineStake, initialOnlineStake)
+ compareTopAccounts(a, top, allAccts)
+
+ // Case 2: In db
+ voteRndExpectedStake := algops.Sub(initialOnlineStake, acctInvalidFromRnd2.MicroAlgos) // Online on rnd but not valid on voteRnd
+ top = compareOnlineTotals(a, oa, 0, 2, 5, initialOnlineStake, voteRndExpectedStake)
+ updatedAccts := allAccts[:numAccts-1]
+ compareTopAccounts(a, top, updatedAccts)
+
+ // Case 3: In memory (deltas)
+ voteRndExpectedStake = algops.Sub(rnd15TotalOnlineStake, acctInvalidFromRnd2.MicroAlgos)
+ voteRndExpectedStake = algops.Sub(voteRndExpectedStake, allAccts[18].MicroAlgos) // Online on rnd but not valid on voteRnd
+ updatedAccts[15].Status = basics.Offline // Mark account 15 offline for comparison
+ updatedAccts[18].Status = basics.Offline // Mark account 18 offline for comparison
+ top = compareOnlineTotals(a, oa, 18, 19, 5, rnd15TotalOnlineStake, voteRndExpectedStake)
+ compareTopAccounts(a, top, updatedAccts)
+}
+
+type MicroAlgoOperations struct {
+ a *require.Assertions
+ ot basics.OverflowTracker
+}
+
+func (m *MicroAlgoOperations) Sub(x, y basics.MicroAlgos) basics.MicroAlgos {
+ res := m.ot.SubA(x, y)
+ m.a.False(m.ot.Overflowed)
+ return res
+}
+
+func (m *MicroAlgoOperations) Add(x, y basics.MicroAlgos) basics.MicroAlgos {
+ res := m.ot.AddA(x, y)
+ m.a.False(m.ot.Overflowed)
+ return res
+}
+
+func compareOnlineTotals(a *require.Assertions, oa *onlineAccounts, rnd, voteRnd basics.Round, n uint64, expectedForRnd, expectedForVoteRnd basics.MicroAlgos) []*ledgercore.OnlineAccount {
+ top, onlineTotalVoteRnd, err := oa.TopOnlineAccounts(rnd, voteRnd, n)
+ a.NoError(err)
+ a.Equal(expectedForVoteRnd, onlineTotalVoteRnd)
+ onlineTotalsRnd, err := oa.onlineTotals(rnd)
+ a.NoError(err)
+ a.Equal(expectedForRnd, onlineTotalsRnd)
+ a.LessOrEqual(onlineTotalVoteRnd.Raw, onlineTotalsRnd.Raw)
+ return top
+}
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index 7ace8c55d..852df6df4 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -17,7 +17,6 @@
package ledger
import (
- "container/heap"
"context"
"database/sql"
"errors"
@@ -179,9 +178,6 @@ type accountUpdates struct {
// accountsReadCond used to synchronize read access to the internal data structures.
accountsReadCond *sync.Cond
- // voters keeps track of Merkle trees of online accounts, used for compact certificates.
- voters *votersTracker
-
// baseAccounts stores the most recently used accounts, at exactly dbRound
baseAccounts lruAccounts
@@ -196,6 +192,9 @@ type accountUpdates struct {
// lastMetricsLogTime is the time when the previous metrics logging occurred
lastMetricsLogTime time.Time
+
+ // maxAcctLookback sets the minimim deltas size to keep in memory
+ acctLookback uint64
}
// RoundOffsetError is an error for when requested round is behind earliest stored db entry
@@ -260,6 +259,8 @@ func (r resourcesUpdates) getForAddress(addr basics.Address) map[basics.Creatabl
func (au *accountUpdates) initialize(cfg config.Local) {
au.accountsReadCond = sync.NewCond(au.accountsMu.RLocker())
+ au.acctLookback = cfg.MaxAcctLookback
+
// log metrics
au.logAccountUpdatesMetrics = cfg.EnableAccountUpdatesStats
au.logAccountUpdatesInterval = cfg.AccountUpdatesStatsInterval
@@ -281,19 +282,14 @@ func (au *accountUpdates) loadFromDisk(l ledgerForTracker, lastBalancesRound bas
// close closes the accountUpdates, waiting for all the child go-routine to complete
func (au *accountUpdates) close() {
- if au.voters != nil {
- au.voters.close()
+ if au.accountsq != nil {
+ au.accountsq.close()
+ au.accountsq = nil
}
-
au.baseAccounts.prune(0)
au.baseResources.prune(0)
}
-// LookupOnlineAccountData returns the online account data for a given address at a given round.
-func (au *accountUpdates) LookupOnlineAccountData(rnd basics.Round, addr basics.Address) (data basics.OnlineAccountData, err error) {
- return au.lookupOnlineAccountData(rnd, addr)
-}
-
func (au *accountUpdates) LookupResource(rnd basics.Round, addr basics.Address, aidx basics.CreatableIndex, ctype basics.CreatableType) (ledgercore.AccountResource, basics.Round, error) {
return au.lookupResource(rnd, addr, aidx, ctype, true /* take lock */)
}
@@ -398,135 +394,6 @@ func (au *accountUpdates) listCreatables(maxCreatableIdx basics.CreatableIndex,
}
}
-// onlineTop returns the top n online accounts, sorted by their normalized
-// balance and address, whose voting keys are valid in voteRnd. See the
-// normalization description in AccountData.NormalizedOnlineBalance().
-func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n uint64) ([]*ledgercore.OnlineAccount, error) {
- proto := au.ledger.GenesisProto()
- au.accountsMu.RLock()
- for {
- currentDbRound := au.cachedDBRound
- currentDeltaLen := len(au.deltas)
- offset, err := au.roundOffset(rnd)
- if err != nil {
- au.accountsMu.RUnlock()
- return nil, err
- }
-
- // Determine how many accounts have been modified in-memory,
- // so that we obtain enough top accounts from disk (accountdb).
- // If the *onlineAccount is nil, that means the account is offline
- // as of the most recent change to that account, or its vote key
- // is not valid in voteRnd. Otherwise, the *onlineAccount is the
- // representation of the most recent state of the account, and it
- // is online and can vote in voteRnd.
- modifiedAccounts := make(map[basics.Address]*ledgercore.OnlineAccount)
- for o := uint64(0); o < offset; o++ {
- for i := 0; i < au.deltas[o].Len(); i++ {
- addr, d := au.deltas[o].GetByIdx(i)
- if d.Status != basics.Online {
- modifiedAccounts[addr] = nil
- continue
- }
-
- if !(d.VoteFirstValid <= voteRnd && voteRnd <= d.VoteLastValid) {
- modifiedAccounts[addr] = nil
- continue
- }
-
- modifiedAccounts[addr] = accountDataToOnline(addr, &d, proto)
- }
- }
-
- au.accountsMu.RUnlock()
-
- // Build up a set of candidate accounts. Start by loading the
- // top N + len(modifiedAccounts) accounts from disk (accountdb).
- // This ensures that, even if the worst case if all in-memory
- // changes are deleting the top accounts in accountdb, we still
- // will have top N left.
- //
- // Keep asking for more accounts until we get the desired number,
- // or there are no more accounts left.
- candidates := make(map[basics.Address]*ledgercore.OnlineAccount)
- batchOffset := uint64(0)
- batchSize := uint64(1024)
- var dbRound basics.Round
- for uint64(len(candidates)) < n+uint64(len(modifiedAccounts)) {
- var accts map[basics.Address]*ledgercore.OnlineAccount
- start := time.Now()
- ledgerAccountsonlinetopCount.Inc(nil)
- err = au.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- accts, err = accountsOnlineTop(tx, batchOffset, batchSize, proto)
- if err != nil {
- return
- }
- dbRound, err = accountsRound(tx)
- return
- })
- ledgerAccountsonlinetopMicros.AddMicrosecondsSince(start, nil)
- if err != nil {
- return nil, err
- }
-
- if dbRound != currentDbRound {
- break
- }
-
- for addr, data := range accts {
- if !(data.VoteFirstValid <= voteRnd && voteRnd <= data.VoteLastValid) {
- continue
- }
- candidates[addr] = data
- }
-
- // If we got fewer than batchSize accounts, there are no
- // more accounts to look at.
- if uint64(len(accts)) < batchSize {
- break
- }
-
- batchOffset += batchSize
- }
- if dbRound != currentDbRound && dbRound != basics.Round(0) {
- // database round doesn't match the last au.dbRound we sampled.
- au.accountsMu.RLock()
- for currentDbRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
- au.accountsReadCond.Wait()
- }
- continue
- }
-
- // Now update the candidates based on the in-memory deltas.
- for addr, oa := range modifiedAccounts {
- if oa == nil {
- delete(candidates, addr)
- } else {
- candidates[addr] = oa
- }
- }
-
- // Get the top N accounts from the candidate set, by inserting all of
- // the accounts into a heap and then pulling out N elements from the
- // heap.
- topHeap := &onlineTopHeap{
- accts: nil,
- }
-
- for _, data := range candidates {
- heap.Push(topHeap, data)
- }
-
- var res []*ledgercore.OnlineAccount
- for topHeap.Len() > 0 && uint64(len(res)) < n {
- acct := heap.Pop(topHeap).(*ledgercore.OnlineAccount)
- res = append(res, acct)
- }
-
- return res, nil
- }
-}
-
// GetCreatorForRound returns the creator for a given asset/app index at a given round
func (au *accountUpdates) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) {
return au.getCreatorForRound(rnd, cidx, ctype, true /* take the lock */)
@@ -542,7 +409,7 @@ func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound,
defer au.accountsMu.RUnlock()
retRound = basics.Round(0)
- lookback = basics.Round(config.Consensus[au.versions[len(au.versions)-1]].MaxBalLookback)
+ lookback = basics.Round(au.acctLookback)
if committedRound < lookback {
return
}
@@ -576,10 +443,6 @@ func (au *accountUpdates) produceCommittingTask(committedRound basics.Round, dbR
au.log.Panicf("produceCommittingTask: block %d too far in the future, lookback %d, dbRound %d (cached %d), deltas %d", committedRound, dcr.lookback, dbRound, au.cachedDBRound, len(au.deltas))
}
- if au.voters != nil {
- newBase = au.voters.lowestRound(newBase)
- }
-
offset = uint64(newBase - dbRound)
offset = au.consecutiveVersion(offset)
@@ -587,6 +450,12 @@ func (au *accountUpdates) produceCommittingTask(committedRound basics.Round, dbR
// calculate the number of pending deltas
dcr.pendingDeltas = au.deltasAccum[offset] - au.deltasAccum[0]
+ proto := config.Consensus[au.versions[offset]]
+ dcr.catchpointLookback = proto.CatchpointLookback
+ if dcr.catchpointLookback == 0 {
+ dcr.catchpointLookback = proto.MaxBalLookback
+ }
+
// submit committing task only if offset is non-zero in addition to
// 1) no pending catchpoint writes
// 2) batching requirements meet or catchpoint round
@@ -620,13 +489,6 @@ func (au *accountUpdates) newBlock(blk bookkeeping.Block, delta ledgercore.State
au.accountsReadCond.Broadcast()
}
-// OnlineTotals returns the online totals of all accounts at the end of round rnd.
-func (au *accountUpdates) OnlineTotals(rnd basics.Round) (basics.MicroAlgos, error) {
- au.accountsMu.RLock()
- defer au.accountsMu.RUnlock()
- return au.onlineTotalsImpl(rnd)
-}
-
// LatestTotals returns the totals of all accounts for the most recent round, as well as the round number
func (au *accountUpdates) LatestTotals() (basics.Round, ledgercore.AccountTotals, error) {
au.accountsMu.RLock()
@@ -667,6 +529,10 @@ type accountUpdatesLedgerEvaluator struct {
// au is the associated accountUpdates structure which invoking the trackerEvalVerified function, passing this structure as input.
// the accountUpdatesLedgerEvaluator would access the underlying accountUpdates function directly, bypassing the balances mutex lock.
au *accountUpdates
+ // ao is onlineAccounts for voters access
+ ao *onlineAccounts
+ // txtail allows implementation of BlockHdrCached
+ tail *txTail
// prevHeader is the previous header to the current one. The usage of this is only in the context of initializeCaches where we iteratively
// building the ledgercore.StateDelta, which requires a peek on the "previous" header information.
prevHeader bookkeeping.BlockHeader
@@ -682,9 +548,9 @@ func (aul *accountUpdatesLedgerEvaluator) GenesisProto() config.ConsensusParams
return aul.au.ledger.GenesisProto()
}
-// CompactCertVoters returns the top online accounts at round rnd.
-func (aul *accountUpdatesLedgerEvaluator) CompactCertVoters(rnd basics.Round) (voters *ledgercore.VotersForRound, err error) {
- return aul.au.voters.getVoters(rnd)
+// VotersForStateProof returns the top online accounts at round rnd.
+func (aul *accountUpdatesLedgerEvaluator) VotersForStateProof(rnd basics.Round) (voters *ledgercore.VotersForRound, err error) {
+ return aul.ao.voters.getVoters(rnd)
}
// BlockHdr returns the header of the given round. When the evaluator is running, it's only referring to the previous header, which is what we
@@ -696,6 +562,16 @@ func (aul *accountUpdatesLedgerEvaluator) BlockHdr(r basics.Round) (bookkeeping.
return bookkeeping.BlockHeader{}, ledgercore.ErrNoEntry{}
}
+// BlockHdrCached returns the header of the given round. We use the txTail
+// tracker directly to avoid the tracker registry lock.
+func (aul *accountUpdatesLedgerEvaluator) BlockHdrCached(r basics.Round) (bookkeeping.BlockHeader, error) {
+ hdr, ok := aul.tail.blockHeader(r)
+ if !ok {
+ return bookkeeping.BlockHeader{}, fmt.Errorf("no cached header data for round %d", r)
+ }
+ return hdr, nil
+}
+
// LatestTotals returns the totals of all accounts for the most recent round, as well as the round number
func (aul *accountUpdatesLedgerEvaluator) LatestTotals() (basics.Round, ledgercore.AccountTotals, error) {
return aul.au.latestTotalsImpl()
@@ -710,6 +586,10 @@ func (aul *accountUpdatesLedgerEvaluator) CheckDup(config.ConsensusParams, basic
// lookupWithoutRewards returns the account balance for a given address at a given round, without the reward
func (aul *accountUpdatesLedgerEvaluator) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, error) {
data, validThrough, _, _, err := aul.au.lookupWithoutRewards(rnd, addr, false /*don't sync*/)
+ if err != nil {
+ return ledgercore.AccountData{}, 0, err
+ }
+
return data, validThrough, err
}
@@ -728,8 +608,11 @@ func (aul *accountUpdatesLedgerEvaluator) GetCreatorForRound(rnd basics.Round, c
return aul.au.getCreatorForRound(rnd, cidx, ctype, false /* don't sync */)
}
-// onlineTotalsImpl returns the online totals of all accounts at the end of round rnd.
-func (au *accountUpdates) onlineTotalsImpl(rnd basics.Round) (basics.MicroAlgos, error) {
+// onlineTotals returns the online totals of all accounts at the end of round rnd.
+// used in tests only
+func (au *accountUpdates) onlineTotals(rnd basics.Round) (basics.MicroAlgos, error) {
+ au.accountsMu.RLock()
+ defer au.accountsMu.RUnlock()
offset, err := au.roundOffset(rnd)
if err != nil {
return basics.MicroAlgos{}, err
@@ -756,7 +639,7 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou
start := time.Now()
ledgerAccountsinitCount.Inc(nil)
err = au.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- totals, err0 := accountsTotals(tx, false)
+ totals, err0 := accountsTotals(ctx, tx, false)
if err0 != nil {
return err0
}
@@ -770,7 +653,7 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou
return
}
- au.accountsq, err = accountsInitDbQueries(au.dbs.Rdb.Handle, au.dbs.Wdb.Handle)
+ au.accountsq, err = accountsInitDbQueries(au.dbs.Rdb.Handle)
if err != nil {
return
}
@@ -860,10 +743,6 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
au.baseAccounts.prune(newBaseAccountSize)
newBaseResourcesSize := (len(au.resources) + 1) + baseResourcesPendingAccountsBufferSize
au.baseResources.prune(newBaseResourcesSize)
-
- if au.voters != nil {
- au.voters.newBlock(blk.BlockHeader)
- }
}
// lookupLatest returns the account data for a given address for the latest round.
@@ -1094,92 +973,6 @@ func (au *accountUpdates) lookupLatest(addr basics.Address) (data basics.Account
}
}
-// lookupWithRewards returns the online account data for a given address at a given round.
-func (au *accountUpdates) lookupOnlineAccountData(rnd basics.Round, addr basics.Address) (data basics.OnlineAccountData, err error) {
- au.accountsMu.RLock()
- needUnlock := true
- defer func() {
- if needUnlock {
- au.accountsMu.RUnlock()
- }
- }()
- var offset uint64
- var rewardsProto config.ConsensusParams
- var rewardsLevel uint64
- var persistedData persistedAccountData
- for {
- currentDbRound := au.cachedDBRound
- currentDeltaLen := len(au.deltas)
- offset, err = au.roundOffset(rnd)
- if err != nil {
- return
- }
-
- rewardsProto = config.Consensus[au.versions[offset]]
- rewardsLevel = au.roundTotals[offset].RewardsLevel
-
- // check if we've had this address modified in the past rounds. ( i.e. if it's in the deltas )
- macct, indeltas := au.accounts[addr]
- if indeltas {
- // Check if this is the most recent round, in which case, we can
- // use a cache of the most recent account state.
- if offset == uint64(len(au.deltas)) {
- return macct.data.OnlineAccountData(rewardsProto, rewardsLevel), nil
- }
- // the account appears in the deltas, but we don't know if it appears in the
- // delta range of [0..offset], so we'll need to check :
- // Traverse the deltas backwards to ensure that later updates take
- // priority if present.
- for offset > 0 {
- offset--
- d, ok := au.deltas[offset].GetData(addr)
- if ok {
- return d.OnlineAccountData(rewardsProto, rewardsLevel), nil
- }
- }
- }
-
- // check the baseAccounts -
- if macct, has := au.baseAccounts.read(addr); has && macct.round == currentDbRound {
- // we don't technically need this, since it's already in the baseAccounts, however, writing this over
- // would ensure that we promote this field.
- au.baseAccounts.writePending(macct)
- u := macct.accountData.GetLedgerCoreAccountData()
- return u.OnlineAccountData(rewardsProto, rewardsLevel), nil
- }
-
- au.accountsMu.RUnlock()
- needUnlock = false
-
- // No updates of this account in the in-memory deltas; use on-disk DB.
- // The check in roundOffset() made sure the round is exactly the one
- // present in the on-disk DB. As an optimization, we avoid creating
- // a separate transaction here, and directly use a prepared SQL query
- // against the database.
- persistedData, err = au.accountsq.lookup(addr)
- if persistedData.round == currentDbRound {
- var u ledgercore.AccountData
- if persistedData.rowid != 0 {
- // if we read actual data return it
- au.baseAccounts.writePending(persistedData)
- u = persistedData.accountData.GetLedgerCoreAccountData()
- }
- // otherwise return empty
- return u.OnlineAccountData(rewardsProto, rewardsLevel), err
- }
-
- if persistedData.round < currentDbRound {
- au.log.Errorf("accountUpdates.lookupOnlineAccountData: database round %d is behind in-memory round %d", persistedData.round, currentDbRound)
- return basics.OnlineAccountData{}, &StaleDatabaseRoundError{databaseRound: persistedData.round, memoryRound: currentDbRound}
- }
- au.accountsMu.RLock()
- needUnlock = true
- for currentDbRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
- au.accountsReadCond.Wait()
- }
- }
-}
-
func (au *accountUpdates) lookupResource(rnd basics.Round, addr basics.Address, aidx basics.CreatableIndex, ctype basics.CreatableType, synchronized bool) (data ledgercore.AccountResource, validThrough basics.Round, err error) {
needUnlock := false
if synchronized {
@@ -1483,12 +1276,8 @@ func (au *accountUpdates) prepareCommit(dcc *deferredCommitContext) error {
au.accountsMu.RLock()
- // create a copy of the deltas, round totals and protos for the range we're going to flush.
- dcc.deltas = make([]ledgercore.AccountDeltas, offset)
- creatableDeltas := make([]map[basics.CreatableIndex]ledgercore.ModifiedCreatable, offset)
+ // create a copy of the round totals and protos for the range we're going to flush.
dcc.roundTotals = au.roundTotals[offset]
- copy(dcc.deltas, au.deltas[:offset])
- copy(creatableDeltas, au.creatableDeltas[:offset])
// verify version correctness : all the entries in the au.versions[1:offset+1] should have the *same* version, and the committedUpTo should be enforcing that.
if au.versions[1] != au.versions[offset] {
@@ -1498,8 +1287,8 @@ func (au *accountUpdates) prepareCommit(dcc *deferredCommitContext) error {
// it's on a catchpoint round and the node is configured to generate catchpoints. Doing this in a deferred function
// here would prevent us from "forgetting" to update this variable later on.
// The same is repeated in commitRound on errors.
- if dcc.isCatchpointRound && dcc.enableGeneratingCatchpointFiles {
- atomic.StoreInt32(dcc.catchpointWriting, 0)
+ if dcc.catchpointFirstStage && dcc.enableGeneratingCatchpointFiles {
+ atomic.StoreInt32(dcc.catchpointDataWriting, 0)
}
return fmt.Errorf("attempted to commit series of rounds with non-uniform consensus versions")
}
@@ -1510,9 +1299,9 @@ func (au *accountUpdates) prepareCommit(dcc *deferredCommitContext) error {
// compact all the deltas - when we're trying to persist multiple rounds, we might have the same account
// being updated multiple times. When that happen, we can safely omit the intermediate updates.
- dcc.compactAccountDeltas = makeCompactAccountDeltas(dcc.deltas, dcc.oldBase, setUpdateRound, au.baseAccounts)
- dcc.compactResourcesDeltas = makeCompactResourceDeltas(dcc.deltas, dcc.oldBase, setUpdateRound, au.baseAccounts, au.baseResources)
- dcc.compactCreatableDeltas = compactCreatableDeltas(creatableDeltas)
+ dcc.compactAccountDeltas = makeCompactAccountDeltas(au.deltas[:offset], dcc.oldBase, setUpdateRound, au.baseAccounts)
+ dcc.compactResourcesDeltas = makeCompactResourceDeltas(au.deltas[:offset], dcc.oldBase, setUpdateRound, au.baseAccounts, au.baseResources)
+ dcc.compactCreatableDeltas = compactCreatableDeltas(au.creatableDeltas[:offset])
au.accountsMu.RUnlock()
@@ -1533,8 +1322,8 @@ func (au *accountUpdates) commitRound(ctx context.Context, tx *sql.Tx, dcc *defe
defer func() {
if err != nil {
- if dcc.isCatchpointRound && dcc.enableGeneratingCatchpointFiles {
- atomic.StoreInt32(dcc.catchpointWriting, 0)
+ if dcc.catchpointFirstStage && dcc.enableGeneratingCatchpointFiles {
+ atomic.StoreInt32(dcc.catchpointDataWriting, 0)
}
}
}()
@@ -1673,6 +1462,19 @@ func (au *accountUpdates) postCommit(ctx context.Context, dcc *deferredCommitCon
}
}
+ // clear the backing array to let GC collect data.
+ // this is catchpoint-related optimization if for whatever reason catchpoint generation
+ // takes longer than 500 rounds.
+ // the number chosen out of the following calculation:
+ // 300 bytes per acct in delta * 50,000 accts (full block) * 500 rounds = 7.5 GB
+ const deltasClearThreshold = 500
+ if offset > deltasClearThreshold {
+ for i := uint64(0); i < offset; i++ {
+ au.deltas[i] = ledgercore.AccountDeltas{}
+ au.creatableDeltas[i] = nil
+ }
+ }
+
au.deltas = au.deltas[offset:]
au.deltasAccum = au.deltasAccum[offset:]
au.versions = au.versions[offset:]
@@ -1796,8 +1598,6 @@ func (au *accountUpdates) vacuumDatabase(ctx context.Context) (err error) {
return
}
-var ledgerAccountsonlinetopCount = metrics.NewCounter("ledger_accountsonlinetop_count", "calls")
-var ledgerAccountsonlinetopMicros = metrics.NewCounter("ledger_accountsonlinetop_micros", "µs spent")
var ledgerGetcatchpointCount = metrics.NewCounter("ledger_getcatchpoint_count", "calls")
var ledgerGetcatchpointMicros = metrics.NewCounter("ledger_getcatchpoint_micros", "µs spent")
var ledgerAccountsinitCount = metrics.NewCounter("ledger_accountsinit_count", "calls")
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index d597dfdbc..740ac91d0 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -49,14 +49,15 @@ var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
type mockLedgerForTracker struct {
- dbs db.Pair
- blocks []blockEntry
- deltas []ledgercore.StateDelta
- log logging.Logger
- filename string
- inMemory bool
- consensusParams config.ConsensusParams
- accts map[basics.Address]basics.AccountData
+ dbs db.Pair
+ blocks []blockEntry
+ deltas []ledgercore.StateDelta
+ log logging.Logger
+ filename string
+ inMemory bool
+ consensusParams config.ConsensusParams
+ consensusVersion protocol.ConsensusVersion
+ accts map[basics.Address]basics.AccountData
// trackerRegistry manages persistence into DB so we have to have it here even for a single tracker test
trackers trackerRegistry
@@ -98,8 +99,7 @@ func makeMockLedgerForTrackerWithLogger(t testing.TB, inMemory bool, initialBloc
Totals: totals,
}
}
- consensusParams := config.Consensus[consensusVersion]
- return &mockLedgerForTracker{dbs: dbs, log: l, filename: fileName, inMemory: inMemory, blocks: blocks, deltas: deltas, consensusParams: consensusParams, accts: accts[0]}
+ return &mockLedgerForTracker{dbs: dbs, log: l, filename: fileName, inMemory: inMemory, blocks: blocks, deltas: deltas, consensusParams: config.Consensus[consensusVersion], consensusVersion: consensusVersion, accts: accts[0]}
}
@@ -224,6 +224,10 @@ func (ml *mockLedgerForTracker) GenesisProto() config.ConsensusParams {
return ml.consensusParams
}
+func (ml *mockLedgerForTracker) GenesisProtoVersion() protocol.ConsensusVersion {
+ return ml.consensusVersion
+}
+
func (ml *mockLedgerForTracker) GenesisAccounts() map[basics.Address]basics.AccountData {
return ml.accts
}
@@ -257,24 +261,28 @@ func (au *accountUpdates) allBalances(rnd basics.Round) (bals map[basics.Address
return
}
-func newAcctUpdates(tb testing.TB, l *mockLedgerForTracker, conf config.Local, dbPathPrefix string) *accountUpdates {
+func newAcctUpdates(tb testing.TB, l *mockLedgerForTracker, conf config.Local) (*accountUpdates, *onlineAccounts) {
au := &accountUpdates{}
au.initialize(conf)
+ ao := &onlineAccounts{}
+ ao.initialize(conf)
+
_, err := trackerDBInitialize(l, false, ".")
require.NoError(tb, err)
- l.trackers.initialize(l, []ledgerTracker{au}, conf)
+ err = l.trackers.initialize(l, []ledgerTracker{au, ao, &txTail{}}, conf)
+ require.NoError(tb, err)
err = l.trackers.loadFromDisk(l)
require.NoError(tb, err)
- return au
+ return au, ao
}
-func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, latestRnd basics.Round, accts []map[basics.Address]basics.AccountData, rewards []uint64, proto config.ConsensusParams) {
+func checkAcctUpdates(t *testing.T, au *accountUpdates, ao *onlineAccounts, base basics.Round, latestRnd basics.Round, accts []map[basics.Address]basics.AccountData, rewards []uint64, proto config.ConsensusParams) {
latest := au.latest()
require.Equal(t, latestRnd, latest)
- _, err := au.OnlineTotals(latest + 1)
+ _, err := ao.onlineTotals(latest + 1)
require.Error(t, err)
var validThrough basics.Round
@@ -283,7 +291,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates
require.Equal(t, basics.Round(0), validThrough)
if base > 0 {
- _, err := au.OnlineTotals(base - 1)
+ _, err := ao.onlineTotals(base - basics.Round(ao.maxBalLookback()))
require.Error(t, err)
_, validThrough, err = au.LookupWithoutRewards(base-1, ledgertesting.RandomAddress())
@@ -317,6 +325,14 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates
require.NoError(t, err)
require.Equal(t, d, ledgercore.ToAccountData(data))
require.GreaterOrEqualf(t, uint64(validThrough), uint64(rnd), fmt.Sprintf("validThrough :%v\nrnd :%v\n", validThrough, rnd))
+ // TODO: make lookupOnlineAccountData returning extended version of ledgercore.VotingData ?
+ od, err := ao.lookupOnlineAccountData(rnd, addr)
+ require.NoError(t, err)
+ require.Equal(t, od.VoteID, data.VoteID)
+ require.Equal(t, od.SelectionID, data.SelectionID)
+ require.Equal(t, od.VoteFirstValid, data.VoteFirstValid)
+ require.Equal(t, od.VoteLastValid, data.VoteLastValid)
+ require.Equal(t, od.VoteKeyDilution, data.VoteKeyDilution)
rewardsDelta := rewards[rnd] - d.RewardsBase
switch d.Status {
@@ -338,7 +354,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates
bll := accts[rnd]
require.Equal(t, all, bll)
- totals, err := au.OnlineTotals(rnd)
+ totals, err := ao.onlineTotals(rnd)
require.NoError(t, err)
require.Equal(t, totals.Raw, totalOnline)
@@ -346,9 +362,13 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates
require.NoError(t, err)
require.GreaterOrEqualf(t, uint64(validThrough), uint64(rnd), fmt.Sprintf("validThrough :%v\nrnd :%v\n", validThrough, rnd))
require.Equal(t, d, ledgercore.AccountData{})
+ od, err := ao.lookupOnlineAccountData(rnd, ledgertesting.RandomAddress())
+ require.NoError(t, err)
+ require.Equal(t, od, ledgercore.OnlineAccountData{})
}
}
checkAcctUpdatesConsistency(t, au, latestRnd)
+ checkOnlineAcctUpdatesConsistency(t, ao, latestRnd)
}
func checkAcctUpdatesConsistency(t *testing.T, au *accountUpdates, rnd basics.Round) {
@@ -403,122 +423,166 @@ func checkAcctUpdatesConsistency(t *testing.T, au *accountUpdates, rnd basics.Ro
}
}
+func checkOnlineAcctUpdatesConsistency(t *testing.T, ao *onlineAccounts, rnd basics.Round) {
+ accounts := make(map[basics.Address]modifiedOnlineAccount)
+
+ for _, rdelta := range ao.deltas {
+ for i := 0; i < rdelta.Len(); i++ {
+ addr, adelta := rdelta.GetByIdx(i)
+ macct := accounts[addr]
+ macct.data = adelta
+ macct.ndeltas++
+ accounts[addr] = macct
+ }
+ }
+
+ require.Equal(t, ao.accounts, accounts)
+
+ latest := ao.deltas[len(ao.deltas)-1]
+ for i := 0; i < latest.Len(); i++ {
+ addr, acct := latest.GetByIdx(i)
+ od, err := ao.lookupOnlineAccountData(rnd, addr)
+ require.NoError(t, err)
+ require.Equal(t, acct.VoteID, od.VoteID)
+ require.Equal(t, acct.SelectionID, od.SelectionID)
+ require.Equal(t, acct.VoteFirstValid, od.VoteFirstValid)
+ require.Equal(t, acct.VoteLastValid, od.VoteLastValid)
+ require.Equal(t, acct.VoteKeyDilution, od.VoteKeyDilution)
+ }
+}
+
func TestAcctUpdates(t *testing.T) {
partitiontest.PartitionTest(t)
if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
t.Skip("This test is too slow on ARM and causes travis builds to time out")
}
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
-
- accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
- rewardsLevels := []uint64{0}
- pooldata := basics.AccountData{}
- pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
- pooldata.Status = basics.NotParticipating
- accts[0][testPoolAddr] = pooldata
-
- sinkdata := basics.AccountData{}
- sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
- sinkdata.Status = basics.NotParticipating
- accts[0][testSinkAddr] = sinkdata
+ // The next operations are heavy on the memory.
+ // Garbage collection helps prevent trashing
+ runtime.GC()
- ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion, accts)
- defer ml.Close()
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
conf := config.GetDefaultLocal()
- au := newAcctUpdates(t, ml, conf, ".")
- defer au.close()
+ for _, lookback := range []uint64{conf.MaxAcctLookback, proto.MaxBalLookback} {
+ t.Run(fmt.Sprintf("lookback=%d", lookback), func(t *testing.T) {
- // cover 10 genesis blocks
- rewardLevel := uint64(0)
- for i := 1; i < 10; i++ {
- accts = append(accts, accts[0])
- rewardsLevels = append(rewardsLevels, rewardLevel)
- }
+ conf.MaxAcctLookback = lookback
- checkAcctUpdates(t, au, 0, 9, accts, rewardsLevels, proto)
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+ rewardsLevels := []uint64{0}
- // lastCreatableID stores asset or app max used index to get rid of conflicts
- lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512)
- knownCreatables := make(map[basics.CreatableIndex]bool)
+ pooldata := basics.AccountData{}
+ pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ pooldata.Status = basics.NotParticipating
+ accts[0][testPoolAddr] = pooldata
- start := basics.Round(10)
- end := basics.Round(proto.MaxBalLookback + 15)
- for i := start; i < end; i++ {
- rewardLevelDelta := crypto.RandUint64() % 5
- rewardLevel += rewardLevelDelta
- var updates ledgercore.AccountDeltas
- var totals map[basics.Address]ledgercore.AccountData
- base := accts[i-1]
- updates, totals = ledgertesting.RandomDeltasBalancedFull(
- 1, base, rewardLevel, &lastCreatableID)
- prevRound, prevTotals, err := au.LatestTotals()
- require.Equal(t, i-1, prevRound)
- require.NoError(t, err)
+ sinkdata := basics.AccountData{}
+ sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ sinkdata.Status = basics.NotParticipating
+ accts[0][testSinkAddr] = sinkdata
- newPool := totals[testPoolAddr]
- newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta
- updates.Upsert(testPoolAddr, newPool)
- totals[testPoolAddr] = newPool
- newAccts := applyPartialDeltas(base, updates)
+ initialBlocksCount := int(lookback)
+ ml := makeMockLedgerForTracker(t, true, initialBlocksCount, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
- blk := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- Round: basics.Round(i),
- },
- }
- blk.RewardsLevel = rewardLevel
- blk.CurrentProtocol = protocol.ConsensusCurrentVersion
+ au, ao := newAcctUpdates(t, ml, conf)
+ defer au.close()
+ defer ao.close()
- delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
- delta.Accts.MergeAccounts(updates)
- delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables)
+ // cover 10 genesis blocks
+ rewardLevel := uint64(0)
+ for i := 1; i < initialBlocksCount; i++ {
+ accts = append(accts, accts[0])
+ rewardsLevels = append(rewardsLevels, rewardLevel)
+ }
- delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
- au.newBlock(blk, delta)
- accts = append(accts, newAccts)
- rewardsLevels = append(rewardsLevels, rewardLevel)
+ checkAcctUpdates(t, au, ao, 0, basics.Round(initialBlocksCount-1), accts, rewardsLevels, proto)
+
+ // lastCreatableID stores asset or app max used index to get rid of conflicts
+ lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512)
+ knownCreatables := make(map[basics.CreatableIndex]bool)
+
+ maxLookback := conf.MaxAcctLookback
+
+ start := basics.Round(initialBlocksCount)
+ end := basics.Round(maxLookback + 15)
+ for i := start; i < end; i++ {
+ rewardLevelDelta := crypto.RandUint64() % 5
+ rewardLevel += rewardLevelDelta
+ var updates ledgercore.AccountDeltas
+ var totals map[basics.Address]ledgercore.AccountData
+ base := accts[i-1]
+ updates, totals = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, &lastCreatableID)
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
+ require.NoError(t, err)
- // checkAcctUpdates is kind of slow because of amount of data it needs to compare
- // instead, compare at start, end in between approx 10 rounds
- if i == start || i == end-1 || crypto.RandUint64()%10 == 0 {
- checkAcctUpdates(t, au, 0, i, accts, rewardsLevels, proto)
- }
- }
+ newPool := totals[testPoolAddr]
+ newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta
+ updates.Upsert(testPoolAddr, newPool)
+ totals[testPoolAddr] = newPool
+ newAccts := applyPartialDeltas(base, updates)
- for i := basics.Round(0); i < 15; i++ {
- // Clear the timer to ensure a flush
- ml.trackers.lastFlushTime = time.Time{}
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(i),
+ },
+ }
+ blk.RewardsLevel = rewardLevel
+ blk.CurrentProtocol = protocol.ConsensusCurrentVersion
+
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
+ delta.Accts.MergeAccounts(updates)
+ delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables)
+
+ delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
+ ml.trackers.newBlock(blk, delta)
+ accts = append(accts, newAccts)
+ rewardsLevels = append(rewardsLevels, rewardLevel)
+
+ // checkAcctUpdates is kind of slow because of amount of data it needs to compare
+ // instead, compare at start, end in between approx 10 rounds
+ if i == start || i == end-1 || crypto.RandUint64()%10 == 0 || lookback < 10 {
+ checkAcctUpdates(t, au, ao, 0, i, accts, rewardsLevels, proto)
+ }
+ }
+ for i := basics.Round(0); i < 15; i++ {
+ // Clear the timer to ensure a flush
+ ml.trackers.lastFlushTime = time.Time{}
- ml.trackers.committedUpTo(basics.Round(proto.MaxBalLookback) + i)
- ml.trackers.waitAccountsWriting()
- checkAcctUpdates(t, au, i, basics.Round(proto.MaxBalLookback+14), accts, rewardsLevels, proto)
- }
+ ml.trackers.committedUpTo(basics.Round(maxLookback) + i)
+ ml.trackers.waitAccountsWriting()
+ checkAcctUpdates(t, au, ao, i, basics.Round(maxLookback+14), accts, rewardsLevels, proto)
+ }
- // check the account totals.
- var dbRound basics.Round
- err := ml.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- dbRound, err = accountsRound(tx)
- return
- })
- require.NoError(t, err)
+ // check the account totals.
+ var dbRound basics.Round
+ err := ml.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ dbRound, err = accountsRound(tx)
+ return
+ })
+ require.NoError(t, err)
- var updates ledgercore.AccountDeltas
- for addr, acctData := range accts[dbRound] {
- updates.Upsert(addr, ledgercore.ToAccountData(acctData))
- }
+ var updates ledgercore.AccountDeltas
+ for addr, acctData := range accts[dbRound] {
+ updates.Upsert(addr, ledgercore.ToAccountData(acctData))
+ }
- expectedTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, rewardsLevels[dbRound], proto, nil, ledgercore.AccountTotals{})
- var actualTotals ledgercore.AccountTotals
- err = ml.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- actualTotals, err = accountsTotals(tx, false)
- return
- })
- require.NoError(t, err)
- require.Equal(t, expectedTotals, actualTotals)
+ expectedTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, rewardsLevels[dbRound], proto, nil, ledgercore.AccountTotals{})
+ var actualTotals ledgercore.AccountTotals
+ err = ml.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ actualTotals, err = accountsTotals(ctx, tx, false)
+ return
+ })
+ require.NoError(t, err)
+ require.Equal(t, expectedTotals, actualTotals)
+ })
+ }
}
+
+// TestAcctUpdatesFastUpdates tests catchpoint label writing datarace
func TestAcctUpdatesFastUpdates(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -540,26 +604,33 @@ func TestAcctUpdatesFastUpdates(t *testing.T) {
sinkdata.Status = basics.NotParticipating
accts[0][testSinkAddr] = sinkdata
- ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion, accts)
- defer ml.Close()
-
conf := config.GetDefaultLocal()
conf.CatchpointInterval = 1
- au := newAcctUpdates(t, ml, conf, ".")
+ initialBlocksCount := int(conf.MaxAcctLookback)
+ ml := makeMockLedgerForTracker(t, true, initialBlocksCount, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
+
+ au, ao := newAcctUpdates(t, ml, conf)
defer au.close()
+ defer ao.close()
+
+ // Remove the txtail from the list of trackers since it causes a data race that
+ // wouldn't be observed under normal execution because commitedUpTo and newBlock
+ // are protected by the tracker mutex.
+ ml.trackers.trackers = ml.trackers.trackers[:2]
// cover 10 genesis blocks
rewardLevel := uint64(0)
- for i := 1; i < 10; i++ {
+ for i := 1; i < initialBlocksCount; i++ {
accts = append(accts, accts[0])
rewardsLevels = append(rewardsLevels, rewardLevel)
}
- checkAcctUpdates(t, au, 0, 9, accts, rewardsLevels, proto)
+ checkAcctUpdates(t, au, ao, 0, basics.Round(initialBlocksCount)-1, accts, rewardsLevels, proto)
wg := sync.WaitGroup{}
- for i := basics.Round(10); i < basics.Round(proto.MaxBalLookback+15); i++ {
+ for i := basics.Round(initialBlocksCount); i < basics.Round(proto.CatchpointLookback+15); i++ {
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
updates, totals := ledgertesting.RandomDeltasBalanced(1, accts[i-1], rewardLevel)
@@ -583,7 +654,7 @@ func TestAcctUpdatesFastUpdates(t *testing.T) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
- au.newBlock(blk, delta)
+ ml.trackers.newBlock(blk, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
@@ -593,6 +664,7 @@ func TestAcctUpdatesFastUpdates(t *testing.T) {
ml.trackers.committedUpTo(round)
}(i)
}
+ ml.trackers.waitAccountsWriting()
wg.Wait()
}
@@ -603,15 +675,7 @@ func BenchmarkBalancesChanges(b *testing.B) {
if b.N < 100 {
b.N = 50
}
- protocolVersion := protocol.ConsensusVersion("BenchmarkBalancesChanges-test-protocol-version")
- testProtocol := config.Consensus[protocol.ConsensusCurrentVersion]
- testProtocol.MaxBalLookback = 25
- config.Consensus[protocolVersion] = testProtocol
- defer func() {
- delete(config.Consensus, protocolVersion)
- }()
-
- proto := config.Consensus[protocolVersion]
+ protocolVersion := protocol.ConsensusCurrentVersion
initialRounds := uint64(1)
@@ -633,8 +697,10 @@ func BenchmarkBalancesChanges(b *testing.B) {
defer ml.Close()
conf := config.GetDefaultLocal()
- au := newAcctUpdates(b, ml, conf, ".")
+ maxAcctLookback := conf.MaxAcctLookback
+ au, ao := newAcctUpdates(b, ml, conf)
defer au.close()
+ defer ao.close()
// cover initialRounds genesis blocks
rewardLevel := uint64(0)
@@ -643,12 +709,12 @@ func BenchmarkBalancesChanges(b *testing.B) {
rewardsLevels = append(rewardsLevels, rewardLevel)
}
- for i := basics.Round(initialRounds); i < basics.Round(proto.MaxBalLookback+uint64(b.N)); i++ {
+ for i := basics.Round(initialRounds); i < basics.Round(maxAcctLookback+uint64(b.N)); i++ {
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
accountChanges := 0
if i <= basics.Round(initialRounds)+basics.Round(b.N) {
- accountChanges = accountsCount - 2 - int(basics.Round(proto.MaxBalLookback+uint64(b.N))+i)
+ accountChanges = accountsCount - 2 - int(basics.Round(maxAcctLookback+uint64(b.N))+i)
}
updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
@@ -672,11 +738,11 @@ func BenchmarkBalancesChanges(b *testing.B) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
- au.newBlock(blk, delta)
+ ml.trackers.newBlock(blk, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
- for i := proto.MaxBalLookback; i < proto.MaxBalLookback+initialRounds; i++ {
+ for i := maxAcctLookback; i < maxAcctLookback+initialRounds; i++ {
// Clear the timer to ensure a flush
ml.trackers.lastFlushTime = time.Time{}
ml.trackers.committedUpTo(basics.Round(i))
@@ -684,7 +750,7 @@ func BenchmarkBalancesChanges(b *testing.B) {
ml.trackers.waitAccountsWriting()
b.ResetTimer()
startTime := time.Now()
- for i := proto.MaxBalLookback + initialRounds; i < proto.MaxBalLookback+uint64(b.N); i++ {
+ for i := maxAcctLookback + initialRounds; i < maxAcctLookback+uint64(b.N); i++ {
// Clear the timer to ensure a flush
ml.trackers.lastFlushTime = time.Time{}
ml.trackers.committedUpTo(basics.Round(i))
@@ -699,7 +765,6 @@ func BenchmarkBalancesChanges(b *testing.B) {
b.N = int(time.Second / singleIterationTime)
// and now, wait for the reminder of the second.
time.Sleep(time.Second - deltaTime)
-
}
func BenchmarkCalibrateNodesPerPage(b *testing.B) {
@@ -732,6 +797,7 @@ func BenchmarkCalibrateCacheNodeSize(b *testing.B) {
func TestLargeAccountCountCatchpointGeneration(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Skip("TODO: move to catchpointtracker_test and add catchpoint tracker into trackers list")
if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
t.Skip("This test is too slow on ARM and causes travis builds to time out")
}
@@ -743,6 +809,7 @@ func TestLargeAccountCountCatchpointGeneration(t *testing.T) {
// create new protocol version, which has lower lookback
testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestLargeAccountCountCatchpointGeneration")
protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ // TODO: fix MaxBalLookback after updating catchpoint round
protoParams.MaxBalLookback = 32
protoParams.SeedLookback = 2
protoParams.SeedRefreshInterval = 8
@@ -765,23 +832,26 @@ func TestLargeAccountCountCatchpointGeneration(t *testing.T) {
sinkdata.Status = basics.NotParticipating
accts[0][testSinkAddr] = sinkdata
- ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, accts)
- defer ml.Close()
-
conf := config.GetDefaultLocal()
conf.CatchpointInterval = 1
conf.Archival = true
- au := newAcctUpdates(t, ml, conf, ".")
+ initialBlocksCount := int(conf.MaxAcctLookback)
+ ml := makeMockLedgerForTracker(t, true, initialBlocksCount, testProtocolVersion, accts)
+ defer ml.Close()
+
+ au, _ := newAcctUpdates(t, ml, conf)
defer au.close()
// cover 10 genesis blocks
rewardLevel := uint64(0)
- for i := 1; i < 10; i++ {
+ for i := 1; i < initialBlocksCount; i++ {
accts = append(accts, accts[0])
rewardsLevels = append(rewardsLevels, rewardLevel)
}
- for i := basics.Round(10); i < basics.Round(protoParams.MaxBalLookback+5); i++ {
+ start := basics.Round(initialBlocksCount)
+ end := basics.Round(protoParams.MaxBalLookback + 5)
+ for i := start; i < end; i++ {
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
updates, totals := ledgertesting.RandomDeltasBalanced(1, accts[i-1], rewardLevel)
@@ -806,12 +876,12 @@ func TestLargeAccountCountCatchpointGeneration(t *testing.T) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
- au.newBlock(blk, delta)
+ ml.trackers.newBlock(blk, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
ml.trackers.committedUpTo(i)
- if i%2 == 1 {
+ if i%2 == 1 || i == end-1 {
ml.trackers.waitAccountsWriting()
}
}
@@ -835,14 +905,8 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) {
}
// create new protocol version, which has lower look back.
- testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestAcctUpdatesUpdatesCorrectness")
- protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
- protoParams.MaxBalLookback = 5
- config.Consensus[testProtocolVersion] = protoParams
- defer func() {
- delete(config.Consensus, testProtocolVersion)
- }()
-
+ testProtocolVersion := protocol.ConsensusCurrentVersion
+ maxAcctLookback := config.GetDefaultLocal().MaxAcctLookback
inMemory := true
testFunction := func(t *testing.T) {
@@ -879,7 +943,7 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) {
}
conf := config.GetDefaultLocal()
- au := newAcctUpdates(t, ml, conf, ".")
+ au, _ := newAcctUpdates(t, ml, conf)
defer au.close()
// cover 10 genesis blocks
@@ -926,10 +990,10 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) {
moneyAccountsExpectedAmounts[i][0] = moneyAccountsExpectedAmounts[i-1][0] + uint64(len(moneyAccounts)-1)*uint64(i-10)
// force to perform a test that goes directly to disk, and see if it has the expected values.
- if uint64(i) > protoParams.MaxBalLookback+3 {
+ if uint64(i) > maxAcctLookback+3 {
// check the status at a historical time:
- checkRound := uint64(i) - protoParams.MaxBalLookback - 2
+ checkRound := uint64(i) - maxAcctLookback - 2
testback := 1
for j := 1; j < len(moneyAccounts); j++ {
@@ -971,7 +1035,7 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) {
for addr, ad := range updates {
delta.Accts.Upsert(addr, ad)
}
- au.newBlock(blk, delta)
+ ml.trackers.newBlock(blk, delta)
ml.trackers.committedUpTo(i)
}
lastRound := i - 1
@@ -1125,14 +1189,14 @@ func TestListCreatables(t *testing.T) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
accts := make(map[basics.Address]basics.AccountData)
- _ = accountsInitTest(t, tx, accts, proto)
+ _ = accountsInitTest(t, tx, accts, protocol.ConsensusCurrentVersion)
require.NoError(t, err)
err = accountsAddNormalizedBalance(tx, proto)
require.NoError(t, err)
au := &accountUpdates{}
- au.accountsq, err = accountsInitDbQueries(tx, tx)
+ au.accountsq, err = accountsInitDbQueries(tx)
require.NoError(t, err)
// ******* All results are obtained from the cache. Empty database *******
@@ -1238,7 +1302,7 @@ func BenchmarkLargeMerkleTrieRebuild(b *testing.B) {
cfg := config.GetDefaultLocal()
cfg.Archival = true
- au := newAcctUpdates(b, ml, cfg, ".")
+ au, _ := newAcctUpdates(b, ml, cfg)
defer au.close()
// at this point, the database was created. We want to fill the accounts data
@@ -1261,7 +1325,7 @@ func BenchmarkLargeMerkleTrieRebuild(b *testing.B) {
}
err := ml.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- return updateAccountsHashRound(tx, 1)
+ return updateAccountsHashRound(ctx, tx, 1)
})
require.NoError(b, err)
@@ -1556,7 +1620,6 @@ func TestAcctUpdatesCachesInitialization(t *testing.T) {
runtime.GC()
protocolVersion := protocol.ConsensusCurrentVersion
- proto := config.Consensus[protocolVersion]
initialRounds := uint64(1)
accountsCount := 5
@@ -1579,7 +1642,7 @@ func TestAcctUpdatesCachesInitialization(t *testing.T) {
defer ml.Close()
conf := config.GetDefaultLocal()
- au := newAcctUpdates(t, ml, conf, ".")
+ au, _ := newAcctUpdates(t, ml, conf)
// cover initialRounds genesis blocks
rewardLevel := uint64(0)
@@ -1588,7 +1651,7 @@ func TestAcctUpdatesCachesInitialization(t *testing.T) {
rewardsLevels = append(rewardsLevels, rewardLevel)
}
- recoveredLedgerRound := basics.Round(initialRounds + initializeCachesRoundFlushInterval + proto.MaxBalLookback + 1)
+ recoveredLedgerRound := basics.Round(initialRounds + initializeCachesRoundFlushInterval + conf.MaxAcctLookback + 1)
for i := basics.Round(initialRounds); i <= recoveredLedgerRound; i++ {
rewardLevelDelta := crypto.RandUint64() % 5
@@ -1618,7 +1681,7 @@ func TestAcctUpdatesCachesInitialization(t *testing.T) {
delta.Accts.MergeAccounts(updates)
delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
ml.addMockBlock(blockEntry{block: blk}, delta)
- au.newBlock(blk, delta)
+ ml.trackers.newBlock(blk, delta)
ml.trackers.committedUpTo(basics.Round(i))
ml.trackers.waitAccountsWriting()
accts = append(accts, newAccts)
@@ -1639,12 +1702,12 @@ func TestAcctUpdatesCachesInitialization(t *testing.T) {
ml2.deltas = ml.deltas
conf = config.GetDefaultLocal()
- au = newAcctUpdates(t, ml2, conf, ".")
+ au, _ = newAcctUpdates(t, ml2, conf)
defer au.close()
// make sure the deltas array end up containing only the most recent 320 rounds.
- require.Equal(t, int(proto.MaxBalLookback), len(au.deltas))
- require.Equal(t, recoveredLedgerRound-basics.Round(proto.MaxBalLookback), au.cachedDBRound)
+ require.Equal(t, int(conf.MaxAcctLookback), len(au.deltas))
+ require.Equal(t, recoveredLedgerRound-basics.Round(conf.MaxAcctLookback), au.cachedDBRound)
// Garbage collection helps prevent trashing for next tests
runtime.GC()
@@ -1655,7 +1718,6 @@ func TestAcctUpdatesSplittingConsensusVersionCommits(t *testing.T) {
partitiontest.PartitionTest(t)
initProtocolVersion := protocol.ConsensusV20
- initialProtoParams := config.Consensus[initProtocolVersion]
initialRounds := uint64(1)
@@ -1678,10 +1740,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommits(t *testing.T) {
defer ml.Close()
conf := config.GetDefaultLocal()
- au := newAcctUpdates(t, ml, conf, ".")
-
- err := au.loadFromDisk(ml, 0)
- require.NoError(t, err)
+ au, _ := newAcctUpdates(t, ml, conf)
defer au.close()
// cover initialRounds genesis blocks
@@ -1722,15 +1781,16 @@ func TestAcctUpdatesSplittingConsensusVersionCommits(t *testing.T) {
delta.Accts.MergeAccounts(updates)
delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
ml.addMockBlock(blockEntry{block: blk}, delta)
- au.newBlock(blk, delta)
+ ml.trackers.newBlock(blk, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
newVersionBlocksCount := uint64(47)
newVersion := protocol.ConsensusV21
- // add 47 more rounds that contains blocks using a newer consensus version, and stuff it with MaxBalLookback
- lastRoundToWrite := basics.Round(initialRounds + initialProtoParams.MaxBalLookback + extraRounds + newVersionBlocksCount)
+ maxAcctLookback := conf.MaxAcctLookback
+ // add 47 more rounds that contains blocks using a newer consensus version, and stuff it with maxAcctLookback
+ lastRoundToWrite := basics.Round(initialRounds + maxAcctLookback + extraRounds + newVersionBlocksCount)
for i := basics.Round(initialRounds + extraRounds); i < lastRoundToWrite; i++ {
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
@@ -1759,7 +1819,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommits(t *testing.T) {
delta.Accts.MergeAccounts(updates)
delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
ml.addMockBlock(blockEntry{block: blk}, delta)
- au.newBlock(blk, delta)
+ ml.trackers.newBlock(blk, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
@@ -1770,13 +1830,12 @@ func TestAcctUpdatesSplittingConsensusVersionCommits(t *testing.T) {
}
-// TestAcctUpdatesSplittingConsensusVersionCommitsBoundry tests the a sequence of commits that spans over multiple consensus versions works correctly, and
+// TestAcctUpdatesSplittingConsensusVersionCommitsBoundary tests the a sequence of commits that spans over multiple consensus versions works correctly, and
// in particular, complements TestAcctUpdatesSplittingConsensusVersionCommits by testing the commit boundary.
-func TestAcctUpdatesSplittingConsensusVersionCommitsBoundry(t *testing.T) {
+func TestAcctUpdatesSplittingConsensusVersionCommitsBoundary(t *testing.T) {
partitiontest.PartitionTest(t)
initProtocolVersion := protocol.ConsensusV20
- initialProtoParams := config.Consensus[initProtocolVersion]
initialRounds := uint64(1)
@@ -1799,10 +1858,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundry(t *testing.T) {
defer ml.Close()
conf := config.GetDefaultLocal()
- au := newAcctUpdates(t, ml, conf, ".")
-
- err := au.loadFromDisk(ml, 0)
- require.NoError(t, err)
+ au, _ := newAcctUpdates(t, ml, conf)
defer au.close()
// cover initialRounds genesis blocks
@@ -1843,14 +1899,15 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundry(t *testing.T) {
delta.Accts.MergeAccounts(updates)
delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
ml.addMockBlock(blockEntry{block: blk}, delta)
- au.newBlock(blk, delta)
+ ml.trackers.newBlock(blk, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
newVersion := protocol.ConsensusV21
- // add MaxBalLookback-extraRounds more rounds that contains blocks using a newer consensus version.
- endOfFirstNewProtocolSegment := basics.Round(initialRounds + extraRounds + initialProtoParams.MaxBalLookback)
+ maxAcctLockback := conf.MaxAcctLookback
+ // add maxAcctLockback-extraRounds more rounds that contains blocks using a newer consensus version.
+ endOfFirstNewProtocolSegment := basics.Round(initialRounds + extraRounds + maxAcctLockback)
for i := basics.Round(initialRounds + extraRounds); i <= endOfFirstNewProtocolSegment; i++ {
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
@@ -1879,7 +1936,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundry(t *testing.T) {
delta.Accts.MergeAccounts(updates)
delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
ml.addMockBlock(blockEntry{block: blk}, delta)
- au.newBlock(blk, delta)
+ ml.trackers.newBlock(blk, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
@@ -1889,7 +1946,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundry(t *testing.T) {
require.Equal(t, basics.Round(initialRounds+extraRounds)-1, au.cachedDBRound)
// write additional extraRounds elements and verify these can be flushed.
- for i := endOfFirstNewProtocolSegment + 1; i <= basics.Round(initialRounds+2*extraRounds+initialProtoParams.MaxBalLookback); i++ {
+ for i := endOfFirstNewProtocolSegment + 1; i <= basics.Round(initialRounds+2*extraRounds+maxAcctLockback); i++ {
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
accountChanges := 2
@@ -1917,7 +1974,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundry(t *testing.T) {
delta.Accts.MergeAccounts(updates)
delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
ml.addMockBlock(blockEntry{block: blk}, delta)
- au.newBlock(blk, delta)
+ ml.trackers.newBlock(blk, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
@@ -1942,21 +1999,14 @@ func TestAcctUpdatesResources(t *testing.T) {
sinkdata.Status = basics.NotParticipating
accts[0][testSinkAddr] = sinkdata
- testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestAcctUpdatesResources")
- protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
- protoParams.MaxBalLookback = 2
- protoParams.SeedLookback = 1
- protoParams.SeedRefreshInterval = 1
- config.Consensus[testProtocolVersion] = protoParams
- defer func() {
- delete(config.Consensus, testProtocolVersion)
- }()
+ testProtocolVersion := protocol.ConsensusCurrentVersion
+ protoParams := config.Consensus[testProtocolVersion]
ml := makeMockLedgerForTracker(t, false, 1, testProtocolVersion, accts)
defer ml.Close()
conf := config.GetDefaultLocal()
- au := newAcctUpdates(t, ml, conf, ".")
+ au, _ := newAcctUpdates(t, ml, conf)
defer au.close()
var addr1 basics.Address
@@ -1973,6 +2023,8 @@ func TestAcctUpdatesResources(t *testing.T) {
}
}
+ maxAcctLookback := conf.MaxAcctLookback
+
aidx := basics.AssetIndex(1)
aidx2 := basics.AssetIndex(2)
aidx3 := basics.AppIndex(3)
@@ -1980,10 +2032,10 @@ func TestAcctUpdatesResources(t *testing.T) {
rewardLevel := uint64(0)
knownCreatables := make(map[basics.CreatableIndex]bool)
- // the test 1 requires 3 blocks with different resource state, au requires MaxBalLookback block to start persisting
+ // the test 1 requires 3 blocks with different resource state, au requires maxAcctLookback blocks to start persisting
// the test 2 requires 2 more blocks
// the test 2 requires 2 more blocks
- for i := basics.Round(1); i <= basics.Round(protoParams.MaxBalLookback+3+2+2); i++ {
+ for i := basics.Round(1); i <= basics.Round(maxAcctLookback+3+2+2); i++ {
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
var updates ledgercore.AccountDeltas
@@ -2061,7 +2113,7 @@ func TestAcctUpdatesResources(t *testing.T) {
delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables)
delta.Totals = newTotals
- au.newBlock(blk, delta)
+ ml.trackers.newBlock(blk, delta)
// commit changes synchroniously
_, maxLookback := au.committedUpTo(i)
@@ -2157,10 +2209,8 @@ func TestAcctUpdatesLookupLatest(t *testing.T) {
defer ml.Close()
conf := config.GetDefaultLocal()
- au := newAcctUpdates(t, ml, conf, ".")
- err := au.loadFromDisk(ml, 0)
+ au, _ := newAcctUpdates(t, ml, conf)
defer au.close()
- require.NoError(t, err)
for addr, acct := range accts {
acctData, validThrough, withoutRewards, err := au.lookupLatest(addr)
require.NoError(t, err)
@@ -2184,13 +2234,8 @@ func TestAcctUpdatesLookupLatest(t *testing.T) {
// In this case it waits on a condition variable and retries when
// commitSyncer/accountUpdates has advanced the cachedDBRound.
func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates, accts []map[basics.Address]basics.AccountData, rnd basics.Round, proto config.ConsensusParams, rewardsLevels []uint64)) {
- testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestAcctUpdatesLookupRetry")
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
- proto.MaxBalLookback = 10
- config.Consensus[testProtocolVersion] = proto
- defer func() {
- delete(config.Consensus, testProtocolVersion)
- }()
+ testProtocolVersion := protocol.ConsensusCurrentVersion
+ proto := config.Consensus[testProtocolVersion]
accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
rewardsLevels := []uint64{0}
@@ -2205,27 +2250,29 @@ func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates,
sinkdata.Status = basics.NotParticipating
accts[0][testSinkAddr] = sinkdata
- ml := makeMockLedgerForTracker(t, false, 10, testProtocolVersion, accts)
+ conf := config.GetDefaultLocal()
+ initialBlocksCount := int(conf.MaxAcctLookback)
+ ml := makeMockLedgerForTracker(t, false, initialBlocksCount, testProtocolVersion, accts)
defer ml.Close()
- conf := config.GetDefaultLocal()
- au := newAcctUpdates(t, ml, conf, ".")
+ au, ao := newAcctUpdates(t, ml, conf)
defer au.close()
+ defer ao.close()
// cover 10 genesis blocks
rewardLevel := uint64(0)
- for i := 1; i < 10; i++ {
+ for i := 1; i < initialBlocksCount; i++ {
accts = append(accts, accts[0])
rewardsLevels = append(rewardsLevels, rewardLevel)
}
- checkAcctUpdates(t, au, 0, 9, accts, rewardsLevels, proto)
+ checkAcctUpdates(t, au, ao, 0, basics.Round(initialBlocksCount)-1, accts, rewardsLevels, proto)
// lastCreatableID stores asset or app max used index to get rid of conflicts
lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512)
knownCreatables := make(map[basics.CreatableIndex]bool)
- for i := basics.Round(10); i < basics.Round(proto.MaxBalLookback+15); i++ {
+ for i := basics.Round(initialBlocksCount); i < basics.Round(conf.MaxAcctLookback+15); i++ {
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
var updates ledgercore.AccountDeltas
@@ -2255,18 +2302,18 @@ func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates,
delta.Accts.MergeAccounts(updates)
delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables)
delta.Totals = accumulateTotals(t, testProtocolVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
- au.newBlock(blk, delta)
+ ml.trackers.newBlock(blk, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
- checkAcctUpdates(t, au, 0, i, accts, rewardsLevels, proto)
+ checkAcctUpdates(t, au, ao, 0, i, accts, rewardsLevels, proto)
}
flushRound := func(i basics.Round) {
// Clear the timer to ensure a flush
ml.trackers.lastFlushTime = time.Time{}
- ml.trackers.committedUpTo(basics.Round(proto.MaxBalLookback) + i)
+ ml.trackers.committedUpTo(basics.Round(conf.MaxAcctLookback) + i)
ml.trackers.waitAccountsWriting()
}
@@ -2305,7 +2352,7 @@ func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates,
// issue a lookupWithoutRewards while persistedData.round != au.cachedDBRound
// when synchronized=false it will fail fast
_, _, _, _, err := au.lookupWithoutRewards(rnd, basics.Address{}, false)
- require.Equal(t, err, &MismatchingDatabaseRoundError{databaseRound: 2, memoryRound: 1})
+ require.Equal(t, &MismatchingDatabaseRoundError{databaseRound: 2, memoryRound: 1}, err)
// release the postCommit lock, once au.lookupWithoutRewards hits au.accountsReadCond.Wait()
go func() {
@@ -2355,6 +2402,7 @@ func TestAcctUpdatesLookupRetry(t *testing.T) {
d, validThrough, _, _, err := au.lookupWithoutRewards(rnd, addr, true)
require.NoError(t, err)
require.Equal(t, d, ledgercore.ToAccountData(data))
+ // TODO: add online account data check
require.GreaterOrEqualf(t, uint64(validThrough), uint64(rnd), "validThrough: %v rnd :%v", validThrough, rnd)
})
}
@@ -2449,21 +2497,14 @@ func TestAcctUpdatesLookupLatestCacheRetry(t *testing.T) {
sinkdata.Status = basics.NotParticipating
accts[0][testSinkAddr] = sinkdata
- testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestAcctUpdatesLookupLatestCacheRetry")
- protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
- protoParams.MaxBalLookback = 2
- protoParams.SeedLookback = 1
- protoParams.SeedRefreshInterval = 1
- config.Consensus[testProtocolVersion] = protoParams
- defer func() {
- delete(config.Consensus, testProtocolVersion)
- }()
+ testProtocolVersion := protocol.ConsensusCurrentVersion
+ protoParams := config.Consensus[testProtocolVersion]
ml := makeMockLedgerForTracker(t, true, 1, testProtocolVersion, accts)
defer ml.Close()
conf := config.GetDefaultLocal()
- au := newAcctUpdates(t, ml, conf, ".")
+ au, _ := newAcctUpdates(t, ml, conf)
defer au.close()
var addr1 basics.Address
@@ -2479,7 +2520,7 @@ func TestAcctUpdatesLookupLatestCacheRetry(t *testing.T) {
knownCreatables := make(map[basics.CreatableIndex]bool)
// the test 1 requires 2 blocks with different resource state, au requires MaxBalLookback block to start persisting
- for i := basics.Round(1); i <= basics.Round(protoParams.MaxBalLookback+2); i++ {
+ for i := basics.Round(1); i <= basics.Round(conf.MaxAcctLookback+2); i++ {
var updates ledgercore.AccountDeltas
// add data
@@ -2503,7 +2544,7 @@ func TestAcctUpdatesLookupLatestCacheRetry(t *testing.T) {
// ensure rounds
rnd := au.latest()
- require.Equal(t, basics.Round(protoParams.MaxBalLookback+2), rnd)
+ require.Equal(t, basics.Round(conf.MaxAcctLookback+2), rnd)
require.Equal(t, basics.Round(2), au.cachedDBRound)
oldCachedDBRound := au.cachedDBRound
@@ -2601,7 +2642,7 @@ func TestAcctUpdatesLookupResources(t *testing.T) {
defer ml.Close()
conf := config.GetDefaultLocal()
- au := newAcctUpdates(t, ml, conf, ".")
+ au, _ := newAcctUpdates(t, ml, conf)
defer au.close()
var addr1 basics.Address
diff --git a/ledger/applications_test.go b/ledger/applications_test.go
index d96a9a659..b8fc6ad36 100644
--- a/ledger/applications_test.go
+++ b/ledger/applications_test.go
@@ -143,15 +143,15 @@ return`
// the difference between these encoded structure is the UpdateRound variable. This variable is not being set before
// the consensus upgrade, and affects only nodes that have been updated.
if proto.EnableAccountDataResourceSeparation {
- expectedCreatorBase, err = hex.DecodeString("85a16101a162ce009d2290a16704a16b01a17a01")
+ expectedCreatorBase, err = hex.DecodeString("87a14301a144ce000186a0a16101a162ce009d2290a16704a16b01a17a01")
a.NoError(err)
expectedCreatorResource, err = hex.DecodeString("86a171c45602200200012604056c6f63616c06676c6f62616c026c6b02676b3118221240003331192212400010311923124000022243311b221240001c361a00281240000a361a0029124000092243222a28664200032b29672343a172c40102a17501a17704a17903a17a01")
a.NoError(err)
- expectedUserOptInBase, err = hex.DecodeString("85a16101a162ce00a02fd0a16701a16c01a17a02")
+ expectedUserOptInBase, err = hex.DecodeString("87a14301a144ce000186a0a16101a162ce00a02fd0a16701a16c01a17a02")
a.NoError(err)
expectedUserOptInResource, err = hex.DecodeString("82a16f01a17a02")
a.NoError(err)
- expectedUserLocalBase, err = hex.DecodeString("85a16101a162ce00a33540a16701a16c01a17a04")
+ expectedUserLocalBase, err = hex.DecodeString("87a14301a144ce000186a0a16101a162ce00a33540a16701a16c01a17a04")
a.NoError(err)
expectedUserLocalResource, err = hex.DecodeString("83a16f01a17081a26c6b82a27462a56c6f63616ca2747401a17a04")
a.NoError(err)
@@ -296,7 +296,8 @@ return`
a.Equal("local", ar.AppLocalState.KeyValue["lk"].Bytes)
// ensure writing into empty global state works as well
- l.reloadLedger()
+ err = l.reloadLedger()
+ a.NoError(err)
txHeader.Sender = creator
appCallFields = transactions.ApplicationCallTxnFields{
OnCompletion: 0,
@@ -1234,3 +1235,70 @@ int 1
})
}
}
+
+// TestLogicSigValidation tests that LogicSig-signed transactions can be validated properly.
+func TestLogicSigValidation(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ source := `#pragma version 6
+int 1
+`
+
+ a := require.New(t)
+ ops, err := logic.AssembleString(source)
+ a.NoError(err)
+ a.Greater(len(ops.Program), 1)
+ program := ops.Program
+ pd := logic.HashProgram(program)
+ lsigAddr := basics.Address(pd)
+
+ funder, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
+ a.NoError(err)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ a.Contains(genesisInitState.Accounts, funder)
+
+ cfg := config.GetDefaultLocal()
+ l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg)
+ a.NoError(err)
+ defer l.Close()
+
+ genesisID := t.Name()
+ txHeader := transactions.Header{
+ Sender: funder,
+ Fee: basics.MicroAlgos{Raw: proto.MinTxnFee},
+ FirstValid: l.Latest() + 1,
+ LastValid: l.Latest() + 10,
+ GenesisID: genesisID,
+ GenesisHash: genesisInitState.GenesisHash,
+ }
+
+ // fund lsig account
+ fundingPayment := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: txHeader,
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: lsigAddr,
+ Amount: basics.MicroAlgos{Raw: proto.MinBalance + proto.MinTxnFee},
+ },
+ }
+ err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, fundingPayment, transactions.ApplyData{})
+ a.NoError(err)
+
+ // send 0 Algos from lsig account to self
+ txHeader.Sender = lsigAddr
+ lsigPayment := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: txHeader,
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: lsigAddr,
+ },
+ }
+ signedLsigPayment := transactions.SignedTxn{
+ Lsig: transactions.LogicSig{Logic: program},
+ Txn: lsigPayment,
+ }
+ err = l.appendUnvalidatedSignedTx(t, genesisInitState.Accounts, signedLsigPayment, transactions.ApplyData{})
+ a.NoError(err)
+}
diff --git a/ledger/apply/apply.go b/ledger/apply/apply.go
index 9acd48454..34017e383 100644
--- a/ledger/apply/apply.go
+++ b/ledger/apply/apply.go
@@ -19,11 +19,19 @@ package apply
import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger/ledgercore"
)
+// StateProofsApplier allows fetching and updating state-proofs state on the ledger
+type StateProofsApplier interface {
+ BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error)
+ GetStateProofNextRound() basics.Round
+ SetStateProofNextRound(rnd basics.Round)
+}
+
// Balances allow to move MicroAlgos from one address to another and to update balance records, or to access and modify individual balance records
// After a call to Put (or Move), future calls to Get or Move will reflect the updated balance record(s)
type Balances interface {
diff --git a/ledger/apply/keyreg.go b/ledger/apply/keyreg.go
index 8cffc1805..4c7155d71 100644
--- a/ledger/apply/keyreg.go
+++ b/ledger/apply/keyreg.go
@@ -45,13 +45,18 @@ func Keyreg(keyreg transactions.KeyregTxnFields, header transactions.Header, bal
return fmt.Errorf("cannot change online/offline status of non-participating account %v", header.Sender)
}
+ params := balances.ConsensusParams()
+
// Update the registered keys and mark account as online
// (or, if the voting or selection keys are zero, offline/not-participating)
record.VoteID = keyreg.VotePK
record.SelectionID = keyreg.SelectionPK
+ if params.EnableStateProofKeyregCheck {
+ record.StateProofID = keyreg.StateProofPK
+ }
if (keyreg.VotePK == crypto.OneTimeSignatureVerifier{} || keyreg.SelectionPK == crypto.VRFVerifier{}) {
if keyreg.Nonparticipation {
- if balances.ConsensusParams().SupportBecomeNonParticipatingTransactions {
+ if params.SupportBecomeNonParticipatingTransactions {
record.Status = basics.NotParticipating
} else {
return fmt.Errorf("transaction tries to mark an account as nonparticipating, but that transaction is not supported")
@@ -63,8 +68,7 @@ func Keyreg(keyreg transactions.KeyregTxnFields, header transactions.Header, bal
record.VoteLastValid = 0
record.VoteKeyDilution = 0
} else {
-
- if balances.ConsensusParams().EnableKeyregCoherencyCheck {
+ if params.EnableKeyregCoherencyCheck {
if keyreg.VoteLast <= round {
return errKeyregGoingOnlineExpiredParticipationKey
}
@@ -76,9 +80,6 @@ func Keyreg(keyreg transactions.KeyregTxnFields, header transactions.Header, bal
record.VoteFirstValid = keyreg.VoteFirst
record.VoteLastValid = keyreg.VoteLast
record.VoteKeyDilution = keyreg.VoteKeyDilution
- if balances.ConsensusParams().EnableStateProofKeyregCheck {
- record.StateProofID = keyreg.StateProofPK
- }
}
// Write the updated entry
diff --git a/ledger/apply/keyreg_test.go b/ledger/apply/keyreg_test.go
index 96ba42a4a..09699e3f3 100644
--- a/ledger/apply/keyreg_test.go
+++ b/ledger/apply/keyreg_test.go
@@ -229,6 +229,33 @@ func TestStateProofPKKeyReg(t *testing.T) {
acct, err = mockBal.Get(tx.Src(), false)
require.NoError(t, err)
require.False(t, acct.StateProofID.IsEmpty())
+
+ // go offline in current consensus version: StateProofID should be empty
+ emptyKeyreg := transactions.KeyregTxnFields{}
+ err = Keyreg(emptyKeyreg, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(0))
+ require.NoError(t, err)
+
+ acct, err = mockBal.Get(tx.Src(), false)
+ require.NoError(t, err)
+ require.True(t, acct.StateProofID.IsEmpty())
+
+ // run same test using vFuture
+ mockBal = makeMockBalances(protocol.ConsensusFuture)
+ err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(0))
+ require.NoError(t, err)
+
+ acct, err = mockBal.Get(tx.Src(), false)
+ require.NoError(t, err)
+ require.False(t, acct.StateProofID.IsEmpty())
+
+ // go offline in vFuture: StateProofID should be empty
+ err = Keyreg(emptyKeyreg, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(0))
+ require.NoError(t, err)
+
+ acct, err = mockBal.Get(tx.Src(), false)
+ require.NoError(t, err)
+ require.True(t, acct.StateProofID.IsEmpty())
+
}
func createTestTxn(t *testing.T, src basics.Address, secretParticipation *crypto.SignatureSecrets, vrfSecrets *crypto.VRFSecrets) transactions.Transaction {
@@ -255,7 +282,7 @@ func createTestTxnWithPeriod(t *testing.T, src basics.Address, secretParticipati
KeyregTxnFields: transactions.KeyregTxnFields{
VotePK: crypto.OneTimeSignatureVerifier(secretParticipation.SignatureVerifier),
SelectionPK: vrfSecrets.PK,
- StateProofPK: *signer.GetVerifier(),
+ StateProofPK: signer.GetVerifier().Commitment,
VoteFirst: 0,
VoteLast: 100,
},
diff --git a/ledger/apply/stateproof.go b/ledger/apply/stateproof.go
new file mode 100644
index 000000000..fca56c0f3
--- /dev/null
+++ b/ledger/apply/stateproof.go
@@ -0,0 +1,71 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package apply
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/stateproof/verify"
+)
+
+// Errors for apply stateproof
+var (
+ ErrStateProofTypeNotSupported = errors.New("state proof type not supported")
+ ErrExpectedDifferentStateProofRound = errors.New("expected different state proof round")
+)
+
+// StateProof applies the StateProof transaction and setting the next StateProof round
+func StateProof(tx transactions.StateProofTxnFields, atRound basics.Round, sp StateProofsApplier, validate bool) error {
+ spType := tx.StateProofType
+ if spType != protocol.StateProofBasic {
+ return fmt.Errorf("applyStateProof: %w - type %d ", ErrStateProofTypeNotSupported, spType)
+ }
+
+ lastRoundInInterval := basics.Round(tx.Message.LastAttestedRound)
+ lastRoundHdr, err := sp.BlockHdr(lastRoundInInterval)
+ if err != nil {
+ return err
+ }
+
+ nextStateProofRnd := sp.GetStateProofNextRound()
+ if nextStateProofRnd == 0 || nextStateProofRnd != lastRoundInInterval {
+ return fmt.Errorf("applyStateProof: %w - expecting state proof for %d, but new state proof is for %d",
+ ErrExpectedDifferentStateProofRound, nextStateProofRnd, lastRoundInInterval)
+ }
+
+ proto := config.Consensus[lastRoundHdr.CurrentProtocol]
+ if validate {
+ votersRnd := lastRoundInInterval.SubSaturate(basics.Round(proto.StateProofInterval))
+ votersHdr, err := sp.BlockHdr(votersRnd)
+ if err != nil {
+ return err
+ }
+
+ err = verify.ValidateStateProof(&lastRoundHdr, &tx.StateProof, &votersHdr, atRound, &tx.Message)
+ if err != nil {
+ return err
+ }
+ }
+
+ sp.SetStateProofNextRound(lastRoundInInterval + basics.Round(proto.StateProofInterval))
+ return nil
+}
diff --git a/ledger/archival_test.go b/ledger/archival_test.go
index 7b59a7850..cb6dc4ae5 100644
--- a/ledger/archival_test.go
+++ b/ledger/archival_test.go
@@ -21,9 +21,7 @@ import (
"crypto/rand"
"database/sql"
"fmt"
- "io/ioutil"
mathrand "math/rand"
- "os"
"path/filepath"
"reflect"
"runtime"
@@ -97,6 +95,10 @@ func (wl *wrappedLedger) GenesisProto() config.ConsensusParams {
return wl.l.GenesisProto()
}
+func (wl *wrappedLedger) GenesisProtoVersion() protocol.ConsensusVersion {
+ return wl.l.GenesisProtoVersion()
+}
+
func (wl *wrappedLedger) GenesisAccounts() map[basics.Address]basics.AccountData {
return wl.l.GenesisAccounts()
}
@@ -194,11 +196,8 @@ func TestArchivalRestart(t *testing.T) {
deadlock.Opts.Disable = deadlockDisable
}()
- dbTempDir, err := ioutil.TempDir("", "testdir"+t.Name())
- require.NoError(t, err)
dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- dbPrefix := filepath.Join(dbTempDir, dbName)
- defer os.RemoveAll(dbTempDir)
+ dbPrefix := filepath.Join(t.TempDir(), dbName)
genesisInitState := getInitState()
const inMem = false // use persistent storage
@@ -344,11 +343,8 @@ func TestArchivalCreatables(t *testing.T) {
deadlock.Opts.Disable = deadlockDisable
}()
- dbTempDir, err := ioutil.TempDir("", "testdir"+t.Name())
- require.NoError(t, err)
dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- dbPrefix := filepath.Join(dbTempDir, dbName)
- defer os.RemoveAll(dbTempDir)
+ dbPrefix := filepath.Join(t.TempDir(), dbName)
genesisInitState := getInitState()
@@ -376,7 +372,7 @@ func TestArchivalCreatables(t *testing.T) {
var creators []basics.Address
for i := 0; i < maxBlocks; i++ {
creator := basics.Address{}
- _, err = rand.Read(creator[:])
+ _, err := rand.Read(creator[:])
require.NoError(t, err)
creators = append(creators, creator)
genesisInitState.Accounts[creator] = basics.MakeAccountData(basics.Offline, basics.MicroAlgos{Raw: 1234567890})
@@ -697,11 +693,9 @@ func TestArchivalFromNonArchival(t *testing.T) {
defer func() {
deadlock.Opts.Disable = deadlockDisable
}()
- dbTempDir, err := ioutil.TempDir(os.TempDir(), "testdir")
- require.NoError(t, err)
+
dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- dbPrefix := filepath.Join(dbTempDir, dbName)
- defer os.RemoveAll(dbTempDir)
+ dbPrefix := filepath.Join(t.TempDir(), dbName)
genesisInitState := getInitState()
@@ -714,7 +708,7 @@ func TestArchivalFromNonArchival(t *testing.T) {
for i := 0; i < 50; i++ {
addr := basics.Address{}
- _, err = rand.Read(addr[:])
+ _, err := rand.Read(addr[:])
require.NoError(t, err)
br := basics.BalanceRecord{AccountData: basics.MakeAccountData(basics.Offline, basics.MicroAlgos{Raw: 1234567890}), Addr: addr}
genesisInitState.Accounts[addr] = br.AccountData
@@ -801,7 +795,7 @@ func checkTrackers(t *testing.T, wl *wrappedLedger, rnd basics.Round) (basics.Ro
wl.l.trackerMu.RLock()
defer wl.l.trackerMu.RUnlock()
for _, trk := range wl.l.trackers.trackers {
- if au, ok := trk.(*accountUpdates); ok {
+ if _, ok := trk.(*accountUpdates); ok {
wl.l.trackers.waitAccountsWriting()
minSave, _ = trk.committedUpTo(rnd)
wl.l.trackers.committedUpTo(rnd)
@@ -814,7 +808,7 @@ func checkTrackers(t *testing.T, wl *wrappedLedger, rnd basics.Round) (basics.Ro
trackerType = reflect.TypeOf(trk).Elem()
cleanTracker = reflect.New(trackerType).Interface().(ledgerTracker)
- au = cleanTracker.(*accountUpdates)
+ au := cleanTracker.(*accountUpdates)
cfg := config.GetDefaultLocal()
cfg.Archival = true
au.initialize(cfg)
diff --git a/ledger/blockHeaderCache.go b/ledger/blockHeaderCache.go
new file mode 100644
index 000000000..5e2be47d2
--- /dev/null
+++ b/ledger/blockHeaderCache.go
@@ -0,0 +1,86 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+
+ "github.com/algorand/go-deadlock"
+)
+
+const latestHeaderCacheSize = 512
+const blockHeadersLRUCacheSize = 10
+
+// blockHeaderCache is a wrapper for all block header cache mechanisms used within the Ledger.
+type blockHeaderCache struct {
+ lruCache heapLRUCache
+ latestHeaderCache latestBlockHeaderCache
+}
+
+type latestBlockHeaderCache struct {
+ blockHeaders [latestHeaderCacheSize]bookkeeping.BlockHeader
+ mutex deadlock.RWMutex
+}
+
+func (c *blockHeaderCache) initialize() {
+ c.lruCache.maxEntries = blockHeadersLRUCacheSize
+}
+
+func (c *blockHeaderCache) get(round basics.Round) (blockHeader bookkeeping.BlockHeader, exists bool) {
+ // check latestHeaderCache first
+ blockHeader, exists = c.latestHeaderCache.get(round)
+ if exists {
+ return
+ }
+
+ // if not found in latestHeaderCache, check LRUCache
+ value, exists := c.lruCache.Get(round)
+ if exists {
+ blockHeader = value.(bookkeeping.BlockHeader)
+ }
+
+ return
+}
+
+func (c *blockHeaderCache) put(blockHeader bookkeeping.BlockHeader) {
+ c.latestHeaderCache.put(blockHeader)
+ c.lruCache.Put(blockHeader.Round, blockHeader)
+}
+
+func (c *latestBlockHeaderCache) get(round basics.Round) (blockHeader bookkeeping.BlockHeader, exists bool) {
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+
+ idx := round % latestHeaderCacheSize
+ if round == 0 || c.blockHeaders[idx].Round != round { // blockHeader is empty or not requested round
+ return bookkeeping.BlockHeader{}, false
+ }
+ blockHeader = c.blockHeaders[idx]
+
+ return blockHeader, true
+}
+
+func (c *latestBlockHeaderCache) put(blockHeader bookkeeping.BlockHeader) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ idx := blockHeader.Round % latestHeaderCacheSize
+ if blockHeader.Round > c.blockHeaders[idx].Round { // provided blockHeader is more recent than cached one
+ c.blockHeaders[idx] = blockHeader
+ }
+}
diff --git a/ledger/blockHeaderCache_test.go b/ledger/blockHeaderCache_test.go
new file mode 100644
index 000000000..a5fac5ae6
--- /dev/null
+++ b/ledger/blockHeaderCache_test.go
@@ -0,0 +1,94 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestBlockHeaderCache(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ var cache blockHeaderCache
+ cache.initialize()
+ for i := basics.Round(1024); i < 1024+latestHeaderCacheSize; i++ {
+ hdr := bookkeeping.BlockHeader{Round: i}
+ cache.put(hdr)
+ }
+
+ rnd := basics.Round(120)
+ hdr := bookkeeping.BlockHeader{Round: rnd}
+ cache.put(hdr)
+
+ _, exists := cache.get(rnd)
+ a.True(exists)
+
+ _, exists = cache.lruCache.Get(rnd)
+ a.True(exists)
+
+ _, exists = cache.latestHeaderCache.get(rnd)
+ a.False(exists)
+
+ rnd = basics.Round(2048)
+ hdr = bookkeeping.BlockHeader{Round: rnd}
+ cache.put(hdr)
+
+ _, exists = cache.latestHeaderCache.get(rnd)
+ a.True(exists)
+
+ _, exists = cache.lruCache.Get(rnd)
+ a.True(exists)
+
+}
+
+func TestLatestBlockHeaderCache(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ var cache latestBlockHeaderCache
+ for i := basics.Round(123); i < latestHeaderCacheSize; i++ {
+ hdr := bookkeeping.BlockHeader{Round: i}
+ cache.put(hdr)
+ }
+
+ for i := basics.Round(0); i < 123; i++ {
+ _, exists := cache.get(i)
+ a.False(exists)
+ }
+
+ for i := basics.Round(123); i < latestHeaderCacheSize; i++ {
+ hdr, exists := cache.get(i)
+ a.True(exists)
+ a.Equal(i, hdr.Round)
+ }
+}
+
+func TestCacheSizeConsensus(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ a.Equal(uint64(latestHeaderCacheSize), config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*2)
+}
diff --git a/ledger/catchpointfileheader.go b/ledger/catchpointfileheader.go
new file mode 100644
index 000000000..741e13aa0
--- /dev/null
+++ b/ledger/catchpointfileheader.go
@@ -0,0 +1,38 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+)
+
+// CatchpointFileHeader is the content we would have in the "content.msgpack" file in the catchpoint tar archive.
+// we need it to be public, as it's being decoded externally by the catchpointdump utility.
+type CatchpointFileHeader struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Version uint64 `codec:"version"`
+ BalancesRound basics.Round `codec:"balancesRound"`
+ BlocksRound basics.Round `codec:"blocksRound"`
+ Totals ledgercore.AccountTotals `codec:"accountTotals"`
+ TotalAccounts uint64 `codec:"accountsCount"`
+ TotalChunks uint64 `codec:"chunksCount"`
+ Catchpoint string `codec:"catchpoint"`
+ BlockHeaderDigest crypto.Digest `codec:"blockHeaderDigest"`
+}
diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go
index f4c3e9ea5..0cfc9089d 100644
--- a/ledger/catchpointtracker.go
+++ b/ledger/catchpointtracker.go
@@ -17,11 +17,15 @@
package ledger
import (
+ "archive/tar"
+ "compress/gzip"
"context"
"database/sql"
"encoding/binary"
"encoding/hex"
+ "errors"
"fmt"
+ "io"
"os"
"path/filepath"
"strconv"
@@ -30,6 +34,7 @@ import (
"time"
"github.com/algorand/go-deadlock"
+ "github.com/golang/snappy"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
@@ -62,6 +67,13 @@ const (
trieAccumulatedChangesFlush = 256
// CatchpointDirName represents the directory name in which all the catchpoints files are stored
CatchpointDirName = "catchpoints"
+
+ // CatchpointFileVersionV5 is the catchpoint file version that was used when the database schema was V0-V5.
+ CatchpointFileVersionV5 = uint64(0200)
+ // CatchpointFileVersionV6 is the catchpoint file version that is matching database schema V6.
+ // This version introduced accounts and resources separation. The first catchpoint
+ // round of this version is >= `reenableCatchpointsRound`.
+ CatchpointFileVersionV6 = uint64(0201)
)
// TrieMemoryConfig is the memory configuration setup used for the merkle trie.
@@ -72,11 +84,25 @@ var TrieMemoryConfig = merkletrie.MemoryConfig{
MaxChildrenPagesThreshold: 64,
}
+func catchpointStage1Encoder(w io.Writer) (io.WriteCloser, error) {
+ return snappy.NewBufferedWriter(w), nil
+}
+
+type snappyReadCloser struct {
+ *snappy.Reader
+}
+
+func (snappyReadCloser) Close() error { return nil }
+
+func catchpointStage1Decoder(r io.Reader) (io.ReadCloser, error) {
+ return snappyReadCloser{snappy.NewReader(r)}, nil
+}
+
type catchpointTracker struct {
// dbDirectory is the directory where the ledger and block sql file resides as well as the parent directory for the catchup files to be generated
dbDirectory string
- // catchpointInterval is the configured interval at which the accountUpdates would generate catchpoint labels and catchpoint files.
+ // catchpointInterval is the configured interval at which the catchpointTracker would generate catchpoint labels and catchpoint files.
catchpointInterval uint64
// catchpointFileHistoryLength defines how many catchpoint files we want to store back.
@@ -99,65 +125,75 @@ type catchpointTracker struct {
// note that this is the last catchpoint *label* and not the catchpoint file.
lastCatchpointLabel string
- // catchpointSlowWriting suggest to the accounts writer that it should finish writing up the catchpoint file ASAP.
- // when this channel is closed, the accounts writer would try and complete the writing as soon as possible.
- // otherwise, it would take it's time and perform periodic sleeps between chunks processing.
- catchpointSlowWriting chan struct{}
+ // catchpointDataSlowWriting suggests to the accounts writer that it should finish
+ // writing up the (first stage) catchpoint data file ASAP. When this channel is
+ // closed, the accounts writer would try and complete the writing as soon as possible.
+ // Otherwise, it would take its time and perform periodic sleeps between chunks
+ // processing.
+ catchpointDataSlowWriting chan struct{}
- // catchpointWriting help to synchronize the catchpoint file writing. When this atomic variable is 0, no writing is going on.
+ // catchpointDataWriting helps to synchronize the (first stage) catchpoint data file
+ // writing. When this atomic variable is 0, no writing is going on.
// Any non-zero value indicates a catchpoint being written, or scheduled to be written.
- catchpointWriting int32
+ catchpointDataWriting int32
// The Trie tracking the current account balances. Always matches the balances that were
// written to the database.
balancesTrie *merkletrie.Trie
- // catchpointsMu is the synchronization mutex for accessing the various non-static variables.
- catchpointsMu deadlock.RWMutex
-
- // roundDigest stores the digest of the block for every round starting with dbRound and every round after it.
+ // roundDigest stores the digest of the block for every round starting with dbRound+1 and every round after it.
roundDigest []crypto.Digest
- // accountDataResourceSeparationRound is a round where the EnableAccountDataResourceSeparation feature was enabled via the consensus.
+ // reenableCatchpointsRound is a round where the EnableOnlineAccountCatchpoints feature was enabled via the consensus.
// we avoid generating catchpoints before that round in order to ensure the network remain consistent in the catchpoint
// label being produced. This variable could be "wrong" in two cases -
- // 1. It's zero, meaning that the EnableAccountDataResourceSeparation has yet to be seen.
- // 2. It's non-zero meaning that it the given round is after the EnableAccountDataResourceSeparation was enabled ( it might be exact round
+ // 1. It's zero, meaning that the EnableOnlineAccountCatchpoints has yet to be seen.
+ // 2. It's non-zero meaning that it the given round is after the EnableOnlineAccountCatchpoints was enabled ( it might be exact round
// but that's only if newBlock was called with that round ), plus the lookback.
- accountDataResourceSeparationRound basics.Round
+ reenableCatchpointsRound basics.Round
// forceCatchpointFileWriting used for debugging purpose by bypassing the test against
- // accountDataResourceSeparationRound in isCatchpointRound(), so that we could generate
+ // reenableCatchpointsRound in isCatchpointRound(), so that we could generate
// catchpoint files even before the protocol upgrade took place.
forceCatchpointFileWriting bool
+
+ // catchpointsMu protects `roundDigest`, `reenableCatchpointsRound` and
+ // `lastCatchpointLabel`.
+ catchpointsMu deadlock.RWMutex
}
// initialize initializes the catchpointTracker structure
func (ct *catchpointTracker) initialize(cfg config.Local, dbPathPrefix string) {
ct.dbDirectory = filepath.Dir(dbPathPrefix)
- ct.enableGeneratingCatchpointFiles = cfg.Archival
+
switch cfg.CatchpointTracking {
case -1:
- ct.catchpointInterval = 0
+ // No catchpoints.
default:
- // give a warning, then fall thought
+ // Give a warning, then fall through to case 0.
logging.Base().Warnf("catchpointTracker: the CatchpointTracking field in the config.json file contains an invalid value (%d). The default value of 0 would be used instead.", cfg.CatchpointTracking)
fallthrough
case 0:
- if ct.enableGeneratingCatchpointFiles {
+ if cfg.Archival && (cfg.CatchpointInterval > 0) {
ct.catchpointInterval = cfg.CatchpointInterval
- } else {
- ct.catchpointInterval = 0
+ ct.enableGeneratingCatchpointFiles = true
}
case 1:
- ct.catchpointInterval = cfg.CatchpointInterval
+ if cfg.CatchpointInterval > 0 {
+ ct.catchpointInterval = cfg.CatchpointInterval
+ ct.enableGeneratingCatchpointFiles = cfg.Archival
+ }
case 2:
- ct.catchpointInterval = cfg.CatchpointInterval
- ct.enableGeneratingCatchpointFiles = true
+ if cfg.CatchpointInterval > 0 {
+ ct.catchpointInterval = cfg.CatchpointInterval
+ ct.enableGeneratingCatchpointFiles = true
+ }
case forceCatchpointFileGenerationTrackingMode:
- ct.catchpointInterval = cfg.CatchpointInterval
- ct.enableGeneratingCatchpointFiles = true
- ct.forceCatchpointFileWriting = true
+ if cfg.CatchpointInterval > 0 {
+ ct.catchpointInterval = cfg.CatchpointInterval
+ ct.enableGeneratingCatchpointFiles = true
+ ct.forceCatchpointFileWriting = true
+ }
}
ct.catchpointFileHistoryLength = cfg.CatchpointFileHistoryLength
@@ -173,72 +209,155 @@ func (ct *catchpointTracker) GetLastCatchpointLabel() string {
return ct.lastCatchpointLabel
}
-// loadFromDisk loads the state of a tracker from persistent
-// storage. The ledger argument allows loadFromDisk to load
-// blocks from the database, or access its own state. The
-// ledgerForTracker interface abstracts away the details of
-// ledger internals so that individual trackers can be tested
-// in isolation.
-func (ct *catchpointTracker) loadFromDisk(l ledgerForTracker, lastBalancesRound basics.Round) (err error) {
- ct.log = l.trackerLog()
- ct.dbs = l.trackerDB()
+func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basics.Round, updatingBalancesDuration time.Duration) error {
+ ct.log.Infof("finishing catchpoint's first stage dbRound: %d", dbRound)
- ct.roundDigest = nil
- ct.catchpointWriting = 0
- // keep these channel closed if we're not generating catchpoint
- ct.catchpointSlowWriting = make(chan struct{}, 1)
- close(ct.catchpointSlowWriting)
+ var totalAccounts uint64
+ var totalChunks uint64
+ var biggestChunkLen uint64
- err = ct.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- err0 := ct.accountsInitializeHashes(ctx, tx, lastBalancesRound)
- if err0 != nil {
- return err0
+ if ct.enableGeneratingCatchpointFiles {
+ // Generate the catchpoint file. This need to be done inline so that it will
+ // block any new accounts that from being written. generateCatchpointData()
+ // expects that the accounts data would not be modified in the background during
+ // it's execution.
+ var err error
+ totalAccounts, totalChunks, biggestChunkLen, err = ct.generateCatchpointData(
+ ctx, dbRound, updatingBalancesDuration)
+ atomic.StoreInt32(&ct.catchpointDataWriting, 0)
+ if err != nil {
+ return err
}
+ }
+
+ f := func(ctx context.Context, tx *sql.Tx) error {
+ err := ct.recordFirstStageInfo(ctx, tx, dbRound, totalAccounts, totalChunks, biggestChunkLen)
+ if err != nil {
+ return err
+ }
+
+ // Clear the db record.
+ return writeCatchpointStateUint64(ctx, tx, catchpointStateWritingFirstStageInfo, 0)
+ }
+ return ct.dbs.Wdb.Atomic(f)
+}
+
+// Possibly finish generating first stage catchpoint db record and data file after
+// a crash.
+func (ct *catchpointTracker) finishFirstStageAfterCrash(dbRound basics.Round) error {
+ v, err := readCatchpointStateUint64(
+ context.Background(), ct.dbs.Rdb.Handle, catchpointStateWritingFirstStageInfo)
+ if err != nil {
+ return err
+ }
+ if v == 0 {
return nil
- })
+ }
+ // First, delete the unfinished data file.
+ relCatchpointDataFilePath := filepath.Join(
+ CatchpointDirName,
+ makeCatchpointDataFilePath(dbRound))
+ err = removeSingleCatchpointFileFromDisk(ct.dbDirectory, relCatchpointDataFilePath)
if err != nil {
return err
}
- ct.accountsq, err = accountsInitDbQueries(ct.dbs.Rdb.Handle, ct.dbs.Wdb.Handle)
+ return ct.finishFirstStage(context.Background(), dbRound, 0)
+}
+
+func (ct *catchpointTracker) finishCatchpointsAfterCrash(catchpointLookback uint64) error {
+ records, err := selectUnfinishedCatchpoints(context.Background(), ct.dbs.Rdb.Handle)
if err != nil {
- return
+ return err
}
- ct.lastCatchpointLabel, _, err = ct.accountsq.readCatchpointStateString(context.Background(), catchpointStateLastCatchpoint)
+ for _, record := range records {
+ // First, delete the unfinished catchpoint file.
+ relCatchpointFilePath := filepath.Join(
+ CatchpointDirName,
+ makeCatchpointFilePath(basics.Round(record.round)))
+ err = removeSingleCatchpointFileFromDisk(ct.dbDirectory, relCatchpointFilePath)
+ if err != nil {
+ return err
+ }
+
+ err = ct.finishCatchpoint(
+ context.Background(), record.round, record.blockHash, catchpointLookback)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (ct *catchpointTracker) recoverFromCrash(dbRound basics.Round) error {
+ err := ct.finishFirstStageAfterCrash(dbRound)
if err != nil {
- return
+ return err
}
- writingCatchpointRound, _, err := ct.accountsq.readCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint)
+ ctx := context.Background()
+
+ catchpointLookback, err := readCatchpointStateUint64(
+ ctx, ct.dbs.Rdb.Handle, catchpointStateCatchpointLookback)
if err != nil {
return err
}
- if writingCatchpointRound == 0 || !ct.catchpointEnabled() {
- return nil
+
+ if catchpointLookback != 0 {
+ err = ct.finishCatchpointsAfterCrash(catchpointLookback)
+ if err != nil {
+ return err
+ }
+
+ if uint64(dbRound) >= catchpointLookback {
+ err := ct.pruneFirstStageRecordsData(ctx, dbRound-basics.Round(catchpointLookback))
+ if err != nil {
+ return err
+ }
+ }
}
- var dbRound basics.Round
- // make sure that the database is at the desired round.
- err = ct.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- dbRound, err = accountsRound(tx)
- return
+
+ return nil
+}
+
+// loadFromDisk loads the state of a tracker from persistent
+// storage. The ledger argument allows loadFromDisk to load
+// blocks from the database, or access its own state. The
+// ledgerForTracker interface abstracts away the details of
+// ledger internals so that individual trackers can be tested
+// in isolation.
+func (ct *catchpointTracker) loadFromDisk(l ledgerForTracker, dbRound basics.Round) (err error) {
+ ct.log = l.trackerLog()
+ ct.dbs = l.trackerDB()
+
+ ct.roundDigest = nil
+ ct.catchpointDataWriting = 0
+ // keep these channel closed if we're not generating catchpoint
+ ct.catchpointDataSlowWriting = make(chan struct{}, 1)
+ close(ct.catchpointDataSlowWriting)
+
+ err = ct.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return ct.accountsInitializeHashes(ctx, tx, dbRound)
})
if err != nil {
return err
}
- if dbRound != basics.Round(writingCatchpointRound) {
- return nil
+
+ ct.accountsq, err = accountsInitDbQueries(ct.dbs.Rdb.Handle)
+ if err != nil {
+ return
}
- blk, err := l.Block(dbRound)
+ ct.lastCatchpointLabel, err = readCatchpointStateString(
+ context.Background(), ct.dbs.Rdb.Handle, catchpointStateLastCatchpoint)
if err != nil {
- return err
+ return
}
- blockHeaderDigest := blk.Digest()
- ct.generateCatchpoint(context.Background(), basics.Round(writingCatchpointRound), ct.lastCatchpointLabel, blockHeaderDigest, time.Duration(0))
- return nil
+ return ct.recoverFromCrash(dbRound)
}
// newBlock informs the tracker of a new block from round
@@ -246,12 +365,16 @@ func (ct *catchpointTracker) loadFromDisk(l ledgerForTracker, lastBalancesRound
func (ct *catchpointTracker) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
ct.catchpointsMu.Lock()
defer ct.catchpointsMu.Unlock()
+
ct.roundDigest = append(ct.roundDigest, blk.Digest())
- if config.Consensus[blk.CurrentProtocol].EnableAccountDataResourceSeparation && ct.accountDataResourceSeparationRound == 0 {
- ct.accountDataResourceSeparationRound = blk.BlockHeader.Round + basics.Round(config.Consensus[blk.CurrentProtocol].MaxBalLookback)
+ if (config.Consensus[blk.CurrentProtocol].EnableOnlineAccountCatchpoints || ct.forceCatchpointFileWriting) && ct.reenableCatchpointsRound == 0 {
+ catchpointLookback := config.Consensus[blk.CurrentProtocol].CatchpointLookback
+ if catchpointLookback == 0 {
+ catchpointLookback = config.Consensus[blk.CurrentProtocol].MaxBalLookback
+ }
+ ct.reenableCatchpointsRound = blk.BlockHeader.Round + basics.Round(catchpointLookback)
}
-
}
// committedUpTo implements the ledgerTracker interface for catchpointTracker.
@@ -263,65 +386,102 @@ func (ct *catchpointTracker) committedUpTo(rnd basics.Round) (retRound, lookback
return rnd, basics.Round(0)
}
-func (ct *catchpointTracker) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
- var hasMultipleIntermediateCatchpoint, hasIntermediateCatchpoint bool
+// Calculate whether we have intermediate first stage catchpoint rounds and the
+// new offset.
+func calculateFirstStageRounds(oldBase basics.Round, offset uint64, reenableCatchpointsRound basics.Round, catchpointInterval uint64, catchpointLookback uint64) (hasIntermediateFirstStageRound bool, hasMultipleIntermediateFirstStageRounds bool, newOffset uint64) {
+ newOffset = offset
- newBase := dcr.oldBase + basics.Round(dcr.offset)
+ if reenableCatchpointsRound == 0 {
+ return
+ }
- // check if there was a catchpoint between dcc.oldBase+lookback and dcc.oldBase+offset+lookback
- if ct.catchpointInterval > 0 {
- nextCatchpointRound := ((uint64(dcr.oldBase+dcr.lookback) + ct.catchpointInterval) / ct.catchpointInterval) * ct.catchpointInterval
+ minFirstStageRound := oldBase + 1
+ if (reenableCatchpointsRound > basics.Round(catchpointLookback)) &&
+ (reenableCatchpointsRound-basics.Round(catchpointLookback) >
+ minFirstStageRound) {
+ minFirstStageRound =
+ reenableCatchpointsRound - basics.Round(catchpointLookback)
+ }
- if nextCatchpointRound < uint64(dcr.oldBase+dcr.lookback)+dcr.offset {
- mostRecentCatchpointRound := (uint64(committedRound) / ct.catchpointInterval) * ct.catchpointInterval
- newBase = basics.Round(nextCatchpointRound) - dcr.lookback
- if mostRecentCatchpointRound > nextCatchpointRound {
- hasMultipleIntermediateCatchpoint = true
- // skip if there is more than one catchpoint in queue
- newBase = basics.Round(mostRecentCatchpointRound) - dcr.lookback
- }
- hasIntermediateCatchpoint = true
+ // The smallest integer r >= minFirstStageRound such that
+ // (r + catchpointLookback) % ct.catchpointInterval == 0.
+ first := (int64(minFirstStageRound)+int64(catchpointLookback)+
+ int64(catchpointInterval)-1)/
+ int64(catchpointInterval)*int64(catchpointInterval) -
+ int64(catchpointLookback)
+ // The largest integer r <= dcr.oldBase + dcr.offset such that
+ // (r + catchpointLookback) % ct.catchpointInterval == 0.
+ last := (int64(oldBase)+int64(offset)+int64(catchpointLookback))/
+ int64(catchpointInterval)*int64(catchpointInterval) - int64(catchpointLookback)
+
+ if first <= last {
+ hasIntermediateFirstStageRound = true
+ // We skip earlier catchpoints if there is more than one to generate.
+ newOffset = uint64(last) - uint64(oldBase)
+
+ if first < last {
+ hasMultipleIntermediateFirstStageRounds = true
}
}
+ return
+}
+
+func (ct *catchpointTracker) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
+ if ct.catchpointInterval == 0 {
+ return dcr
+ }
+
+ ct.catchpointsMu.Lock()
+ reenableCatchpointsRound := ct.reenableCatchpointsRound
+ ct.catchpointsMu.Unlock()
+
+ // Check if we need to do the first stage of catchpoint generation.
+ var hasIntermediateFirstStageRound bool
+ var hasMultipleIntermediateFirstStageRounds bool
+ hasIntermediateFirstStageRound, hasMultipleIntermediateFirstStageRounds, dcr.offset =
+ calculateFirstStageRounds(
+ dcr.oldBase, dcr.offset, reenableCatchpointsRound,
+ ct.catchpointInterval, dcr.catchpointLookback)
// if we're still writing the previous balances, we can't move forward yet.
- if ct.IsWritingCatchpointFile() {
+ if ct.IsWritingCatchpointDataFile() {
// if we hit this path, it means that we're still writing a catchpoint.
// see if the new delta range contains another catchpoint.
- if hasIntermediateCatchpoint {
+ if hasIntermediateFirstStageRound {
// check if we're already attempting to perform fast-writing.
select {
- case <-ct.catchpointSlowWriting:
+ case <-ct.catchpointDataSlowWriting:
// yes, we're already doing fast-writing.
default:
// no, we're not yet doing fast writing, make it so.
- close(ct.catchpointSlowWriting)
+ close(ct.catchpointDataSlowWriting)
}
}
return nil
}
- newOffset := uint64(newBase - dcr.oldBase)
- // trackers are not allowed to increase offsets, only descease
- if newOffset < dcr.offset {
- dcr.offset = newOffset
- }
-
- // check to see if this is a catchpoint round
- dcr.isCatchpointRound = ct.isCatchpointRound(dcr.offset, dcr.oldBase, dcr.lookback)
+ if hasIntermediateFirstStageRound {
+ dcr.catchpointFirstStage = true
- if dcr.isCatchpointRound && ct.enableGeneratingCatchpointFiles {
- // store non-zero ( all ones ) into the catchpointWriting atomic variable to indicate that a catchpoint is being written ( or, queued to be written )
- atomic.StoreInt32(&ct.catchpointWriting, int32(-1))
- ct.catchpointSlowWriting = make(chan struct{}, 1)
- if hasMultipleIntermediateCatchpoint {
- close(ct.catchpointSlowWriting)
+ if ct.enableGeneratingCatchpointFiles {
+ // store non-zero ( all ones ) into the catchpointWriting atomic variable to indicate that a catchpoint is being written ( or, queued to be written )
+ atomic.StoreInt32(&ct.catchpointDataWriting, int32(-1))
+ ct.catchpointDataSlowWriting = make(chan struct{}, 1)
+ if hasMultipleIntermediateFirstStageRounds {
+ close(ct.catchpointDataSlowWriting)
+ }
}
}
- dcr.catchpointWriting = &ct.catchpointWriting
+ dcr.catchpointDataWriting = &ct.catchpointDataWriting
dcr.enableGeneratingCatchpointFiles = ct.enableGeneratingCatchpointFiles
+ {
+ rounds := calculateCatchpointRounds(
+ dcr.oldBase+1, dcr.oldBase+basics.Round(dcr.offset), ct.catchpointInterval)
+ dcr.catchpointSecondStage = (len(rounds) > 0)
+ }
+
return dcr
}
@@ -330,9 +490,10 @@ func (ct *catchpointTracker) produceCommittingTask(committedRound basics.Round,
func (ct *catchpointTracker) prepareCommit(dcc *deferredCommitContext) error {
ct.catchpointsMu.RLock()
defer ct.catchpointsMu.RUnlock()
- if dcc.isCatchpointRound {
- dcc.committedRoundDigest = ct.roundDigest[dcc.offset+uint64(dcc.lookback)-1]
- }
+
+ dcc.committedRoundDigests = make([]crypto.Digest, dcc.offset)
+ copy(dcc.committedRoundDigests, ct.roundDigest[:dcc.offset])
+
return nil
}
@@ -342,10 +503,9 @@ func (ct *catchpointTracker) commitRound(ctx context.Context, tx *sql.Tx, dcc *d
dbRound := dcc.oldBase
defer func() {
- if err != nil {
- if dcc.isCatchpointRound && ct.enableGeneratingCatchpointFiles {
- atomic.StoreInt32(&ct.catchpointWriting, 0)
- }
+ if err != nil && dcc.catchpointFirstStage &&
+ ct.enableGeneratingCatchpointFiles {
+ atomic.StoreInt32(&ct.catchpointDataWriting, 0)
}
}()
@@ -384,30 +544,38 @@ func (ct *catchpointTracker) commitRound(ctx context.Context, tx *sql.Tx, dcc *d
dcc.stats.MerkleTrieUpdateDuration = now - dcc.stats.MerkleTrieUpdateDuration
}
- err = updateAccountsHashRound(tx, treeTargetRound)
+ err = updateAccountsHashRound(ctx, tx, treeTargetRound)
if err != nil {
return err
}
- if dcc.isCatchpointRound {
- dcc.trieBalancesHash, err = ct.balancesTrie.RootHash()
+ if dcc.catchpointFirstStage {
+ err = writeCatchpointStateUint64(ctx, tx, catchpointStateWritingFirstStageInfo, 1)
if err != nil {
return err
}
}
- return nil
-}
-func (ct *catchpointTracker) postCommit(ctx context.Context, dcc *deferredCommitContext) {
- var err error
- if dcc.isCatchpointRound {
- dcc.catchpointLabel, err = ct.accountsCreateCatchpointLabel(dcc.newBase+dcc.lookback, dcc.roundTotals, dcc.committedRoundDigest, dcc.trieBalancesHash)
+ err = writeCatchpointStateUint64(
+ ctx, tx, catchpointStateCatchpointLookback, dcc.catchpointLookback)
+ if err != nil {
+ return err
+ }
+
+ for _, round := range ct.calculateCatchpointRounds(dcc) {
+ err = insertUnfinishedCatchpoint(
+ ctx, tx, round, dcc.committedRoundDigests[round-dcc.oldBase-1])
if err != nil {
- ct.log.Warnf("commitRound : unable to create a catchpoint label: %v", err)
+ return err
}
}
+
+ return nil
+}
+
+func (ct *catchpointTracker) postCommit(ctx context.Context, dcc *deferredCommitContext) {
if ct.balancesTrie != nil {
- _, err = ct.balancesTrie.Evict(false)
+ _, err := ct.balancesTrie.Evict(false)
if err != nil {
ct.log.Warnf("merkle trie failed to evict: %v", err)
}
@@ -415,9 +583,6 @@ func (ct *catchpointTracker) postCommit(ctx context.Context, dcc *deferredCommit
ct.catchpointsMu.Lock()
ct.roundDigest = ct.roundDigest[dcc.offset:]
- if dcc.isCatchpointRound && dcc.catchpointLabel != "" {
- ct.lastCatchpointLabel = dcc.catchpointLabel
- }
ct.catchpointsMu.Unlock()
dcc.updatingBalancesDuration = time.Since(dcc.flushTime)
@@ -427,18 +592,314 @@ func (ct *catchpointTracker) postCommit(ctx context.Context, dcc *deferredCommit
}
}
+func doRepackCatchpoint(ctx context.Context, header CatchpointFileHeader, biggestChunkLen uint64, in *tar.Reader, out *tar.Writer) error {
+ {
+ bytes := protocol.Encode(&header)
+
+ err := out.WriteHeader(&tar.Header{
+ Name: "content.msgpack",
+ Mode: 0600,
+ Size: int64(len(bytes)),
+ })
+ if err != nil {
+ return err
+ }
+
+ _, err = out.Write(bytes)
+ if err != nil {
+ return err
+ }
+ }
+
+ // make buffer for re-use that can fit biggest chunk
+ buf := make([]byte, biggestChunkLen)
+ for {
+ err := ctx.Err()
+ if err != nil {
+ return err
+ }
+
+ header, err := in.Next()
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+
+ n, err := io.ReadAtLeast(in, buf, int(header.Size))
+ if (err != nil) && (err != io.EOF) {
+ return err
+ }
+ if int64(n) != header.Size { // should not happen
+ return fmt.Errorf("read too many bytes from chunk %+v", header)
+ }
+
+ err = out.WriteHeader(header)
+ if err != nil {
+ return err
+ }
+
+ _, err = out.Write(buf[:header.Size])
+ if err != nil {
+ return err
+ }
+ }
+}
+
+func repackCatchpoint(ctx context.Context, header CatchpointFileHeader, biggestChunkLen uint64, dataPath string, outPath string) error {
+ // Initialize streams.
+ fin, err := os.OpenFile(dataPath, os.O_RDONLY, 0666)
+ if err != nil {
+ return err
+ }
+ defer fin.Close()
+
+ compressorIn, err := catchpointStage1Decoder(fin)
+ if err != nil {
+ return err
+ }
+ defer compressorIn.Close()
+
+ tarIn := tar.NewReader(compressorIn)
+
+ fout, err := os.OpenFile(outPath, os.O_RDWR|os.O_CREATE, 0644)
+ if err != nil {
+ return err
+ }
+ defer fout.Close()
+
+ gzipOut, err := gzip.NewWriterLevel(fout, gzip.BestSpeed)
+ if err != nil {
+ return err
+ }
+ defer gzipOut.Close()
+
+ tarOut := tar.NewWriter(gzipOut)
+ defer tarOut.Close()
+
+ // Repack.
+ err = doRepackCatchpoint(ctx, header, biggestChunkLen, tarIn, tarOut)
+ if err != nil {
+ return err
+ }
+
+ // Close streams.
+ err = tarOut.Close()
+ if err != nil {
+ return err
+ }
+
+ err = gzipOut.Close()
+ if err != nil {
+ return err
+ }
+
+ err = fout.Close()
+ if err != nil {
+ return err
+ }
+
+ err = compressorIn.Close()
+ if err != nil {
+ return err
+ }
+
+ err = fin.Close()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Create a catchpoint (a label and possibly a file with db record) and remove
+// the unfinished catchpoint record.
+func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound basics.Round, round basics.Round, dataInfo catchpointFirstStageInfo, blockHash crypto.Digest) error {
+ startTime := time.Now()
+ label := ledgercore.MakeCatchpointLabel(
+ round, blockHash, dataInfo.TrieBalancesHash, dataInfo.Totals).String()
+
+ ct.log.Infof(
+ "creating catchpoint round: %d accountsRound: %d label: %s",
+ round, accountsRound, label)
+
+ err := writeCatchpointStateString(
+ ctx, ct.dbs.Wdb.Handle, catchpointStateLastCatchpoint, label)
+ if err != nil {
+ return err
+ }
+
+ ct.catchpointsMu.Lock()
+ ct.lastCatchpointLabel = label
+ ct.catchpointsMu.Unlock()
+
+ if !ct.enableGeneratingCatchpointFiles {
+ return nil
+ }
+
+ catchpointDataFilePath := filepath.Join(ct.dbDirectory, CatchpointDirName)
+ catchpointDataFilePath =
+ filepath.Join(catchpointDataFilePath, makeCatchpointDataFilePath(accountsRound))
+
+ // Check if the data file exists.
+ _, err = os.Stat(catchpointDataFilePath)
+ if errors.Is(err, os.ErrNotExist) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ // Make a catchpoint file.
+ header := CatchpointFileHeader{
+ Version: CatchpointFileVersionV6,
+ BalancesRound: accountsRound,
+ BlocksRound: round,
+ Totals: dataInfo.Totals,
+ TotalAccounts: dataInfo.TotalAccounts,
+ TotalChunks: dataInfo.TotalChunks,
+ Catchpoint: label,
+ BlockHeaderDigest: blockHash,
+ }
+
+ relCatchpointFilePath :=
+ filepath.Join(CatchpointDirName, makeCatchpointFilePath(round))
+ absCatchpointFilePath := filepath.Join(ct.dbDirectory, relCatchpointFilePath)
+
+ err = os.MkdirAll(filepath.Dir(absCatchpointFilePath), 0700)
+ if err != nil {
+ return err
+ }
+
+ err = repackCatchpoint(ctx, header, dataInfo.BiggestChunkLen, catchpointDataFilePath, absCatchpointFilePath)
+ if err != nil {
+ return err
+ }
+
+ fileInfo, err := os.Stat(absCatchpointFilePath)
+ if err != nil {
+ return err
+ }
+
+ err = ct.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ err = ct.recordCatchpointFile(ctx, tx, round, relCatchpointFilePath, fileInfo.Size())
+ if err != nil {
+ return err
+ }
+ return deleteUnfinishedCatchpoint(ctx, tx, round)
+ })
+ if err != nil {
+ return err
+ }
+
+ ct.log.With("accountsRound", accountsRound).
+ With("writingDuration", uint64(time.Since(startTime).Nanoseconds())).
+ With("accountsCount", dataInfo.TotalAccounts).
+ With("fileSize", fileInfo.Size()).
+ With("catchpointLabel", label).
+ Infof("Catchpoint file was created")
+
+ return nil
+}
+
+// Try create a catchpoint (a label and possibly a file with db record) and remove
+// the unfinished catchpoint record.
+func (ct *catchpointTracker) finishCatchpoint(ctx context.Context, round basics.Round, blockHash crypto.Digest, catchpointLookback uint64) error {
+ accountsRound := round - basics.Round(catchpointLookback)
+
+ ct.log.Infof("finishing catchpoint round: %d accountsRound: %d", round, accountsRound)
+
+ dataInfo, exists, err :=
+ selectCatchpointFirstStageInfo(ctx, ct.dbs.Rdb.Handle, accountsRound)
+ if err != nil {
+ return err
+ }
+
+ if !exists {
+ return deleteUnfinishedCatchpoint(ctx, ct.dbs.Wdb.Handle, round)
+ }
+ return ct.createCatchpoint(ctx, accountsRound, round, dataInfo, blockHash)
+}
+
+// Calculate catchpoint round numbers in [min, max]. `catchpointInterval` must be
+// non-zero.
+func calculateCatchpointRounds(min basics.Round, max basics.Round, catchpointInterval uint64) []basics.Round {
+ var res []basics.Round
+
+ // The smallest integer i such that i * ct.catchpointInterval >= first.
+ l := (uint64(min) + catchpointInterval - 1) / catchpointInterval
+ // The largest integer i such that i * ct.catchpointInterval <= last.
+ r := uint64(max) / catchpointInterval
+
+ for i := l; i <= r; i++ {
+ round := basics.Round(i * catchpointInterval)
+ res = append(res, round)
+ }
+
+ return res
+}
+
+func (ct *catchpointTracker) calculateCatchpointRounds(dcc *deferredCommitContext) []basics.Round {
+ if ct.catchpointInterval == 0 {
+ return nil
+ }
+
+ min := dcc.oldBase + 1
+ if dcc.catchpointLookback+1 > uint64(min) {
+ min = basics.Round(dcc.catchpointLookback) + 1
+ }
+ return calculateCatchpointRounds(min, dcc.newBase, ct.catchpointInterval)
+}
+
+// Delete old first stage catchpoint records and data files.
+func (ct *catchpointTracker) pruneFirstStageRecordsData(ctx context.Context, maxRoundToDelete basics.Round) error {
+ rounds, err := selectOldCatchpointFirstStageInfoRounds(
+ ctx, ct.dbs.Rdb.Handle, maxRoundToDelete)
+ if err != nil {
+ return err
+ }
+
+ for _, round := range rounds {
+ relCatchpointDataFilePath :=
+ filepath.Join(CatchpointDirName, makeCatchpointDataFilePath(round))
+ err = removeSingleCatchpointFileFromDisk(ct.dbDirectory, relCatchpointDataFilePath)
+ if err != nil {
+ return err
+ }
+ }
+
+ return deleteOldCatchpointFirstStageInfo(ctx, ct.dbs.Rdb.Handle, maxRoundToDelete)
+}
+
func (ct *catchpointTracker) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
- if dcc.isCatchpointRound && ct.enableGeneratingCatchpointFiles && dcc.catchpointLabel != "" {
- // generate the catchpoint file. This need to be done inline so that it will block any new accounts that from being written.
- // the generateCatchpoint expects that the accounts data would not be modified in the background during it's execution.
- ct.generateCatchpoint(ctx, basics.Round(dcc.offset)+dcc.oldBase+dcc.lookback, dcc.catchpointLabel, dcc.committedRoundDigest, dcc.updatingBalancesDuration)
+ if dcc.catchpointFirstStage {
+ err := ct.finishFirstStage(ctx, dcc.newBase, dcc.updatingBalancesDuration)
+ if err != nil {
+ ct.log.Warnf(
+ "error finishing catchpoint's first stage dcc.newBase: %d err: %v",
+ dcc.newBase, err)
+ }
}
- // in scheduleCommit, we expect that this function to update the catchpointWriting when
- // it's on a catchpoint round and the node is configured to generate a catchpoint file. Doing this in a deferred function
- // here would prevent us from "forgetting" to update this variable later on.
- if dcc.isCatchpointRound && ct.enableGeneratingCatchpointFiles {
- atomic.StoreInt32(dcc.catchpointWriting, 0)
+ // Generate catchpoints for rounds in (dcc.oldBase, dcc.newBase].
+ for _, round := range ct.calculateCatchpointRounds(dcc) {
+ err := ct.finishCatchpoint(
+ ctx, round, dcc.committedRoundDigests[round-dcc.oldBase-1], dcc.catchpointLookback)
+ if err != nil {
+ ct.log.Warnf("error creating catchpoint round: %d err: %v", round, err)
+ }
+ }
+
+ // Prune first stage catchpoint records from the database.
+ if uint64(dcc.newBase) >= dcc.catchpointLookback {
+ err := ct.pruneFirstStageRecordsData(
+ ctx, dcc.newBase-basics.Round(dcc.catchpointLookback))
+ if err != nil {
+ ct.log.Warnf(
+ "error pruning first stage records and data dcc.newBase: %d err: %v",
+ dcc.newBase, err)
+ }
}
}
@@ -449,9 +910,9 @@ func (ct *catchpointTracker) handleUnorderedCommit(dcc *deferredCommitContext) {
// if the node is configured to generate catchpoint files, we might need to update the catchpointWriting variable.
if ct.enableGeneratingCatchpointFiles {
// determine if this was a catchpoint round
- if dcc.isCatchpointRound {
+ if dcc.catchpointFirstStage {
// it was a catchpoint round, so update the catchpointWriting to indicate that we're done.
- atomic.StoreInt32(&ct.catchpointWriting, 0)
+ atomic.StoreInt32(&ct.catchpointDataWriting, 0)
}
}
}
@@ -461,7 +922,6 @@ func (ct *catchpointTracker) handleUnorderedCommit(dcc *deferredCommitContext) {
// be called even if loadFromDisk() is not called or does
// not succeed.
func (ct *catchpointTracker) close() {
-
}
// accountsUpdateBalances applies the given compactAccountDeltas to the merkle trie
@@ -563,70 +1023,31 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun
return
}
-// IsWritingCatchpointFile returns true when a catchpoint file is being generated. The function is used by the catchup service
-// to avoid memory pressure until the catchpoint file writing is complete.
-func (ct *catchpointTracker) IsWritingCatchpointFile() bool {
- return atomic.LoadInt32(&ct.catchpointWriting) != 0
+// IsWritingCatchpointDataFile returns true iff a (first stage) catchpoint data file
+// is being generated.
+func (ct *catchpointTracker) IsWritingCatchpointDataFile() bool {
+ return atomic.LoadInt32(&ct.catchpointDataWriting) != 0
}
-// isCatchpointRound returns true if the round at the given offset, dbRound with the provided lookback should be a catchpoint round.
-func (ct *catchpointTracker) isCatchpointRound(offset uint64, dbRound basics.Round, lookback basics.Round) bool {
- if !ct.forceCatchpointFileWriting {
- if ct.accountDataResourceSeparationRound == basics.Round(0) {
- return false
- }
+// Generates a (first stage) catchpoint data file.
+func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, accountsRound basics.Round, updatingBalancesDuration time.Duration) (uint64 /*totalAccounts*/, uint64 /*totalChunks*/, uint64 /*biggestChunkLen*/, error) {
+ ct.log.Debugf("catchpointTracker.generateCatchpointData() writing catchpoint accounts for round %d", accountsRound)
- if ct.accountDataResourceSeparationRound > (basics.Round(offset) + dbRound + lookback) {
- return false
- }
- }
- return ((offset + uint64(lookback+dbRound)) > 0) && (ct.catchpointInterval != 0) && ((uint64((offset + uint64(lookback+dbRound))) % ct.catchpointInterval) == 0)
-}
-
-// accountsCreateCatchpointLabel creates a catchpoint label and write it.
-func (ct *catchpointTracker) accountsCreateCatchpointLabel(committedRound basics.Round, totals ledgercore.AccountTotals, ledgerBlockDigest crypto.Digest, trieBalancesHash crypto.Digest) (label string, err error) {
- cpLabel := ledgercore.MakeCatchpointLabel(committedRound, ledgerBlockDigest, trieBalancesHash, totals)
- label = cpLabel.String()
- _, err = ct.accountsq.writeCatchpointStateString(context.Background(), catchpointStateLastCatchpoint, label)
- return
-}
-
-// generateCatchpoint generates a single catchpoint file
-func (ct *catchpointTracker) generateCatchpoint(ctx context.Context, committedRound basics.Round, label string, committedRoundDigest crypto.Digest, updatingBalancesDuration time.Duration) {
- beforeGeneratingCatchpointTime := time.Now()
+ startTime := time.Now()
catchpointGenerationStats := telemetryspec.CatchpointGenerationEventDetails{
BalancesWriteTime: uint64(updatingBalancesDuration.Nanoseconds()),
}
- // the retryCatchpointCreation is used to repeat the catchpoint file generation in case the node crashed / aborted during startup
- // before the catchpoint file generation could be completed.
- retryCatchpointCreation := false
- ct.log.Debugf("accountUpdates: generateCatchpoint: generating catchpoint for round %d", committedRound)
- defer func() {
- if !retryCatchpointCreation {
- // clear the writingCatchpoint flag
- _, err := ct.accountsq.writeCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint, uint64(0))
- if err != nil {
- ct.log.Warnf("accountUpdates: generateCatchpoint unable to clear catchpoint state '%s' for round %d: %v", catchpointStateWritingCatchpoint, committedRound, err)
- }
- }
- }()
-
- _, err := ct.accountsq.writeCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint, uint64(committedRound))
- if err != nil {
- ct.log.Warnf("accountUpdates: generateCatchpoint unable to write catchpoint state '%s' for round %d: %v", catchpointStateWritingCatchpoint, committedRound, err)
- return
- }
-
- relCatchpointFileName := filepath.Join(CatchpointDirName, catchpointRoundToPath(committedRound))
- absCatchpointFileName := filepath.Join(ct.dbDirectory, relCatchpointFileName)
+ catchpointDataFilePath := filepath.Join(ct.dbDirectory, CatchpointDirName)
+ catchpointDataFilePath =
+ filepath.Join(catchpointDataFilePath, makeCatchpointDataFilePath(accountsRound))
more := true
const shortChunkExecutionDuration = 50 * time.Millisecond
const longChunkExecutionDuration = 1 * time.Second
var chunkExecutionDuration time.Duration
select {
- case <-ct.catchpointSlowWriting:
+ case <-ct.catchpointDataSlowWriting:
chunkExecutionDuration = longChunkExecutionDuration
default:
chunkExecutionDuration = shortChunkExecutionDuration
@@ -635,8 +1056,11 @@ func (ct *catchpointTracker) generateCatchpoint(ctx context.Context, committedRo
var catchpointWriter *catchpointWriter
start := time.Now()
ledgerGeneratecatchpointCount.Inc(nil)
- err = ct.dbs.Rdb.Atomic(func(dbCtx context.Context, tx *sql.Tx) (err error) {
- catchpointWriter = makeCatchpointWriter(ctx, absCatchpointFileName, tx, committedRound, committedRoundDigest, label)
+ err := ct.dbs.Rdb.Atomic(func(dbCtx context.Context, tx *sql.Tx) (err error) {
+ catchpointWriter, err = makeCatchpointWriter(ctx, catchpointDataFilePath, tx)
+ if err != nil {
+ return
+ }
for more {
stepCtx, stepCancelFunction := context.WithTimeout(ctx, chunkExecutionDuration)
writeStepStartTime := time.Now()
@@ -661,21 +1085,23 @@ func (ct *catchpointTracker) generateCatchpoint(ctx context.Context, committedRo
chunkExecutionDuration = longChunkExecutionDuration
}
case <-ctx.Done():
- retryCatchpointCreation = true
+ //retryCatchpointCreation = true
err2 := catchpointWriter.Abort()
if err2 != nil {
return fmt.Errorf("error removing catchpoint file : %v", err2)
}
return nil
- case <-ct.catchpointSlowWriting:
+ case <-ct.catchpointDataSlowWriting:
chunkExecutionDuration = longChunkExecutionDuration
}
}
if err != nil {
- err = fmt.Errorf("unable to create catchpoint for round %d: %v", committedRound, err)
+ err = fmt.Errorf(
+ "unable to create catchpoint data file for round %d: %v",
+ accountsRound, err)
err2 := catchpointWriter.Abort()
if err2 != nil {
- ct.log.Warnf("accountUpdates: generateCatchpoint: error removing catchpoint file : %v", err2)
+ ct.log.Warnf("catchpointTracker.generateCatchpointData() error removing catchpoint file : %v", err2)
}
return
}
@@ -683,62 +1109,93 @@ func (ct *catchpointTracker) generateCatchpoint(ctx context.Context, committedRo
return
})
ledgerGeneratecatchpointMicros.AddMicrosecondsSince(start, nil)
-
if err != nil {
- ct.log.Warnf("accountUpdates: generateCatchpoint: %v", err)
- return
- }
- if catchpointWriter == nil {
- ct.log.Warnf("accountUpdates: generateCatchpoint: nil catchpointWriter")
- return
+ ct.log.Warnf("catchpointTracker.generateCatchpointData() %v", err)
+ return 0, 0, 0, err
}
- err = ct.saveCatchpointFile(committedRound, relCatchpointFileName, catchpointWriter.GetSize(), catchpointWriter.GetCatchpoint())
- if err != nil {
- ct.log.Warnf("accountUpdates: generateCatchpoint: unable to save catchpoint: %v", err)
- return
- }
catchpointGenerationStats.FileSize = uint64(catchpointWriter.GetSize())
- catchpointGenerationStats.WritingDuration = uint64(time.Since(beforeGeneratingCatchpointTime).Nanoseconds())
+ catchpointGenerationStats.WritingDuration = uint64(time.Since(startTime).Nanoseconds())
catchpointGenerationStats.AccountsCount = catchpointWriter.GetTotalAccounts()
- catchpointGenerationStats.CatchpointLabel = catchpointWriter.GetCatchpoint()
ct.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.CatchpointGenerationEvent, catchpointGenerationStats)
- ct.log.With("writingDuration", catchpointGenerationStats.WritingDuration).
+ ct.log.With("accountsRound", accountsRound).
+ With("writingDuration", catchpointGenerationStats.WritingDuration).
With("CPUTime", catchpointGenerationStats.CPUTime).
With("balancesWriteTime", catchpointGenerationStats.BalancesWriteTime).
With("accountsCount", catchpointGenerationStats.AccountsCount).
With("fileSize", catchpointGenerationStats.FileSize).
With("catchpointLabel", catchpointGenerationStats.CatchpointLabel).
- Infof("Catchpoint file was generated")
+ Infof("Catchpoint data file was generated")
+
+ return catchpointWriter.GetTotalAccounts(), catchpointWriter.GetTotalChunks(), catchpointWriter.GetBiggestChunkLen(), nil
+}
+
+func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx *sql.Tx, accountsRound basics.Round, totalAccounts uint64, totalChunks uint64, biggestChunkLen uint64) error {
+ accountTotals, err := accountsTotals(ctx, tx, false)
+ if err != nil {
+ return err
+ }
+
+ {
+ mc, err := MakeMerkleCommitter(tx, false)
+ if err != nil {
+ return err
+ }
+ if ct.balancesTrie == nil {
+ trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig)
+ if err != nil {
+ return err
+ }
+ ct.balancesTrie = trie
+ } else {
+ ct.balancesTrie.SetCommitter(mc)
+ }
+ }
+ trieBalancesHash, err := ct.balancesTrie.RootHash()
+ if err != nil {
+ return err
+ }
+
+ info := catchpointFirstStageInfo{
+ Totals: accountTotals,
+ TotalAccounts: totalAccounts,
+ TotalChunks: totalChunks,
+ BiggestChunkLen: biggestChunkLen,
+ TrieBalancesHash: trieBalancesHash,
+ }
+ return insertOrReplaceCatchpointFirstStageInfo(ctx, tx, accountsRound, &info)
+}
+
+func makeCatchpointDataFilePath(accountsRound basics.Round) string {
+ return strconv.FormatInt(int64(accountsRound), 10) + ".data"
}
-// catchpointRoundToPath calculate the catchpoint file path for a given round
-func catchpointRoundToPath(rnd basics.Round) string {
- irnd := int64(rnd) / 256
+func makeCatchpointFilePath(round basics.Round) string {
+ irnd := int64(round) / 256
outStr := ""
for irnd > 0 {
outStr = filepath.Join(outStr, fmt.Sprintf("%02x", irnd%256))
irnd = irnd / 256
}
- outStr = filepath.Join(outStr, strconv.FormatInt(int64(rnd), 10)+".catchpoint")
+ outStr = filepath.Join(outStr, strconv.FormatInt(int64(round), 10)+".catchpoint")
return outStr
}
-// saveCatchpointFile stores the provided fileName as the stored catchpoint for the given round.
+// recordCatchpointFile stores the provided fileName as the stored catchpoint for the given round.
// after a successful insert operation to the database, it would delete up to 2 old entries, as needed.
// deleting 2 entries while inserting single entry allow us to adjust the size of the backing storage and have the
// database and storage realign.
-func (ct *catchpointTracker) saveCatchpointFile(round basics.Round, fileName string, fileSize int64, catchpoint string) (err error) {
+func (ct *catchpointTracker) recordCatchpointFile(ctx context.Context, e db.Executable, round basics.Round, relCatchpointFilePath string, fileSize int64) (err error) {
if ct.catchpointFileHistoryLength != 0 {
- err = ct.accountsq.storeCatchpoint(context.Background(), round, fileName, catchpoint, fileSize)
+ err = storeCatchpoint(ctx, e, round, relCatchpointFilePath, "", fileSize)
if err != nil {
- ct.log.Warnf("accountUpdates: saveCatchpoint: unable to save catchpoint: %v", err)
+ ct.log.Warnf("catchpointTracker.recordCatchpointFile() unable to save catchpoint: %v", err)
return
}
} else {
- err = os.Remove(fileName)
+ err = removeSingleCatchpointFileFromDisk(ct.dbDirectory, relCatchpointFilePath)
if err != nil {
- ct.log.Warnf("accountUpdates: saveCatchpoint: unable to remove file (%s): %v", fileName, err)
+ ct.log.Warnf("catchpointTracker.recordCatchpointFile() unable to remove file (%s): %v", relCatchpointFilePath, err)
return
}
}
@@ -746,7 +1203,7 @@ func (ct *catchpointTracker) saveCatchpointFile(round basics.Round, fileName str
return
}
var filesToDelete map[basics.Round]string
- filesToDelete, err = ct.accountsq.getOldestCatchpointFiles(context.Background(), 2, ct.catchpointFileHistoryLength)
+ filesToDelete, err = getOldestCatchpointFiles(ctx, e, 2, ct.catchpointFileHistoryLength)
if err != nil {
return fmt.Errorf("unable to delete catchpoint file, getOldestCatchpointFiles failed : %v", err)
}
@@ -755,7 +1212,7 @@ func (ct *catchpointTracker) saveCatchpointFile(round basics.Round, fileName str
if err != nil {
return err
}
- err = ct.accountsq.storeCatchpoint(context.Background(), round, "", "", 0)
+ err = storeCatchpoint(ctx, e, round, "", "", 0)
if err != nil {
return fmt.Errorf("unable to delete old catchpoint entry '%s' : %v", fileToDelete, err)
}
@@ -770,13 +1227,13 @@ func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseS
start := time.Now()
ledgerGetcatchpointCount.Inc(nil)
err := ct.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- dbFileName, _, fileSize, err = getCatchpoint(tx, round)
+ dbFileName, _, fileSize, err = getCatchpoint(ctx, tx, round)
return
})
ledgerGetcatchpointMicros.AddMicrosecondsSince(start, nil)
if err != nil && err != sql.ErrNoRows {
// we had some sql error.
- return nil, fmt.Errorf("accountUpdates: getCatchpointStream: unable to lookup catchpoint %d: %v", round, err)
+ return nil, fmt.Errorf("catchpointTracker.GetCatchpointStream() unable to lookup catchpoint %d: %v", round, err)
}
if dbFileName != "" {
catchpointPath := filepath.Join(ct.dbDirectory, dbFileName)
@@ -788,22 +1245,24 @@ func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseS
if os.IsNotExist(err) {
// the database told us that we have this file.. but we couldn't find it.
// delete it from the database.
- err := ct.saveCatchpointFile(round, "", 0, "")
+ err := ct.recordCatchpointFile(
+ context.Background(), ct.dbs.Wdb.Handle, round, "", 0)
if err != nil {
- ct.log.Warnf("accountUpdates: getCatchpointStream: unable to delete missing catchpoint entry: %v", err)
+ ct.log.Warnf("catchpointTracker.GetCatchpointStream() unable to delete missing catchpoint entry: %v", err)
return nil, err
}
return nil, ledgercore.ErrNoEntry{}
}
// it's some other error.
- return nil, fmt.Errorf("accountUpdates: getCatchpointStream: unable to open catchpoint file '%s' %v", catchpointPath, err)
+ return nil, fmt.Errorf("catchpointTracker.GetCatchpointStream() unable to open catchpoint file '%s' %v", catchpointPath, err)
}
// if the database doesn't know about that round, see if we have that file anyway:
- fileName := filepath.Join(CatchpointDirName, catchpointRoundToPath(round))
- catchpointPath := filepath.Join(ct.dbDirectory, fileName)
- file, err := os.OpenFile(catchpointPath, os.O_RDONLY, 0666)
+ relCatchpointFilePath :=
+ filepath.Join(CatchpointDirName, makeCatchpointFilePath(round))
+ absCatchpointFilePath := filepath.Join(ct.dbDirectory, relCatchpointFilePath)
+ file, err := os.OpenFile(absCatchpointFilePath, os.O_RDONLY, 0666)
if err == nil && file != nil {
// great, if found that we should have had this in the database.. add this one now :
fileInfo, err := file.Stat()
@@ -812,9 +1271,11 @@ func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseS
return &readCloseSizer{ReadCloser: file, size: -1}, nil
}
- err = ct.saveCatchpointFile(round, fileName, fileInfo.Size(), "")
+ err = ct.recordCatchpointFile(
+ context.Background(), ct.dbs.Wdb.Handle, round, relCatchpointFilePath,
+ fileInfo.Size())
if err != nil {
- ct.log.Warnf("accountUpdates: getCatchpointStream: unable to save missing catchpoint entry: %v", err)
+ ct.log.Warnf("catchpointTracker.GetCatchpointStream() unable to save missing catchpoint entry: %v", err)
}
return &readCloseSizer{ReadCloser: file, size: fileInfo.Size()}, nil
}
@@ -823,10 +1284,10 @@ func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseS
// deleteStoredCatchpoints iterates over the storedcatchpoints table and deletes all the files stored on disk.
// once all the files have been deleted, it would go ahead and remove the entries from the table.
-func deleteStoredCatchpoints(ctx context.Context, dbQueries *accountsDbQueries, dbDirectory string) (err error) {
+func deleteStoredCatchpoints(ctx context.Context, e db.Executable, dbDirectory string) (err error) {
catchpointsFilesChunkSize := 50
for {
- fileNames, err := dbQueries.getOldestCatchpointFiles(ctx, catchpointsFilesChunkSize, 0)
+ fileNames, err := getOldestCatchpointFiles(ctx, e, catchpointsFilesChunkSize, 0)
if err != nil {
return err
}
@@ -840,7 +1301,7 @@ func deleteStoredCatchpoints(ctx context.Context, dbQueries *accountsDbQueries,
return err
}
// clear the entry from the database
- err = dbQueries.storeCatchpoint(ctx, round, "", "", 0)
+ err = storeCatchpoint(ctx, e, round, "", "", 0)
if err != nil {
return err
}
@@ -960,7 +1421,7 @@ func (ct *catchpointTracker) catchpointEnabled() bool {
// accountsInitializeHashes initializes account hashes.
// as part of the initialization, it tests if a hash table matches to account base and updates the former.
func (ct *catchpointTracker) accountsInitializeHashes(ctx context.Context, tx *sql.Tx, rnd basics.Round) error {
- hashRound, err := accountsHashRound(tx)
+ hashRound, err := accountsHashRound(ctx, tx)
if err != nil {
return err
}
@@ -968,7 +1429,7 @@ func (ct *catchpointTracker) accountsInitializeHashes(ctx context.Context, tx *s
if hashRound != rnd {
// if the hashed round is different then the base round, something was modified, and the accounts aren't in sync
// with the hashes.
- err = resetAccountHashes(tx)
+ err = resetAccountHashes(ctx, tx)
if err != nil {
return err
}
@@ -1068,7 +1529,7 @@ func (ct *catchpointTracker) accountsInitializeHashes(ctx context.Context, tx *s
}
// we've just updated the merkle trie, update the hashRound to reflect that.
- err = updateAccountsHashRound(tx, rnd)
+ err = updateAccountsHashRound(ctx, tx, rnd)
if err != nil {
return fmt.Errorf("accountsInitialize was unable to update the account hash round to %d: %v", rnd, err)
}
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go
index 025c7ebfe..2d7bebcd5 100644
--- a/ledger/catchpointtracker_test.go
+++ b/ledger/catchpointtracker_test.go
@@ -19,13 +19,14 @@ package ledger
import (
"context"
"database/sql"
+ "errors"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
-
+ "strings"
"sync/atomic"
"testing"
"time"
@@ -49,24 +50,26 @@ func TestIsWritingCatchpointFile(t *testing.T) {
ct := &catchpointTracker{}
- ct.catchpointWriting = -1
- ans := ct.IsWritingCatchpointFile()
+ ct.catchpointDataWriting = -1
+ ans := ct.IsWritingCatchpointDataFile()
require.True(t, ans)
- ct.catchpointWriting = 0
- ans = ct.IsWritingCatchpointFile()
+ ct.catchpointDataWriting = 0
+ ans = ct.IsWritingCatchpointDataFile()
require.False(t, ans)
}
func newCatchpointTracker(tb testing.TB, l *mockLedgerForTracker, conf config.Local, dbPathPrefix string) *catchpointTracker {
au := &accountUpdates{}
ct := &catchpointTracker{}
+ ao := &onlineAccounts{}
au.initialize(conf)
ct.initialize(conf, dbPathPrefix)
+ ao.initialize(conf)
_, err := trackerDBInitialize(l, ct.catchpointEnabled(), dbPathPrefix)
require.NoError(tb, err)
- err = l.trackers.initialize(l, []ledgerTracker{au, ct}, conf)
+ err = l.trackers.initialize(l, []ledgerTracker{au, ct, ao, &txTail{}}, conf)
require.NoError(tb, err)
err = l.trackers.loadFromDisk(l)
require.NoError(tb, err)
@@ -88,26 +91,22 @@ func TestGetCatchpointStream(t *testing.T) {
filesToCreate := 4
- temporaryDirectroy, err := ioutil.TempDir(os.TempDir(), CatchpointDirName)
- require.NoError(t, err)
- defer func() {
- os.RemoveAll(temporaryDirectroy)
- }()
- catchpointsDirectory := filepath.Join(temporaryDirectroy, CatchpointDirName)
- err = os.Mkdir(catchpointsDirectory, 0777)
+ temporaryDirectory := t.TempDir()
+ catchpointsDirectory := filepath.Join(temporaryDirectory, CatchpointDirName)
+ err := os.Mkdir(catchpointsDirectory, 0777)
require.NoError(t, err)
- ct.dbDirectory = temporaryDirectroy
+ ct.dbDirectory = temporaryDirectory
// Create the catchpoint files with dummy data
for i := 0; i < filesToCreate; i++ {
fileName := filepath.Join(CatchpointDirName, fmt.Sprintf("%d.catchpoint", i))
data := []byte{byte(i), byte(i + 1), byte(i + 2)}
- err = ioutil.WriteFile(filepath.Join(temporaryDirectroy, fileName), data, 0666)
+ err = ioutil.WriteFile(filepath.Join(temporaryDirectory, fileName), data, 0666)
require.NoError(t, err)
// Store the catchpoint into the database
- err := ct.accountsq.storeCatchpoint(context.Background(), basics.Round(i), fileName, "", int64(len(data)))
+ err := storeCatchpoint(context.Background(), ml.dbs.Wdb.Handle, basics.Round(i), fileName, "", int64(len(data)))
require.NoError(t, err)
}
@@ -127,14 +126,14 @@ func TestGetCatchpointStream(t *testing.T) {
require.Equal(t, int64(3), len)
// File deleted, but record in the database
- err = os.Remove(filepath.Join(temporaryDirectroy, CatchpointDirName, "2.catchpoint"))
+ err = os.Remove(filepath.Join(temporaryDirectory, CatchpointDirName, "2.catchpoint"))
require.NoError(t, err)
reader, err = ct.GetCatchpointStream(basics.Round(2))
require.Equal(t, ledgercore.ErrNoEntry{}, err)
require.Nil(t, reader)
// File on disk, but database lost the record
- err = ct.accountsq.storeCatchpoint(context.Background(), basics.Round(3), "", "", 0)
+ err = storeCatchpoint(context.Background(), ml.dbs.Wdb.Handle, basics.Round(3), "", "", 0)
require.NoError(t, err)
reader, err = ct.GetCatchpointStream(basics.Round(3))
require.NoError(t, err)
@@ -144,26 +143,21 @@ func TestGetCatchpointStream(t *testing.T) {
outData = []byte{3, 4, 5}
require.Equal(t, outData, dataRead)
- err = deleteStoredCatchpoints(context.Background(), ct.accountsq, ct.dbDirectory)
+ err = deleteStoredCatchpoints(context.Background(), ml.dbs.Wdb.Handle, ct.dbDirectory)
require.NoError(t, err)
}
// TestAcctUpdatesDeleteStoredCatchpoints - The goal of this test is to verify that the deleteStoredCatchpoints function works correctly.
-// it doing so by filling up the storedcatchpoints with dummy catchpoint file entries, as well as creating these dummy files on disk.
-// ( the term dummy is only because these aren't real catchpoint files, but rather a zero-length file ). Then, the test call the function
-// and ensures that it did not errored, the catchpoint files were correctly deleted, and that deleteStoredCatchpoints contains no more
+// It does so by filling up the storedcatchpoints with dummy catchpoint file entries, as well as creating these dummy files on disk.
+// ( the term dummy is only because these aren't real catchpoint files, but rather a zero-length file ). Then, the test calls the function
+// and ensures that it did not error, the catchpoint files were correctly deleted, and that deleteStoredCatchpoints contains no more
// entries.
func TestAcctUpdatesDeleteStoredCatchpoints(t *testing.T) {
partitiontest.PartitionTest(t)
accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
- temporaryDirectroy, err := ioutil.TempDir(os.TempDir(), CatchpointDirName)
-
- require.NoError(t, err)
- defer func() {
- os.RemoveAll(temporaryDirectroy)
- }()
+ temporaryDirectory := t.TempDir()
ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion, accts)
defer ml.Close()
@@ -171,7 +165,7 @@ func TestAcctUpdatesDeleteStoredCatchpoints(t *testing.T) {
conf.CatchpointInterval = 1
ct := newCatchpointTracker(t, ml, conf, ".")
defer ct.close()
- ct.dbDirectory = temporaryDirectroy
+ ct.dbDirectory = temporaryDirectory
dummyCatchpointFilesToCreate := 42
@@ -182,7 +176,7 @@ func TestAcctUpdatesDeleteStoredCatchpoints(t *testing.T) {
i/10, os.PathSeparator,
i/2, os.PathSeparator,
i)
- absFile := filepath.Join(temporaryDirectroy, file)
+ absFile := filepath.Join(temporaryDirectory, file)
dummyCatchpointFiles[i] = absFile
err := os.MkdirAll(path.Dir(absFile), 0755)
require.NoError(t, err)
@@ -190,11 +184,11 @@ func TestAcctUpdatesDeleteStoredCatchpoints(t *testing.T) {
require.NoError(t, err)
err = f.Close()
require.NoError(t, err)
- err = ct.accountsq.storeCatchpoint(context.Background(), basics.Round(i), file, "", 0)
+ err = storeCatchpoint(context.Background(), ml.dbs.Wdb.Handle, basics.Round(i), file, "", 0)
require.NoError(t, err)
}
- err = deleteStoredCatchpoints(context.Background(), ct.accountsq, ct.dbDirectory)
+ err := deleteStoredCatchpoints(context.Background(), ml.dbs.Wdb.Handle, ct.dbDirectory)
require.NoError(t, err)
// ensure that all the files were deleted.
@@ -202,14 +196,14 @@ func TestAcctUpdatesDeleteStoredCatchpoints(t *testing.T) {
_, err := os.Open(file)
require.True(t, os.IsNotExist(err))
}
- fileNames, err := ct.accountsq.getOldestCatchpointFiles(context.Background(), dummyCatchpointFilesToCreate, 0)
+ fileNames, err := getOldestCatchpointFiles(context.Background(), ml.dbs.Rdb.Handle, dummyCatchpointFilesToCreate, 0)
require.NoError(t, err)
require.Equal(t, 0, len(fileNames))
}
// The test validate that when algod boots up it cleans empty catchpoint directories.
-// it is done be creating empty directories in the catchpoint root directory.
-// When algod boots up it should remove those directories
+// It is done by creating empty directories in the catchpoint root directory.
+// When algod boots up it should remove those directories.
func TestSchemaUpdateDeleteStoredCatchpoints(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -217,16 +211,12 @@ func TestSchemaUpdateDeleteStoredCatchpoints(t *testing.T) {
if accountDBVersion < 6 {
return
}
- temporaryDirectroy, err := ioutil.TempDir(os.TempDir(), CatchpointDirName)
- require.NoError(t, err)
- defer func() {
- os.RemoveAll(temporaryDirectroy)
- }()
+ temporaryDirectroy := t.TempDir()
tempCatchpointDir := filepath.Join(temporaryDirectroy, CatchpointDirName)
// creating empty catchpoint directories
emptyDirPath := path.Join(tempCatchpointDir, "2f", "e1")
- err = os.MkdirAll(emptyDirPath, 0755)
+ err := os.MkdirAll(emptyDirPath, 0755)
require.NoError(t, err)
emptyDirPath = path.Join(tempCatchpointDir, "2e", "e1")
err = os.MkdirAll(emptyDirPath, 0755)
@@ -268,7 +258,7 @@ func TestSchemaUpdateDeleteStoredCatchpoints(t *testing.T) {
func getNumberOfCatchpointFilesInDir(catchpointDir string) (int, error) {
numberOfCatchpointFiles := 0
err := filepath.Walk(catchpointDir, func(path string, d os.FileInfo, err error) error {
- if !d.IsDir() {
+ if !d.IsDir() && strings.HasSuffix(path, ".catchpoint") {
numberOfCatchpointFiles++
}
return nil
@@ -276,16 +266,14 @@ func getNumberOfCatchpointFilesInDir(catchpointDir string) (int, error) {
return numberOfCatchpointFiles, err
}
-// The goal in this test is to check that we are saving at most X catchpoint files. If algod needs to create a new catchfile it will delete
-// the oldest. In addtion, when deleting old catchpoint files an empty directory should be deleted as well.
-func TestSaveCatchpointFile(t *testing.T) {
+// The goal of this test is to check that we are saving at most X catchpoint files.
+// If algod needs to create a new catchpoint file it will delete the oldest.
+// In addition, when deleting old catchpoint files an empty directory should be deleted
+// as well.
+func TestRecordCatchpointFile(t *testing.T) {
partitiontest.PartitionTest(t)
- temporaryDirectroy, err := ioutil.TempDir(os.TempDir(), CatchpointDirName)
- require.NoError(t, err)
- defer func() {
- os.RemoveAll(temporaryDirectroy)
- }()
+ temporaryDirectory := t.TempDir()
accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion, accts)
@@ -295,33 +283,40 @@ func TestSaveCatchpointFile(t *testing.T) {
conf := config.GetDefaultLocal()
conf.CatchpointFileHistoryLength = 3
+ conf.Archival = true
ct.initialize(conf, ".")
defer ct.close()
- ct.dbDirectory = temporaryDirectroy
+ ct.dbDirectory = temporaryDirectory
- _, err = trackerDBInitialize(ml, true, ct.dbDirectory)
+ _, err := trackerDBInitialize(ml, true, ct.dbDirectory)
require.NoError(t, err)
err = ct.loadFromDisk(ml, ml.Latest())
require.NoError(t, err)
- ct.generateCatchpoint(context.Background(), basics.Round(2000000), "0#ABC1", crypto.Digest{}, time.Second)
- ct.generateCatchpoint(context.Background(), basics.Round(3000010), "0#ABC2", crypto.Digest{}, time.Second)
- ct.generateCatchpoint(context.Background(), basics.Round(3000015), "0#ABC3", crypto.Digest{}, time.Second)
- ct.generateCatchpoint(context.Background(), basics.Round(3000020), "0#ABC4", crypto.Digest{}, time.Second)
+ for _, round := range []basics.Round{2000000, 3000010, 3000015, 3000020} {
+ accountsRound := round - 1
+
+ _, _, biggestChunkLen, err := ct.generateCatchpointData(
+ context.Background(), accountsRound, time.Second)
+ require.NoError(t, err)
- numberOfCatchpointFiles, err := getNumberOfCatchpointFilesInDir(temporaryDirectroy)
+ err = ct.createCatchpoint(context.Background(), accountsRound, round, catchpointFirstStageInfo{BiggestChunkLen: biggestChunkLen}, crypto.Digest{})
+ require.NoError(t, err)
+ }
+
+ numberOfCatchpointFiles, err := getNumberOfCatchpointFilesInDir(temporaryDirectory)
require.NoError(t, err)
- require.Equal(t, numberOfCatchpointFiles, conf.CatchpointFileHistoryLength)
+ require.Equal(t, conf.CatchpointFileHistoryLength, numberOfCatchpointFiles)
- emptyDirs, err := getEmptyDirs(temporaryDirectroy)
+ emptyDirs, err := getEmptyDirs(temporaryDirectory)
require.NoError(t, err)
onlyCatchpointDirEmpty := len(emptyDirs) == 0 ||
- (len(emptyDirs) == 1 && emptyDirs[0] == temporaryDirectroy)
+ (len(emptyDirs) == 1 && emptyDirs[0] == temporaryDirectory)
require.Equalf(t, onlyCatchpointDirEmpty, true, "Directories: %v", emptyDirs)
}
-func BenchmarkLargeCatchpointWriting(b *testing.B) {
+func BenchmarkLargeCatchpointDataWriting(b *testing.B) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(5, true)}
@@ -344,13 +339,9 @@ func BenchmarkLargeCatchpointWriting(b *testing.B) {
ct := catchpointTracker{}
ct.initialize(cfg, ".")
- temporaryDirectroy, err := ioutil.TempDir(os.TempDir(), CatchpointDirName)
- require.NoError(b, err)
- defer func() {
- os.RemoveAll(temporaryDirectroy)
- }()
+ temporaryDirectroy := b.TempDir()
catchpointsDirectory := filepath.Join(temporaryDirectroy, CatchpointDirName)
- err = os.Mkdir(catchpointsDirectory, 0777)
+ err := os.Mkdir(catchpointsDirectory, 0777)
require.NoError(b, err)
ct.dbDirectory = temporaryDirectroy
@@ -378,12 +369,12 @@ func BenchmarkLargeCatchpointWriting(b *testing.B) {
}
}
- return updateAccountsHashRound(tx, 1)
+ return updateAccountsHashRound(ctx, tx, 1)
})
require.NoError(b, err)
b.ResetTimer()
- ct.generateCatchpoint(context.Background(), basics.Round(0), "0#ABCD", crypto.Digest{}, time.Second)
+ ct.generateCatchpointData(context.Background(), basics.Round(0), time.Second)
b.StopTimer()
b.ReportMetric(float64(accountsNumber), "accounts")
}
@@ -397,9 +388,8 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
// create new protocol version, which has lower lookback
testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestReproducibleCatchpointLabels")
protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
- protoParams.MaxBalLookback = 32
- protoParams.SeedLookback = 2
- protoParams.SeedRefreshInterval = 8
+ protoParams.CatchpointLookback = 32
+ protoParams.EnableOnlineAccountCatchpoints = true
config.Consensus[testProtocolVersion] = protoParams
defer func() {
delete(config.Consensus, testProtocolVersion)
@@ -438,7 +428,12 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
catchpointLabels := make(map[basics.Round]string)
ledgerHistory := make(map[basics.Round]*mockLedgerForTracker)
roundDeltas := make(map[basics.Round]ledgercore.StateDelta)
- for i := basics.Round(1); i <= basics.Round(testCatchpointLabelsCount*cfg.CatchpointInterval); i++ {
+ numCatchpointsCreated := 0
+ i := basics.Round(0)
+ lastCatchpointLabel := ""
+
+ for numCatchpointsCreated < testCatchpointLabelsCount {
+ i++
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
var updates ledgercore.AccountDeltas
@@ -479,25 +474,34 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
rewardsLevels = append(rewardsLevels, rewardLevel)
roundDeltas[i] = delta
- // if this is a catchpoint round, save the label.
- if uint64(i)%cfg.CatchpointInterval == 0 {
+ // If we made a catchpoint, save the label.
+ if (uint64(i) >= cfg.MaxAcctLookback) && (uint64(i)-cfg.MaxAcctLookback > protoParams.CatchpointLookback) && ((uint64(i)-cfg.MaxAcctLookback)%cfg.CatchpointInterval == 0) {
ml.trackers.waitAccountsWriting()
catchpointLabels[i] = ct.GetLastCatchpointLabel()
+ require.NotEqual(t, lastCatchpointLabel, catchpointLabels[i])
+ lastCatchpointLabel = catchpointLabels[i]
ledgerHistory[i] = ml.fork(t)
defer ledgerHistory[i].Close()
+ numCatchpointsCreated++
+ }
+
+ // Let catchpoint data generation finish so that nothing gets skipped.
+ for ct.IsWritingCatchpointDataFile() {
+ time.Sleep(time.Millisecond)
}
}
+ lastRound := i
- // test in revese what happens when we try to repeat the exact same blocks.
- // start off with the catchpoint before the last one
- startingRound := basics.Round((testCatchpointLabelsCount - 1) * cfg.CatchpointInterval)
- for ; startingRound > basics.Round(cfg.CatchpointInterval); startingRound -= basics.Round(cfg.CatchpointInterval) {
+ // Test in reverse what happens when we try to repeat the exact same blocks.
+ // Start off with the catchpoint before the last one.
+ for startingRound := lastRound - basics.Round(cfg.CatchpointInterval); uint64(startingRound) > protoParams.CatchpointLookback; startingRound -= basics.Round(cfg.CatchpointInterval) {
au.close()
ml2 := ledgerHistory[startingRound]
+ require.NotNil(t, ml2)
ct2 := newCatchpointTracker(t, ml2, cfg, ".")
defer ct2.close()
- for i := startingRound + 1; i <= basics.Round(testCatchpointLabelsCount*cfg.CatchpointInterval); i++ {
+ for i := startingRound + 1; i <= lastRound; i++ {
blk := bookkeeping.Block{
BlockHeader: bookkeeping.BlockHeader{
Round: basics.Round(i),
@@ -506,14 +510,20 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
blk.RewardsLevel = rewardsLevels[i]
blk.CurrentProtocol = testProtocolVersion
delta := roundDeltas[i]
+
ml2.trackers.newBlock(blk, delta)
ml2.trackers.committedUpTo(i)
// if this is a catchpoint round, check the label.
- if uint64(i)%cfg.CatchpointInterval == 0 {
+ if (uint64(i) >= cfg.MaxAcctLookback) && (uint64(i)-cfg.MaxAcctLookback > protoParams.CatchpointLookback) && ((uint64(i)-cfg.MaxAcctLookback)%cfg.CatchpointInterval == 0) {
ml2.trackers.waitAccountsWriting()
require.Equal(t, catchpointLabels[i], ct2.GetLastCatchpointLabel())
}
+
+ // Let catchpoint data generation finish so that nothing gets skipped.
+ for ct.IsWritingCatchpointDataFile() {
+ time.Sleep(time.Millisecond)
+ }
}
}
@@ -521,40 +531,12 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
require.NotZero(t, len(ct.roundDigest))
require.NoError(t, ct.loadFromDisk(ml, ml.Latest()))
require.Zero(t, len(ct.roundDigest))
- require.Zero(t, ct.catchpointWriting)
+ require.Zero(t, ct.catchpointDataWriting)
select {
- case _, closed := <-ct.catchpointSlowWriting:
+ case _, closed := <-ct.catchpointDataSlowWriting:
require.False(t, closed)
default:
- require.FailNow(t, "The catchpointSlowWriting should have been a closed channel; it seems to be a nil ?!")
- }
-}
-
-func TestCatchpointTrackerPrepareCommit(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- ct := &catchpointTracker{}
- const maxOffset = 40
- const maxLookback = 320
- ct.roundDigest = make([]crypto.Digest, maxOffset+maxLookback)
- for i := 0; i < len(ct.roundDigest); i++ {
- ct.roundDigest[i] = crypto.Hash([]byte{byte(i), byte(i / 256)})
- }
- dcc := &deferredCommitContext{}
- for offset := uint64(1); offset < maxOffset; offset++ {
- dcc.offset = offset
- for lookback := basics.Round(0); lookback < maxLookback; lookback += 20 {
- dcc.lookback = lookback
- for _, isCatchpointRound := range []bool{false, true} {
- dcc.isCatchpointRound = isCatchpointRound
- require.NoError(t, ct.prepareCommit(dcc))
- if isCatchpointRound {
- expectedRound := offset + uint64(lookback) - 1
- expectedHash := crypto.Hash([]byte{byte(expectedRound), byte(expectedRound / 256)})
- require.Equal(t, expectedHash[:], dcc.committedRoundDigest[:])
- }
- }
- }
+ require.FailNow(t, "The catchpointDataSlowWriting should have been a closed channel; it seems to be a nil ?!")
}
}
@@ -566,6 +548,7 @@ type blockingTracker struct {
postCommitReleaseLock chan struct{}
committedUpToRound int64
alwaysLock bool
+ shouldLockPostCommit bool
}
// loadFromDisk is not implemented in the blockingTracker.
@@ -600,7 +583,7 @@ func (bt *blockingTracker) commitRound(context.Context, *sql.Tx, *deferredCommit
// postCommit implements entry/exit blockers, designed for testing.
func (bt *blockingTracker) postCommit(ctx context.Context, dcc *deferredCommitContext) {
- if bt.alwaysLock || (dcc.isCatchpointRound && dcc.catchpointLabel != "") {
+ if bt.alwaysLock || dcc.catchpointFirstStage || bt.shouldLockPostCommit {
bt.postCommitEntryLock <- struct{}{}
<-bt.postCommitReleaseLock
}
@@ -608,7 +591,7 @@ func (bt *blockingTracker) postCommit(ctx context.Context, dcc *deferredCommitCo
// postCommitUnlocked implements entry/exit blockers, designed for testing.
func (bt *blockingTracker) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
- if bt.alwaysLock || (dcc.isCatchpointRound && dcc.catchpointLabel != "") {
+ if bt.alwaysLock || dcc.catchpointFirstStage {
bt.postCommitUnlockedEntryLock <- struct{}{}
<-bt.postCommitUnlockedReleaseLock
}
@@ -627,7 +610,8 @@ func TestCatchpointTrackerNonblockingCatchpointWriting(t *testing.T) {
testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestReproducibleCatchpointLabels")
protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
- protoParams.EnableAccountDataResourceSeparation = true
+ protoParams.EnableOnlineAccountCatchpoints = true
+ protoParams.CatchpointLookback = protoParams.MaxBalLookback
config.Consensus[testProtocolVersion] = protoParams
defer func() {
delete(config.Consensus, testProtocolVersion)
@@ -656,19 +640,19 @@ func TestCatchpointTrackerNonblockingCatchpointWriting(t *testing.T) {
ledger.trackers.mu.Unlock()
ledger.trackerMu.Unlock()
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
-
- // create the first MaxBalLookback blocks
- for rnd := ledger.Latest() + 1; rnd <= basics.Round(proto.MaxBalLookback); rnd++ {
+ // Create the first `cfg.MaxAcctLookback` blocks for which account updates tracker
+ // will skip committing.
+ for rnd := ledger.Latest() + 1; rnd <= basics.Round(cfg.MaxAcctLookback); rnd++ {
err = ledger.addBlockTxns(t, genesisInitState.Accounts, []transactions.SignedTxn{}, transactions.ApplyData{})
require.NoError(t, err)
}
- // make sure to get to a catchpoint round, and block the writing there.
+ // make sure to get to a first stage catchpoint round, and block the writing there.
for {
err = ledger.addBlockTxns(t, genesisInitState.Accounts, []transactions.SignedTxn{}, transactions.ApplyData{})
require.NoError(t, err)
- if uint64(ledger.Latest())%cfg.CatchpointInterval == 0 {
+ if (uint64(ledger.Latest())+protoParams.CatchpointLookback)%
+ cfg.CatchpointInterval == 0 {
// release the entry lock for postCommit
<-writeStallingTracker.postCommitEntryLock
@@ -694,7 +678,7 @@ func TestCatchpointTrackerNonblockingCatchpointWriting(t *testing.T) {
}
lookupDone := make(chan struct{})
- // now that we're blocked the tracker, try to call LookupAgreement and confirm it returns almost immediately
+ // now that we've blocked the tracker, try to call LookupAgreement and confirm it returns almost immediately
go func() {
defer close(lookupDone)
ledger.LookupAgreement(ledger.Latest(), genesisInitState.Block.FeeSink)
@@ -710,13 +694,14 @@ func TestCatchpointTrackerNonblockingCatchpointWriting(t *testing.T) {
// release the exit lock for postCommit
writeStallingTracker.postCommitUnlockedReleaseLock <- struct{}{}
- // test false positive : we want to ensure that without releasing the postCommit lock, the LookupAgreemnt would not be able to return within 1 second.
+ // test false positive : we want to ensure that without releasing the postCommit lock, the LookupAgreement would not be able to return within 1 second.
- // make sure to get to a catchpoint round, and block the writing there.
+ // make sure to get to a first stage catchpoint round, and block the writing there.
for {
err = ledger.addBlockTxns(t, genesisInitState.Accounts, []transactions.SignedTxn{}, transactions.ApplyData{})
require.NoError(t, err)
- if uint64(ledger.Latest())%cfg.CatchpointInterval == 0 {
+ if (uint64(ledger.Latest())+protoParams.CatchpointLookback)%
+ cfg.CatchpointInterval == 0 {
// release the entry lock for postCommit
<-writeStallingTracker.postCommitEntryLock
break
@@ -735,7 +720,7 @@ func TestCatchpointTrackerNonblockingCatchpointWriting(t *testing.T) {
}
lookupDone = make(chan struct{})
- // now that we're blocked the tracker, try to call LookupAgreement and confirm it's not returning within 1 second.
+ // now that we've blocked the tracker, try to call LookupAgreement and confirm it's not returning within 1 second.
go func() {
defer close(lookupDone)
ledger.LookupAgreement(ledger.Latest(), genesisInitState.Block.FeeSink)
@@ -765,3 +750,594 @@ func TestCatchpointTrackerNonblockingCatchpointWriting(t *testing.T) {
require.FailNow(t, "The LookupAgreement wasn't getting release as expected by the blocked tracker")
}
}
+
+func TestCalculateFirstStageRounds(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ type TestCase struct {
+ // input
+ oldBase basics.Round
+ offset uint64
+ accountDataResourceSeparationRound basics.Round
+ catchpointInterval uint64
+ catchpointLookback uint64
+ // output
+ hasIntermediateFirstStageRound bool
+ hasMultipleIntermediateFirstStageRounds bool
+ retOffset uint64
+ }
+ testCases := []TestCase{
+ {0, 6, 1, 10, 3, false, false, 6},
+ {0, 7, 1, 10, 3, true, false, 7},
+ {0, 16, 1, 10, 3, true, false, 7},
+ {0, 17, 1, 10, 3, true, true, 17},
+ {7, 9, 1, 10, 3, false, false, 9},
+ {7, 10, 1, 10, 3, true, false, 10},
+ {7, 19, 1, 10, 3, true, false, 10},
+ {7, 20, 1, 10, 3, true, true, 20},
+ {1, 1, 1, 10, 169, false, false, 1},
+ {1, 9, 1, 10, 169, false, false, 9},
+ {1, 10, 1, 10, 169, true, false, 10},
+ {1, 22, 1, 10, 169, true, true, 20},
+ {1, 95, 100, 1, 3, false, false, 95},
+ {1, 96, 100, 1, 3, true, false, 96},
+ {1, 97, 100, 1, 3, true, true, 97},
+ {1, 97, 99, 10, 3, true, false, 96},
+ {29680, 1, 1, 10000, 320, false, false, 1},
+ {29679, 1, 1, 10000, 320, true, false, 1},
+ {29678, 10003, 1, 10000, 320, true, true, 10002},
+ {79680, 7320, 1, 10000, 320, false, false, 7320},
+ }
+
+ for i, testCase := range testCases {
+ t.Run(fmt.Sprintf("test_case_%d", i), func(t *testing.T) {
+ hasIntermediateFirstStageRound, hasMultipleIntermediateFirstStageRounds, offset :=
+ calculateFirstStageRounds(
+ testCase.oldBase, testCase.offset, testCase.accountDataResourceSeparationRound,
+ testCase.catchpointInterval, testCase.catchpointLookback)
+ require.Equal(
+ t, testCase.hasIntermediateFirstStageRound, hasIntermediateFirstStageRound)
+ require.Equal(
+ t, testCase.hasMultipleIntermediateFirstStageRounds,
+ hasMultipleIntermediateFirstStageRounds)
+ require.Equal(t, testCase.retOffset, offset)
+ })
+ }
+}
+
+func TestCalculateCatchpointRounds(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ type TestCase struct {
+ // input
+ min basics.Round
+ max basics.Round
+ catchpointInterval uint64
+ // output
+ output []basics.Round
+ }
+ testCases := []TestCase{
+ {1, 0, 10, nil},
+ {0, 0, 10, []basics.Round{0}},
+ {11, 19, 10, nil},
+ {11, 20, 10, []basics.Round{20}},
+ {11, 29, 10, []basics.Round{20}},
+ {11, 30, 10, []basics.Round{20, 30}},
+ {10, 20, 10, []basics.Round{10, 20}},
+ {79_680 + 1, 87_000, 10_000, []basics.Round{80_000}},
+ }
+
+ for i, testCase := range testCases {
+ t.Run(fmt.Sprintf("test_case_%d", i), func(t *testing.T) {
+ rounds := calculateCatchpointRounds(
+ testCase.min, testCase.max, testCase.catchpointInterval)
+ require.Equal(t, testCase.output, rounds)
+ })
+ }
+}
+
+// Test that pruning first stage catchpoint database records and catchpoint data files
+// works.
+func TestFirstStageInfoPruning(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // create new protocol version, which has lower lookback
+ testProtocolVersion :=
+ protocol.ConsensusVersion("test-protocol-TestFirstStageInfoPruning")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.CatchpointLookback = 32
+ protoParams.EnableOnlineAccountCatchpoints = true
+ config.Consensus[testProtocolVersion] = protoParams
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+
+ ml := makeMockLedgerForTracker(t, false, 1, testProtocolVersion, accts)
+ defer ml.Close()
+
+ cfg := config.GetDefaultLocal()
+ cfg.CatchpointInterval = 4
+ cfg.CatchpointTracking = 2
+ ct := newCatchpointTracker(t, ml, cfg, ".")
+ defer ct.close()
+
+ temporaryDirectory := t.TempDir()
+ catchpointsDirectory := filepath.Join(temporaryDirectory, CatchpointDirName)
+ err := os.Mkdir(catchpointsDirectory, 0777)
+ require.NoError(t, err)
+
+ ct.dbDirectory = temporaryDirectory
+
+ expectedNumEntries := protoParams.CatchpointLookback / cfg.CatchpointInterval
+
+ numCatchpointsCreated := uint64(0)
+ i := basics.Round(0)
+ lastCatchpointLabel := ""
+
+ for numCatchpointsCreated < expectedNumEntries {
+ i++
+
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(i),
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: testProtocolVersion,
+ },
+ },
+ }
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, 0, 0)
+
+ ml.trackers.newBlock(blk, delta)
+ ml.trackers.committedUpTo(i)
+ ml.addMockBlock(blockEntry{block: blk}, delta)
+
+ if (uint64(i) >= cfg.MaxAcctLookback) && (uint64(i)-cfg.MaxAcctLookback > protoParams.CatchpointLookback) && ((uint64(i)-cfg.MaxAcctLookback)%cfg.CatchpointInterval == 0) {
+ ml.trackers.waitAccountsWriting()
+ catchpointLabel := ct.GetLastCatchpointLabel()
+ require.NotEqual(t, lastCatchpointLabel, catchpointLabel)
+ lastCatchpointLabel = catchpointLabel
+ numCatchpointsCreated++
+ }
+
+ // Let catchpoint data generation finish so that nothing gets skipped.
+ for ct.IsWritingCatchpointDataFile() {
+ time.Sleep(time.Millisecond)
+ }
+ }
+
+ numEntries := uint64(0)
+ i -= basics.Round(cfg.MaxAcctLookback)
+ for i > 0 {
+ _, recordExists, err := selectCatchpointFirstStageInfo(
+ context.Background(), ct.dbs.Rdb.Handle, i)
+ require.NoError(t, err)
+
+ catchpointDataFilePath :=
+ filepath.Join(catchpointsDirectory, makeCatchpointDataFilePath(i))
+ _, err = os.Stat(catchpointDataFilePath)
+ if errors.Is(err, os.ErrNotExist) {
+ require.False(t, recordExists, i)
+ } else {
+ require.NoError(t, err)
+ require.True(t, recordExists, i)
+ numEntries++
+ }
+
+ i--
+ }
+
+ require.Equal(t, expectedNumEntries, numEntries)
+}
+
+// Test that on startup the catchpoint tracker restarts catchpoint's first stage if
+// there is an unfinished first stage record in the database.
+func TestFirstStagePersistence(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // create new protocol version, which has lower lookback
+ testProtocolVersion :=
+ protocol.ConsensusVersion("test-protocol-TestFirstStagePersistence")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.CatchpointLookback = 32
+ protoParams.EnableOnlineAccountCatchpoints = true
+ config.Consensus[testProtocolVersion] = protoParams
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+
+ ml := makeMockLedgerForTracker(t, false, 1, testProtocolVersion, accts)
+ defer ml.Close()
+
+ tempDirectory := t.TempDir()
+ catchpointsDirectory := filepath.Join(tempDirectory, CatchpointDirName)
+
+ cfg := config.GetDefaultLocal()
+ cfg.CatchpointInterval = 4
+ cfg.CatchpointTracking = 2
+ cfg.MaxAcctLookback = 0
+ ct := newCatchpointTracker(
+ t, ml, cfg, filepath.Join(tempDirectory, config.LedgerFilenamePrefix))
+ defer ct.close()
+
+ // Add blocks until the first catchpoint first stage round.
+ firstStageRound := basics.Round(4)
+ for i := basics.Round(1); i <= firstStageRound; i++ {
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: i,
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: testProtocolVersion,
+ },
+ },
+ }
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, 0, 0)
+
+ ml.trackers.newBlock(blk, delta)
+ ml.trackers.committedUpTo(i)
+ ml.addMockBlock(blockEntry{block: blk}, delta)
+ }
+
+ ml.trackers.waitAccountsWriting()
+
+ // Check that the data file exists.
+ catchpointDataFilePath :=
+ filepath.Join(catchpointsDirectory, makeCatchpointDataFilePath(firstStageRound))
+ info, err := os.Stat(catchpointDataFilePath)
+ require.NoError(t, err)
+
+ // Override file.
+ err = os.WriteFile(catchpointDataFilePath, []byte{0}, info.Mode().Perm())
+ require.NoError(t, err)
+
+ // Copy the database.
+ ct.close()
+ ml2 := ml.fork(t)
+ require.NotNil(t, ml2)
+ defer ml2.Close()
+ ml.Close()
+
+ // Insert unfinished first stage record.
+ err = writeCatchpointStateUint64(
+ context.Background(), ml2.dbs.Wdb.Handle, catchpointStateWritingFirstStageInfo, 1)
+ require.NoError(t, err)
+
+ // Delete the database record.
+ err = deleteOldCatchpointFirstStageInfo(
+ context.Background(), ml2.dbs.Wdb.Handle, firstStageRound)
+ require.NoError(t, err)
+
+ // Create a catchpoint tracker and let it restart catchpoint's first stage.
+ ct2 := newCatchpointTracker(
+ t, ml2, cfg, filepath.Join(tempDirectory, config.LedgerFilenamePrefix))
+ defer ct2.close()
+
+ // Check that the catchpoint data file was rewritten.
+ info, err = os.Stat(catchpointDataFilePath)
+ require.NoError(t, err)
+ require.Greater(t, info.Size(), int64(1))
+
+ // Check that the database record exists.
+ _, exists, err := selectCatchpointFirstStageInfo(
+ context.Background(), ml2.dbs.Rdb.Handle, firstStageRound)
+ require.NoError(t, err)
+ require.True(t, exists)
+
+ // Check that the unfinished first stage record is deleted.
+ v, err := readCatchpointStateUint64(
+ context.Background(), ml2.dbs.Rdb.Handle, catchpointStateWritingFirstStageInfo)
+ require.NoError(t, err)
+ require.Zero(t, v)
+}
+
+// Test that on startup the catchpoint tracker restarts catchpoint's second stage if
+// there is an unfinished catchpoint record in the database.
+func TestSecondStagePersistence(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // create new protocol version, which has lower lookback
+ testProtocolVersion :=
+ protocol.ConsensusVersion("test-protocol-TestFirstStagePersistence")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.CatchpointLookback = 32
+ protoParams.EnableOnlineAccountCatchpoints = true
+ config.Consensus[testProtocolVersion] = protoParams
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+
+ ml := makeMockLedgerForTracker(t, false, 1, testProtocolVersion, accts)
+ defer ml.Close()
+
+ tempDirectory := t.TempDir()
+ catchpointsDirectory := filepath.Join(tempDirectory, CatchpointDirName)
+
+ cfg := config.GetDefaultLocal()
+ cfg.CatchpointInterval = 4
+ cfg.CatchpointTracking = 2
+ cfg.MaxAcctLookback = 0
+ ct := newCatchpointTracker(
+ t, ml, cfg, filepath.Join(tempDirectory, config.LedgerFilenamePrefix))
+ defer ct.close()
+
+ secondStageRound := basics.Round(36)
+ firstStageRound := secondStageRound - basics.Round(protoParams.CatchpointLookback)
+ catchpointDataFilePath :=
+ filepath.Join(catchpointsDirectory, makeCatchpointDataFilePath(firstStageRound))
+ var firstStageInfo catchpointFirstStageInfo
+ var catchpointData []byte
+
+ // Add blocks until the first catchpoint round.
+ for i := basics.Round(1); i <= secondStageRound; i++ {
+ if i == secondStageRound {
+ // Save first stage info and data file.
+ var exists bool
+ var err error
+ firstStageInfo, exists, err = selectCatchpointFirstStageInfo(
+ context.Background(), ml.dbs.Rdb.Handle, firstStageRound)
+ require.NoError(t, err)
+ require.True(t, exists)
+
+ catchpointData, err = os.ReadFile(catchpointDataFilePath)
+ require.NoError(t, err)
+ }
+
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: i,
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: testProtocolVersion,
+ },
+ },
+ }
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, 0, 0)
+
+ ml.trackers.newBlock(blk, delta)
+ ml.trackers.committedUpTo(i)
+ ml.addMockBlock(blockEntry{block: blk}, delta)
+
+ // Let catchpoint data generation finish so that nothing gets skipped.
+ for ct.IsWritingCatchpointDataFile() {
+ time.Sleep(time.Millisecond)
+ }
+ }
+
+ ml.trackers.waitAccountsWriting()
+
+ // Check that the data file exists.
+ catchpointFilePath :=
+ filepath.Join(catchpointsDirectory, makeCatchpointFilePath(secondStageRound))
+ info, err := os.Stat(catchpointFilePath)
+ require.NoError(t, err)
+
+ // Override file.
+ err = os.WriteFile(catchpointFilePath, []byte{0}, info.Mode().Perm())
+ require.NoError(t, err)
+
+ // Copy the database.
+ ct.close()
+ ml2 := ml.fork(t)
+ require.NotNil(t, ml2)
+ defer ml2.Close()
+ ml.Close()
+
+ // Restore the (first stage) catchpoint data file.
+ err = os.WriteFile(catchpointDataFilePath, catchpointData, 0644)
+ require.NoError(t, err)
+
+ // Restore the first stage database record.
+ err = insertOrReplaceCatchpointFirstStageInfo(
+ context.Background(), ml2.dbs.Wdb.Handle, firstStageRound, &firstStageInfo)
+ require.NoError(t, err)
+
+ // Insert unfinished catchpoint record.
+ err = insertUnfinishedCatchpoint(
+ context.Background(), ml2.dbs.Wdb.Handle, secondStageRound, crypto.Digest{})
+ require.NoError(t, err)
+
+ // Delete the catchpoint file database record.
+ err = storeCatchpoint(
+ context.Background(), ml2.dbs.Wdb.Handle, secondStageRound, "", "", 0)
+ require.NoError(t, err)
+
+ // Create a catchpoint tracker and let it restart catchpoint's second stage.
+ ct2 := newCatchpointTracker(
+ t, ml2, cfg, filepath.Join(tempDirectory, config.LedgerFilenamePrefix))
+ defer ct2.close()
+
+ // Check that the catchpoint data file was rewritten.
+ info, err = os.Stat(catchpointFilePath)
+ require.NoError(t, err)
+ require.Greater(t, info.Size(), int64(1))
+
+ // Check that the database record exists.
+ filename, _, _, err := getCatchpoint(
+ context.Background(), ml2.dbs.Rdb.Handle, secondStageRound)
+ require.NoError(t, err)
+ require.NotEmpty(t, filename)
+
+ // Check that the unfinished catchpoint database record is deleted.
+ unfinishedCatchpoints, err := selectUnfinishedCatchpoints(
+ context.Background(), ml2.dbs.Rdb.Handle)
+ require.NoError(t, err)
+ require.Empty(t, unfinishedCatchpoints)
+}
+
+// Test that when catchpoint's first stage record is unavailable
+// (e.g. catchpoints were disabled at first stage), the unfinished catchpoint
+// database record is deleted.
+func TestSecondStageDeletesUnfinishedCatchpointRecord(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // create new protocol version, which has lower lookback
+ testProtocolVersion :=
+ protocol.ConsensusVersion("test-protocol-TestFirstStagePersistence")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.CatchpointLookback = 32
+ protoParams.EnableOnlineAccountCatchpoints = true
+ config.Consensus[testProtocolVersion] = protoParams
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+
+ ml := makeMockLedgerForTracker(t, false, 1, testProtocolVersion, accts)
+ defer ml.Close()
+
+ tempDirectory := t.TempDir()
+
+ cfg := config.GetDefaultLocal()
+ cfg.CatchpointInterval = 4
+ cfg.CatchpointTracking = 0
+ cfg.MaxAcctLookback = 0
+ ct := newCatchpointTracker(
+ t, ml, cfg, filepath.Join(tempDirectory, config.LedgerFilenamePrefix))
+ defer ct.close()
+
+ secondStageRound := basics.Round(36)
+
+ // Add blocks that preceed the first catchpoint round.
+ for i := basics.Round(1); i < secondStageRound; i++ {
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: i,
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: testProtocolVersion,
+ },
+ },
+ }
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, 0, 0)
+
+ ml.trackers.newBlock(blk, delta)
+ ml.trackers.committedUpTo(i)
+ ml.addMockBlock(blockEntry{block: blk}, delta)
+ }
+ ml.trackers.waitAccountsWriting()
+
+ // Copy the database.
+ ct.close()
+ ml2 := ml.fork(t)
+ require.NotNil(t, ml2)
+ defer ml2.Close()
+ ml.Close()
+
+ // Configure a new catchpoint tracker with catchpoints enabled.
+ cfg.CatchpointTracking = 2
+ ct2 := newCatchpointTracker(
+ t, ml2, cfg, filepath.Join(tempDirectory, config.LedgerFilenamePrefix))
+ defer ct2.close()
+
+ // Add the last block.
+ {
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: secondStageRound,
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: testProtocolVersion,
+ },
+ },
+ }
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, 0, 0)
+
+ ml2.trackers.newBlock(blk, delta)
+ ml2.trackers.committedUpTo(secondStageRound)
+ ml2.addMockBlock(blockEntry{block: blk}, delta)
+ }
+ ml2.trackers.waitAccountsWriting()
+
+ // Check that the unfinished catchpoint database record is deleted.
+ unfinishedCatchpoints, err := selectUnfinishedCatchpoints(
+ context.Background(), ml2.dbs.Rdb.Handle)
+ require.NoError(t, err)
+ require.Empty(t, unfinishedCatchpoints)
+}
+
+// Test that on startup the catchpoint tracker deletes the unfinished catchpoint
+// database record when the first stage database record is missing.
+func TestSecondStageDeletesUnfinishedCatchpointRecordAfterRestart(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // create new protocol version, which has lower lookback
+ testProtocolVersion :=
+ protocol.ConsensusVersion("test-protocol-TestFirstStagePersistence")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.CatchpointLookback = 32
+ protoParams.EnableOnlineAccountCatchpoints = true
+ config.Consensus[testProtocolVersion] = protoParams
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+
+ ml := makeMockLedgerForTracker(t, false, 1, testProtocolVersion, accts)
+ defer ml.Close()
+
+ cfg := config.GetDefaultLocal()
+ cfg.CatchpointInterval = 4
+ cfg.CatchpointTracking = 0
+ cfg.MaxAcctLookback = 0
+ ct := newCatchpointTracker(t, ml, cfg, ".")
+ defer ct.close()
+
+ secondStageRound := basics.Round(36)
+ firstStageRound := secondStageRound - basics.Round(protoParams.CatchpointLookback)
+
+ // Add blocks until the first catchpoint round.
+ for i := basics.Round(1); i <= secondStageRound; i++ {
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: i,
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: testProtocolVersion,
+ },
+ },
+ }
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, 0, 0)
+
+ ml.trackers.newBlock(blk, delta)
+ ml.trackers.committedUpTo(i)
+ ml.addMockBlock(blockEntry{block: blk}, delta)
+
+ // Let catchpoint data generation finish so that nothing gets skipped.
+ for ct.IsWritingCatchpointDataFile() {
+ time.Sleep(time.Millisecond)
+ }
+ }
+
+ ml.trackers.waitAccountsWriting()
+
+ // Copy the database.
+ ct.close()
+ ml2 := ml.fork(t)
+ require.NotNil(t, ml2)
+ defer ml2.Close()
+ ml.Close()
+
+ // Sanity check: first stage record should be deleted.
+ _, exists, err := selectCatchpointFirstStageInfo(
+ context.Background(), ml2.dbs.Rdb.Handle, firstStageRound)
+ require.NoError(t, err)
+ require.False(t, exists)
+
+ // Insert unfinished catchpoint record.
+ err = insertUnfinishedCatchpoint(
+ context.Background(), ml2.dbs.Wdb.Handle, secondStageRound, crypto.Digest{})
+ require.NoError(t, err)
+
+ // Create a catchpoint tracker and let it restart catchpoint's second stage.
+ ct2 := newCatchpointTracker(t, ml2, cfg, ".")
+ defer ct2.close()
+
+ // Check that the unfinished catchpoint database record is deleted.
+ unfinishedCatchpoints, err := selectUnfinishedCatchpoints(
+ context.Background(), ml2.dbs.Rdb.Handle)
+ require.NoError(t, err)
+ require.Empty(t, unfinishedCatchpoints)
+}
diff --git a/ledger/catchpointwriter.go b/ledger/catchpointwriter.go
index 1962e0a7d..7b7f07d2f 100644
--- a/ledger/catchpointwriter.go
+++ b/ledger/catchpointwriter.go
@@ -18,18 +18,16 @@ package ledger
import (
"archive/tar"
- "compress/gzip"
"context"
"database/sql"
"fmt"
+ "io"
"os"
"path/filepath"
"github.com/algorand/msgp/msgp"
- "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
)
@@ -37,12 +35,6 @@ const (
// BalancesPerCatchpointFileChunk defines the number of accounts that would be stored in each chunk in the catchpoint file.
// note that the last chunk would typically be less than this number.
BalancesPerCatchpointFileChunk = 512
-
- // CatchpointFileVersionV5 is the catchpoint file version that was used when the database schema was V0-V5.
- CatchpointFileVersionV5 = uint64(0200)
-
- // CatchpointFileVersionV6 is the catchpoint file version that is matching database schema V6
- CatchpointFileVersionV6 = uint64(0201)
)
// catchpointWriter is the struct managing the persistence of accounts data into the catchpoint file.
@@ -50,22 +42,19 @@ const (
// the writing is complete. It might take multiple steps until the operation is over, and the caller
// has the option of throttling the CPU utilization in between the calls.
type catchpointWriter struct {
- ctx context.Context
- tx *sql.Tx
- filePath string
- file *os.File
- gzip *gzip.Writer
- tar *tar.Writer
- headerWritten bool
- balancesOffset int
- balancesChunk catchpointFileBalancesChunkV6
- fileHeader *CatchpointFileHeader
- balancesChunkNum uint64
- writtenBytes int64
- blocksRound basics.Round
- blockHeaderDigest crypto.Digest
- label string
- accountsIterator encodedAccountsBatchIter
+ ctx context.Context
+ tx *sql.Tx
+ filePath string
+ totalAccounts uint64
+ totalChunks uint64
+ file *os.File
+ tar *tar.Writer
+ compressor io.WriteCloser
+ balancesChunk catchpointFileBalancesChunkV6
+ balancesChunkNum uint64
+ writtenBytes int64
+ biggestChunkLen uint64
+ accountsIterator encodedAccountsBatchIter
}
type encodedBalanceRecordV5 struct {
@@ -97,95 +86,53 @@ type catchpointFileBalancesChunkV6 struct {
Balances []encodedBalanceRecordV6 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"`
}
-// CatchpointFileHeader is the content we would have in the "content.msgpack" file in the catchpoint tar archive.
-// we need it to be public, as it's being decoded externally by the catchpointdump utility.
-type CatchpointFileHeader struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
-
- Version uint64 `codec:"version"`
- BalancesRound basics.Round `codec:"balancesRound"`
- BlocksRound basics.Round `codec:"blocksRound"`
- Totals ledgercore.AccountTotals `codec:"accountTotals"`
- TotalAccounts uint64 `codec:"accountsCount"`
- TotalChunks uint64 `codec:"chunksCount"`
- Catchpoint string `codec:"catchpoint"`
- BlockHeaderDigest crypto.Digest `codec:"blockHeaderDigest"`
-}
+func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx) (*catchpointWriter, error) {
+ totalAccounts, err := totalAccounts(ctx, tx)
+ if err != nil {
+ return nil, err
+ }
-func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx, blocksRound basics.Round, blockHeaderDigest crypto.Digest, label string) *catchpointWriter {
- return &catchpointWriter{
- ctx: ctx,
- filePath: filePath,
- tx: tx,
- blocksRound: blocksRound,
- blockHeaderDigest: blockHeaderDigest,
- label: label,
+ err = os.MkdirAll(filepath.Dir(filePath), 0700)
+ if err != nil {
+ return nil, err
}
+ file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0644)
+ if err != nil {
+ return nil, err
+ }
+ compressor, err := catchpointStage1Encoder(file)
+ if err != nil {
+ return nil, err
+ }
+ tar := tar.NewWriter(compressor)
+
+ res := &catchpointWriter{
+ ctx: ctx,
+ tx: tx,
+ filePath: filePath,
+ totalAccounts: totalAccounts,
+ totalChunks: (totalAccounts + BalancesPerCatchpointFileChunk - 1) / BalancesPerCatchpointFileChunk,
+ file: file,
+ compressor: compressor,
+ tar: tar,
+ }
+ return res, nil
}
func (cw *catchpointWriter) Abort() error {
cw.accountsIterator.Close()
- if cw.tar != nil {
- cw.tar.Close()
- }
- if cw.gzip != nil {
- cw.gzip.Close()
- }
- if cw.file != nil {
- cw.gzip.Close()
- }
- err := os.Remove(cw.filePath)
- return err
+ cw.tar.Close()
+ cw.compressor.Close()
+ cw.file.Close()
+ return os.Remove(cw.filePath)
}
func (cw *catchpointWriter) WriteStep(stepCtx context.Context) (more bool, err error) {
- if cw.file == nil {
- err = os.MkdirAll(filepath.Dir(cw.filePath), 0700)
- if err != nil {
- return
- }
- cw.file, err = os.OpenFile(cw.filePath, os.O_RDWR|os.O_CREATE, 0644)
- if err != nil {
- return
- }
- cw.gzip = gzip.NewWriter(cw.file)
- cw.tar = tar.NewWriter(cw.gzip)
- }
-
// have we timed-out / canceled by that point ?
if more, err = hasContextDeadlineExceeded(stepCtx); more || err != nil {
return
}
- if cw.fileHeader == nil {
- err = cw.readHeaderFromDatabase(cw.ctx, cw.tx)
- if err != nil {
- return
- }
- }
-
- // have we timed-out / canceled by that point ?
- if more, err = hasContextDeadlineExceeded(stepCtx); more || err != nil {
- return
- }
-
- if !cw.headerWritten {
- encodedHeader := protocol.Encode(cw.fileHeader)
- err = cw.tar.WriteHeader(&tar.Header{
- Name: "content.msgpack",
- Mode: 0600,
- Size: int64(len(encodedHeader)),
- })
- if err != nil {
- return
- }
- _, err = cw.tar.Write(encodedHeader)
- if err != nil {
- return
- }
- cw.headerWritten = true
- }
-
writerRequest := make(chan catchpointFileBalancesChunkV6, 1)
writerResponse := make(chan error, 2)
go cw.asyncWriter(writerRequest, writerResponse, cw.balancesChunkNum)
@@ -235,7 +182,7 @@ func (cw *catchpointWriter) WriteStep(stepCtx context.Context) (more bool, err e
if len(cw.balancesChunk.Balances) > 0 {
cw.balancesChunkNum++
writerRequest <- cw.balancesChunk
- if len(cw.balancesChunk.Balances) < BalancesPerCatchpointFileChunk || cw.balancesChunkNum == cw.fileHeader.TotalChunks {
+ if len(cw.balancesChunk.Balances) < BalancesPerCatchpointFileChunk || cw.balancesChunkNum == cw.totalChunks {
cw.accountsIterator.Close()
// if we're done, wait for the writer to complete it's writing.
err, opened := <-writerResponse
@@ -263,7 +210,7 @@ func (cw *catchpointWriter) asyncWriter(balances chan catchpointFileBalancesChun
encodedChunk := protocol.Encode(&bc)
err := cw.tar.WriteHeader(&tar.Header{
- Name: fmt.Sprintf("balances.%d.%d.msgpack", balancesChunkNum, cw.fileHeader.TotalChunks),
+ Name: fmt.Sprintf("balances.%d.%d.msgpack", balancesChunkNum, cw.totalChunks),
Mode: 0600,
Size: int64(len(encodedChunk)),
})
@@ -276,12 +223,14 @@ func (cw *catchpointWriter) asyncWriter(balances chan catchpointFileBalancesChun
response <- err
break
}
+ if chunkLen := uint64(len(encodedChunk)); cw.biggestChunkLen < chunkLen {
+ cw.biggestChunkLen = chunkLen
+ }
- if len(bc.Balances) < BalancesPerCatchpointFileChunk || balancesChunkNum == cw.fileHeader.TotalChunks {
+ if len(bc.Balances) < BalancesPerCatchpointFileChunk || balancesChunkNum == cw.totalChunks {
cw.tar.Close()
- cw.gzip.Close()
+ cw.compressor.Close()
cw.file.Close()
- cw.file = nil
var fileInfo os.FileInfo
fileInfo, err = os.Stat(cw.filePath)
if err != nil {
@@ -296,32 +245,6 @@ func (cw *catchpointWriter) asyncWriter(balances chan catchpointFileBalancesChun
func (cw *catchpointWriter) readDatabaseStep(ctx context.Context, tx *sql.Tx) (err error) {
cw.balancesChunk.Balances, err = cw.accountsIterator.Next(ctx, tx, BalancesPerCatchpointFileChunk)
- if err == nil {
- cw.balancesOffset += BalancesPerCatchpointFileChunk
- }
- return
-}
-
-func (cw *catchpointWriter) readHeaderFromDatabase(ctx context.Context, tx *sql.Tx) (err error) {
- var header CatchpointFileHeader
- header.BalancesRound, err = accountsRound(tx)
- if err != nil {
- return
- }
- header.Totals, err = accountsTotals(tx, false)
- if err != nil {
- return
- }
- header.TotalAccounts, err = totalAccounts(context.Background(), tx)
- if err != nil {
- return
- }
- header.TotalChunks = (header.TotalAccounts + BalancesPerCatchpointFileChunk - 1) / BalancesPerCatchpointFileChunk
- header.BlocksRound = cw.blocksRound
- header.Catchpoint = cw.label
- header.Version = CatchpointFileVersionV6
- header.BlockHeaderDigest = cw.blockHeaderDigest
- cw.fileHeader = &header
return
}
@@ -330,28 +253,17 @@ func (cw *catchpointWriter) GetSize() int64 {
return cw.writtenBytes
}
-// GetBalancesRound returns the round number of the balances to which this catchpoint is generated for.
-func (cw *catchpointWriter) GetBalancesRound() basics.Round {
- if cw.fileHeader != nil {
- return cw.fileHeader.BalancesRound
- }
- return basics.Round(0)
-}
-
// GetBalancesCount returns the number of balances written to this catchpoint file.
func (cw *catchpointWriter) GetTotalAccounts() uint64 {
- if cw.fileHeader != nil {
- return cw.fileHeader.TotalAccounts
- }
- return 0
+ return cw.totalAccounts
}
-// GetCatchpoint returns the catchpoint string to which this catchpoint file was generated for.
-func (cw *catchpointWriter) GetCatchpoint() string {
- if cw.fileHeader != nil {
- return cw.fileHeader.Catchpoint
- }
- return ""
+func (cw *catchpointWriter) GetTotalChunks() uint64 {
+ return cw.totalChunks
+}
+
+func (cw *catchpointWriter) GetBiggestChunkLen() uint64 {
+ return cw.biggestChunkLen
}
// hasContextDeadlineExceeded examine the given context and see if it was canceled or timed-out.
diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go
index 18010dda8..fa1819d97 100644
--- a/ledger/catchpointwriter_test.go
+++ b/ledger/catchpointwriter_test.go
@@ -25,7 +25,6 @@ import (
"fmt"
"io"
"io/ioutil"
- "os"
"path/filepath"
"runtime"
"testing"
@@ -57,7 +56,7 @@ func makeTestEncodedBalanceRecordV5(t *testing.T) encodedBalanceRecordV5 {
oneTimeSecrets := crypto.GenerateOneTimeSignatureSecrets(0, 1)
vrfSecrets := crypto.GenerateVRFSecrets()
var stateProofID merklesignature.Verifier
- crypto.RandBytes(stateProofID[:])
+ crypto.RandBytes(stateProofID.Commitment[:])
ad := basics.AccountData{
Status: basics.NotParticipating,
@@ -66,7 +65,7 @@ func makeTestEncodedBalanceRecordV5(t *testing.T) encodedBalanceRecordV5 {
RewardedMicroAlgos: basics.MicroAlgos{},
VoteID: oneTimeSecrets.OneTimeSignatureVerifier,
SelectionID: vrfSecrets.PK,
- StateProofID: stateProofID,
+ StateProofID: stateProofID.Commitment,
VoteFirstValid: basics.Round(0x1234123412341234),
VoteLastValid: basics.Round(0x1234123412341234),
VoteKeyDilution: 0x1234123412341234,
@@ -198,14 +197,11 @@ func TestBasicCatchpointWriter(t *testing.T) {
// create new protocol version, which has lower lookback
testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestBasicCatchpointWriter")
protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
- protoParams.MaxBalLookback = 32
- protoParams.SeedLookback = 2
- protoParams.SeedRefreshInterval = 8
+ protoParams.CatchpointLookback = 32
config.Consensus[testProtocolVersion] = protoParams
- temporaryDirectroy, _ := ioutil.TempDir(os.TempDir(), CatchpointDirName)
+ temporaryDirectroy := t.TempDir()
defer func() {
delete(config.Consensus, testProtocolVersion)
- os.RemoveAll(temporaryDirectroy)
}()
accts := ledgertesting.RandomAccounts(300, false)
@@ -215,18 +211,18 @@ func TestBasicCatchpointWriter(t *testing.T) {
conf := config.GetDefaultLocal()
conf.CatchpointInterval = 1
conf.Archival = true
- au := newAcctUpdates(t, ml, conf, ".")
+ au, _ := newAcctUpdates(t, ml, conf)
err := au.loadFromDisk(ml, 0)
require.NoError(t, err)
au.close()
- fileName := filepath.Join(temporaryDirectroy, "15.catchpoint")
- blocksRound := basics.Round(12345)
- blockHeaderDigest := crypto.Hash([]byte{1, 2, 3})
- catchpointLabel := fmt.Sprintf("%d#%v", blocksRound, blockHeaderDigest) // this is not a correct way to create a label, but it's good enough for this unit test
+ fileName := filepath.Join(temporaryDirectroy, "15.data")
readDb := ml.trackerDB().Rdb
err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- writer := makeCatchpointWriter(context.Background(), fileName, tx, blocksRound, blockHeaderDigest, catchpointLabel)
+ writer, err := makeCatchpointWriter(context.Background(), fileName, tx)
+ if err != nil {
+ return err
+ }
for {
more, err := writer.WriteStep(context.Background())
require.NoError(t, err)
@@ -241,53 +237,40 @@ func TestBasicCatchpointWriter(t *testing.T) {
// load the file from disk.
fileContent, err := ioutil.ReadFile(fileName)
require.NoError(t, err)
- gzipReader, err := gzip.NewReader(bytes.NewBuffer(fileContent))
+ compressorReader, err := catchpointStage1Decoder(bytes.NewBuffer(fileContent))
require.NoError(t, err)
- tarReader := tar.NewReader(gzipReader)
- defer gzipReader.Close()
- for {
- header, err := tarReader.Next()
+ defer compressorReader.Close()
+ tarReader := tar.NewReader(compressorReader)
+
+ header, err := tarReader.Next()
+ require.NoError(t, err)
+
+ balancesBlockBytes := make([]byte, header.Size)
+ readComplete := int64(0)
+
+ for readComplete < header.Size {
+ bytesRead, err := tarReader.Read(balancesBlockBytes[readComplete:])
+ readComplete += int64(bytesRead)
if err != nil {
if err == io.EOF {
- break
+ if readComplete == header.Size {
+ break
+ }
+ require.NoError(t, err)
}
- require.NoError(t, err)
break
}
- balancesBlockBytes := make([]byte, header.Size)
- readComplete := int64(0)
+ }
- for readComplete < header.Size {
- bytesRead, err := tarReader.Read(balancesBlockBytes[readComplete:])
- readComplete += int64(bytesRead)
- if err != nil {
- if err == io.EOF {
- if readComplete == header.Size {
- break
- }
- require.NoError(t, err)
- }
- break
- }
- }
+ require.Equal(t, "balances.1.1.msgpack", header.Name)
- if header.Name == "content.msgpack" {
- var fileHeader CatchpointFileHeader
- err = protocol.Decode(balancesBlockBytes, &fileHeader)
- require.NoError(t, err)
- require.Equal(t, catchpointLabel, fileHeader.Catchpoint)
- require.Equal(t, blocksRound, fileHeader.BlocksRound)
- require.Equal(t, blockHeaderDigest, fileHeader.BlockHeaderDigest)
- require.Equal(t, uint64(len(accts)), fileHeader.TotalAccounts)
- } else if header.Name == "balances.1.1.msgpack" {
- var balances catchpointFileBalancesChunkV6
- err = protocol.Decode(balancesBlockBytes, &balances)
- require.NoError(t, err)
- require.Equal(t, uint64(len(accts)), uint64(len(balances.Balances)))
- } else {
- require.Failf(t, "unexpected tar chunk name", "tar chunk name %s", header.Name)
- }
- }
+ var balances catchpointFileBalancesChunkV6
+ err = protocol.Decode(balancesBlockBytes, &balances)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(accts)), uint64(len(balances.Balances)))
+
+ _, err = tarReader.Next()
+ require.Equal(t, io.EOF, err)
}
func TestFullCatchpointWriter(t *testing.T) {
@@ -296,14 +279,11 @@ func TestFullCatchpointWriter(t *testing.T) {
// create new protocol version, which has lower lookback
testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestFullCatchpointWriter")
protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
- protoParams.MaxBalLookback = 32
- protoParams.SeedLookback = 2
- protoParams.SeedRefreshInterval = 8
+ protoParams.CatchpointLookback = 32
config.Consensus[testProtocolVersion] = protoParams
- temporaryDirectroy, _ := ioutil.TempDir(os.TempDir(), CatchpointDirName)
+ temporaryDirectory := t.TempDir()
defer func() {
delete(config.Consensus, testProtocolVersion)
- os.RemoveAll(temporaryDirectroy)
}()
accts := ledgertesting.RandomAccounts(BalancesPerCatchpointFileChunk*3, false)
@@ -313,17 +293,23 @@ func TestFullCatchpointWriter(t *testing.T) {
conf := config.GetDefaultLocal()
conf.CatchpointInterval = 1
conf.Archival = true
- au := newAcctUpdates(t, ml, conf, ".")
+ au, _ := newAcctUpdates(t, ml, conf)
err := au.loadFromDisk(ml, 0)
require.NoError(t, err)
au.close()
- fileName := filepath.Join(temporaryDirectroy, "15.catchpoint")
- blocksRound := basics.Round(12345)
- blockHeaderDigest := crypto.Hash([]byte{1, 2, 3})
- catchpointLabel := fmt.Sprintf("%d#%v", blocksRound, blockHeaderDigest) // this is not a correct way to create a label, but it's good enough for this unit test
+ catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data")
+ catchpointFilePath := filepath.Join(temporaryDirectory, "15.catchpoint")
readDb := ml.trackerDB().Rdb
+ var totalAccounts uint64
+ var totalChunks uint64
+ var biggestChunkLen uint64
+ var accountsRnd basics.Round
+ var totals ledgercore.AccountTotals
err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- writer := makeCatchpointWriter(context.Background(), fileName, tx, blocksRound, blockHeaderDigest, catchpointLabel)
+ writer, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx)
+ if err != nil {
+ return err
+ }
for {
more, err := writer.WriteStep(context.Background())
require.NoError(t, err)
@@ -331,9 +317,34 @@ func TestFullCatchpointWriter(t *testing.T) {
break
}
}
+ totalAccounts = writer.GetTotalAccounts()
+ totalChunks = writer.GetTotalChunks()
+ biggestChunkLen = writer.GetBiggestChunkLen()
+ accountsRnd, err = accountsRound(tx)
+ if err != nil {
+ return
+ }
+ totals, err = accountsTotals(ctx, tx, false)
return
})
require.NoError(t, err)
+ blocksRound := accountsRnd + 1
+ blockHeaderDigest := crypto.Hash([]byte{1, 2, 3})
+ catchpointLabel := fmt.Sprintf("%d#%v", blocksRound, blockHeaderDigest) // this is not a correct way to create a label, but it's good enough for this unit test
+ catchpointFileHeader := CatchpointFileHeader{
+ Version: CatchpointFileVersionV6,
+ BalancesRound: accountsRnd,
+ BlocksRound: blocksRound,
+ Totals: totals,
+ TotalAccounts: totalAccounts,
+ TotalChunks: totalChunks,
+ Catchpoint: catchpointLabel,
+ BlockHeaderDigest: blockHeaderDigest,
+ }
+ err = repackCatchpoint(
+ context.Background(), catchpointFileHeader, biggestChunkLen,
+ catchpointDataFilePath, catchpointFilePath)
+ require.NoError(t, err)
// create a ledger.
var initState ledgercore.InitState
@@ -347,7 +358,7 @@ func TestFullCatchpointWriter(t *testing.T) {
require.NoError(t, err)
// load the file from disk.
- fileContent, err := ioutil.ReadFile(fileName)
+ fileContent, err := ioutil.ReadFile(catchpointFilePath)
require.NoError(t, err)
gzipReader, err := gzip.NewReader(bytes.NewBuffer(fileContent))
require.NoError(t, err)
diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go
index bbfd72f77..ec05c86af 100644
--- a/ledger/catchupaccessor.go
+++ b/ledger/catchupaccessor.go
@@ -94,9 +94,6 @@ type CatchpointCatchupAccessorImpl struct {
// log copied from ledger
log logging.Logger
-
- // Prepared SQL statements for fast accounts DB lookups.
- accountsq *accountsDbQueries
}
// CatchpointCatchupState is the state of the current catchpoint catchup process
@@ -109,7 +106,7 @@ const (
CatchpointCatchupStateLedgerDownload
// CatchpointCatchupStateLastestBlockDownload indicates that we're download the latest block
CatchpointCatchupStateLastestBlockDownload
- // CatchpointCatchupStateBlocksDownload indicates that we're downloading the blocks prior to the latest one ( total of MaxBalLookback blocks )
+ // CatchpointCatchupStateBlocksDownload indicates that we're downloading the blocks prior to the latest one ( total of CatchpointLookback blocks )
CatchpointCatchupStateBlocksDownload
// CatchpointCatchupStateSwitch indicates that we're switching to use the downloaded ledger/blocks content
CatchpointCatchupStateSwitch
@@ -120,24 +117,16 @@ const (
// MakeCatchpointCatchupAccessor creates a CatchpointCatchupAccessor given a ledger
func MakeCatchpointCatchupAccessor(ledger *Ledger, log logging.Logger) CatchpointCatchupAccessor {
- rdb := ledger.trackerDB().Rdb
- wdb := ledger.trackerDB().Wdb
- accountsq, err := accountsInitDbQueries(rdb.Handle, wdb.Handle)
- if err != nil {
- log.Warnf("unable to initialize account db in MakeCatchpointCatchupAccessor : %v", err)
- return nil
- }
return &CatchpointCatchupAccessorImpl{
- ledger: ledger,
- log: log,
- accountsq: accountsq,
+ ledger: ledger,
+ log: log,
}
}
// GetState returns the current state of the catchpoint catchup
func (c *CatchpointCatchupAccessorImpl) GetState(ctx context.Context) (state CatchpointCatchupState, err error) {
var istate uint64
- istate, _, err = c.accountsq.readCatchpointStateUint64(ctx, catchpointStateCatchupState)
+ istate, err = readCatchpointStateUint64(ctx, c.ledger.trackerDB().Rdb.Handle, catchpointStateCatchupState)
if err != nil {
return 0, fmt.Errorf("unable to read catchpoint catchup state '%s': %v", catchpointStateCatchupState, err)
}
@@ -150,7 +139,7 @@ func (c *CatchpointCatchupAccessorImpl) SetState(ctx context.Context, state Catc
if state < CatchpointCatchupStateInactive || state > catchpointCatchupStateLast {
return fmt.Errorf("invalid catchpoint catchup state provided : %d", state)
}
- _, err = c.accountsq.writeCatchpointStateUint64(ctx, catchpointStateCatchupState, uint64(state))
+ err = writeCatchpointStateUint64(ctx, c.ledger.trackerDB().Wdb.Handle, catchpointStateCatchupState, uint64(state))
if err != nil {
return fmt.Errorf("unable to write catchpoint catchup state '%s': %v", catchpointStateCatchupState, err)
}
@@ -159,7 +148,7 @@ func (c *CatchpointCatchupAccessorImpl) SetState(ctx context.Context, state Catc
// GetLabel returns the current catchpoint catchup label
func (c *CatchpointCatchupAccessorImpl) GetLabel(ctx context.Context) (label string, err error) {
- label, _, err = c.accountsq.readCatchpointStateString(ctx, catchpointStateCatchupLabel)
+ label, err = readCatchpointStateString(ctx, c.ledger.trackerDB().Rdb.Handle, catchpointStateCatchupLabel)
if err != nil {
return "", fmt.Errorf("unable to read catchpoint catchup state '%s': %v", catchpointStateCatchupLabel, err)
}
@@ -173,7 +162,7 @@ func (c *CatchpointCatchupAccessorImpl) SetLabel(ctx context.Context, label stri
if err != nil {
return
}
- _, err = c.accountsq.writeCatchpointStateString(ctx, catchpointStateCatchupLabel, label)
+ err = writeCatchpointStateString(ctx, c.ledger.trackerDB().Wdb.Handle, catchpointStateCatchupLabel, label)
if err != nil {
return fmt.Errorf("unable to write catchpoint catchup state '%s': %v", catchpointStateCatchupLabel, err)
}
@@ -194,26 +183,21 @@ func (c *CatchpointCatchupAccessorImpl) ResetStagingBalances(ctx context.Context
return fmt.Errorf("unable to reset catchpoint catchup balances : %v", err)
}
if !newCatchup {
- sq, err := accountsInitDbQueries(tx, tx)
- if err != nil {
- return fmt.Errorf("unable to initialize accountsDbInit: %v", err)
- }
- defer sq.close()
- _, err = sq.writeCatchpointStateUint64(ctx, catchpointStateCatchupBalancesRound, 0)
+ err = writeCatchpointStateUint64(ctx, tx, catchpointStateCatchupBalancesRound, 0)
if err != nil {
return err
}
- _, err = sq.writeCatchpointStateUint64(ctx, catchpointStateCatchupBlockRound, 0)
+ err = writeCatchpointStateUint64(ctx, tx, catchpointStateCatchupBlockRound, 0)
if err != nil {
return err
}
- _, err = sq.writeCatchpointStateString(ctx, catchpointStateCatchupLabel, "")
+ err = writeCatchpointStateString(ctx, tx, catchpointStateCatchupLabel, "")
if err != nil {
return err
}
- _, err = sq.writeCatchpointStateUint64(ctx, catchpointStateCatchupState, 0)
+ err = writeCatchpointStateUint64(ctx, tx, catchpointStateCatchupState, 0)
if err != nil {
return fmt.Errorf("unable to write catchpoint catchup state '%s': %v", catchpointStateCatchupState, err)
}
@@ -236,8 +220,7 @@ type CatchpointCatchupAccessorProgress struct {
// Having the cachedTrie here would help to accelerate the catchup process since the trie maintain an internal cache of nodes.
// While rebuilding the trie, we don't want to force and reload (some) of these nodes into the cache for each catchpoint file chunk.
- cachedTrie *merkletrie.Trie
- evictFrequency uint64
+ cachedTrie *merkletrie.Trie
BalancesWriteDuration time.Duration
CreatablesWriteDuration time.Duration
@@ -281,17 +264,12 @@ func (c *CatchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex
start := time.Now()
ledgerProcessstagingcontentCount.Inc(nil)
err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- sq, err := accountsInitDbQueries(tx, tx)
- if err != nil {
- return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to initialize accountsDbInit: %v", err)
- }
- defer sq.close()
- _, err = sq.writeCatchpointStateUint64(ctx, catchpointStateCatchupBlockRound, uint64(fileHeader.BlocksRound))
+ err = writeCatchpointStateUint64(ctx, tx, catchpointStateCatchupBlockRound, uint64(fileHeader.BlocksRound))
if err != nil {
return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to write catchpoint catchup state '%s': %v", catchpointStateCatchupBlockRound, err)
}
if fileHeader.Version == CatchpointFileVersionV6 {
- _, err = sq.writeCatchpointStateUint64(ctx, catchpointStateCatchupHashRound, uint64(fileHeader.BlocksRound))
+ err = writeCatchpointStateUint64(ctx, tx, catchpointStateCatchupHashRound, uint64(fileHeader.BlocksRound))
if err != nil {
return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to write catchpoint catchup state '%s': %v", catchpointStateCatchupHashRound, err)
}
@@ -622,7 +600,6 @@ func (c *CatchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
if err != nil {
errChan <- err
}
- return
}()
wg.Wait()
@@ -639,7 +616,7 @@ func (c *CatchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
// GetCatchupBlockRound returns the latest block round matching the current catchpoint
func (c *CatchpointCatchupAccessorImpl) GetCatchupBlockRound(ctx context.Context) (round basics.Round, err error) {
var iRound uint64
- iRound, _, err = c.accountsq.readCatchpointStateUint64(ctx, catchpointStateCatchupBlockRound)
+ iRound, err = readCatchpointStateUint64(ctx, c.ledger.trackerDB().Rdb.Handle, catchpointStateCatchupBlockRound)
if err != nil {
return 0, fmt.Errorf("unable to read catchpoint catchup state '%s': %v", catchpointStateCatchupBlockRound, err)
}
@@ -654,13 +631,13 @@ func (c *CatchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl
var totals ledgercore.AccountTotals
var catchpointLabel string
- catchpointLabel, _, err = c.accountsq.readCatchpointStateString(ctx, catchpointStateCatchupLabel)
+ catchpointLabel, err = readCatchpointStateString(ctx, rdb.Handle, catchpointStateCatchupLabel)
if err != nil {
return fmt.Errorf("unable to read catchpoint catchup state '%s': %v", catchpointStateCatchupLabel, err)
}
var iRound uint64
- iRound, _, err = c.accountsq.readCatchpointStateUint64(ctx, catchpointStateCatchupBlockRound)
+ iRound, err = readCatchpointStateUint64(ctx, rdb.Handle, catchpointStateCatchupBlockRound)
if err != nil {
return fmt.Errorf("unable to read catchpoint catchup state '%s': %v", catchpointStateCatchupBlockRound, err)
}
@@ -685,7 +662,7 @@ func (c *CatchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl
return fmt.Errorf("unable to get trie root hash: %v", err)
}
- totals, err = accountsTotals(tx, true)
+ totals, err = accountsTotals(ctx, tx, true)
if err != nil {
return fmt.Errorf("unable to get accounts totals: %v", err)
}
@@ -712,17 +689,16 @@ func (c *CatchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl
func (c *CatchpointCatchupAccessorImpl) StoreBalancesRound(ctx context.Context, blk *bookkeeping.Block) (err error) {
// calculate the balances round and store it. It *should* be identical to the one in the catchpoint file header, but we don't want to
// trust the one in the catchpoint file header, so we'll calculate it ourselves.
- balancesRound := blk.Round() - basics.Round(config.Consensus[blk.CurrentProtocol].MaxBalLookback)
+ catchpointLookback := config.Consensus[blk.CurrentProtocol].CatchpointLookback
+ if catchpointLookback == 0 {
+ catchpointLookback = config.Consensus[blk.CurrentProtocol].MaxBalLookback
+ }
+ balancesRound := blk.Round() - basics.Round(catchpointLookback)
wdb := c.ledger.trackerDB().Wdb
start := time.Now()
ledgerStorebalancesroundCount.Inc(nil)
err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- sq, err := accountsInitDbQueries(tx, tx)
- if err != nil {
- return fmt.Errorf("CatchpointCatchupAccessorImpl::StoreBalancesRound: unable to initialize accountsDbInit: %v", err)
- }
- defer sq.close()
- _, err = sq.writeCatchpointStateUint64(ctx, catchpointStateCatchupBalancesRound, uint64(balancesRound))
+ err = writeCatchpointStateUint64(ctx, tx, catchpointStateCatchupBalancesRound, uint64(balancesRound))
if err != nil {
return fmt.Errorf("CatchpointCatchupAccessorImpl::StoreBalancesRound: unable to write catchpoint catchup state '%s': %v", catchpointStateCatchupBalancesRound, err)
}
@@ -821,29 +797,47 @@ func (c *CatchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err
var balancesRound, hashRound uint64
var totals ledgercore.AccountTotals
- sq, err := accountsInitDbQueries(tx, tx)
+ balancesRound, err = readCatchpointStateUint64(ctx, tx, catchpointStateCatchupBalancesRound)
if err != nil {
- return fmt.Errorf("unable to initialize accountsDbInit: %v", err)
+ return err
}
- defer sq.close()
- balancesRound, _, err = sq.readCatchpointStateUint64(ctx, catchpointStateCatchupBalancesRound)
+ hashRound, err = readCatchpointStateUint64(ctx, tx, catchpointStateCatchupHashRound)
if err != nil {
return err
}
- hashRound, _, err = sq.readCatchpointStateUint64(ctx, catchpointStateCatchupHashRound)
+ totals, err = accountsTotals(ctx, tx, true)
if err != nil {
return err
}
- totals, err = accountsTotals(tx, true)
+ if hashRound == 0 {
+ err = resetAccountHashes(ctx, tx)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Reset the database to version 6. For now, we create a version 6 database from
+ // the catchpoint and let `reloadLedger()` run the normal database migration.
+ // When implementing a new catchpoint format (e.g. adding a new table),
+ // it might be necessary to restore it into the latest database version. To do that, one
+ // will need to run the 6->7 migration code manually here or in a similar function to create
+ // onlineaccounts and other V7 tables.
+ err = accountsReset(ctx, tx)
if err != nil {
return err
}
-
- if hashRound == 0 {
- err = resetAccountHashes(tx)
+ {
+ tp := trackerDBParams{
+ initAccounts: c.ledger.GenesisAccounts(),
+ initProto: c.ledger.GenesisProtoVersion(),
+ catchpointEnabled: c.ledger.catchpoint.catchpointEnabled(),
+ dbPathPrefix: c.ledger.catchpoint.dbDirectory,
+ blockDb: c.ledger.blockDBs,
+ }
+ _, err = runMigrations(ctx, tx, tp, c.ledger.log, 6 /*target database version*/)
if err != nil {
return err
}
@@ -864,29 +858,29 @@ func (c *CatchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err
return err
}
- _, err = sq.writeCatchpointStateUint64(ctx, catchpointStateCatchupBalancesRound, 0)
+ err = writeCatchpointStateUint64(ctx, tx, catchpointStateCatchupBalancesRound, 0)
if err != nil {
return err
}
- _, err = sq.writeCatchpointStateUint64(ctx, catchpointStateCatchupBlockRound, 0)
+ err = writeCatchpointStateUint64(ctx, tx, catchpointStateCatchupBlockRound, 0)
if err != nil {
return err
}
- _, err = sq.writeCatchpointStateString(ctx, catchpointStateCatchupLabel, "")
+ err = writeCatchpointStateString(ctx, tx, catchpointStateCatchupLabel, "")
if err != nil {
return err
}
if hashRound != 0 {
- _, err = sq.writeCatchpointStateUint64(ctx, catchpointStateCatchupHashRound, 0)
+ err = writeCatchpointStateUint64(ctx, tx, catchpointStateCatchupHashRound, 0)
if err != nil {
return err
}
}
- _, err = sq.writeCatchpointStateUint64(ctx, catchpointStateCatchupState, 0)
+ err = writeCatchpointStateUint64(ctx, tx, catchpointStateCatchupState, 0)
if err != nil {
return fmt.Errorf("unable to write catchpoint catchup state '%s': %v", catchpointStateCatchupState, err)
}
diff --git a/ledger/evalbench_test.go b/ledger/evalbench_test.go
index e80aa9147..265579cf3 100644
--- a/ledger/evalbench_test.go
+++ b/ledger/evalbench_test.go
@@ -163,8 +163,7 @@ func (g *benchAppOptInsTxnGenerator) Prepare(tb testing.TB, addrs []basics.Addre
appIdxPerm := rand.Perm(g.NumApps)
for j := 0; j < rand.Int()%(maxAppsOptedIn+1); j++ {
- var appIdx basics.AppIndex
- appIdx = basics.AppIndex(appIdxPerm[j] + 1)
+ appIdx := basics.AppIndex(appIdxPerm[j] + 1)
acctOptIns[appIdx] = struct{}{}
txn := transactions.Transaction{
diff --git a/ledger/evalindexer.go b/ledger/evalindexer.go
index a27d6118b..5f11874c4 100644
--- a/ledger/evalindexer.go
+++ b/ledger/evalindexer.go
@@ -43,6 +43,8 @@ type indexerLedgerForEval interface {
GetAssetCreator(map[basics.AssetIndex]struct{}) (map[basics.AssetIndex]FoundAddress, error)
GetAppCreator(map[basics.AppIndex]struct{}) (map[basics.AppIndex]FoundAddress, error)
LatestTotals() (ledgercore.AccountTotals, error)
+
+ BlockHdrCached(basics.Round) (bookkeeping.BlockHeader, error)
}
// FoundAddress is a wrapper for an address and a boolean.
@@ -87,6 +89,11 @@ func (l indexerLedgerConnector) BlockHdr(round basics.Round) (bookkeeping.BlockH
return l.il.LatestBlockHdr()
}
+// BlockHdrCached is part of LedgerForEvaluator interface.
+func (l indexerLedgerConnector) BlockHdrCached(round basics.Round) (bookkeeping.BlockHeader, error) {
+ return l.il.BlockHdrCached(round)
+}
+
// CheckDup is part of LedgerForEvaluator interface.
func (l indexerLedgerConnector) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
// This function is not used by evaluator.
@@ -190,10 +197,10 @@ func (l indexerLedgerConnector) LatestTotals() (rnd basics.Round, totals ledgerc
return
}
-// CompactCertVoters is part of LedgerForEvaluator interface.
-func (l indexerLedgerConnector) CompactCertVoters(_ basics.Round) (*ledgercore.VotersForRound, error) {
+// VotersForStateProof is part of LedgerForEvaluator interface.
+func (l indexerLedgerConnector) VotersForStateProof(_ basics.Round) (*ledgercore.VotersForRound, error) {
// This function is not used by evaluator.
- return nil, errors.New("CompactCertVoters() not implemented")
+ return nil, errors.New("VotersForStateProof() not implemented")
}
func makeIndexerLedgerConnector(il indexerLedgerForEval, genesisHash crypto.Digest, genesisProto config.ConsensusParams, latestRound basics.Round, roundResources EvalForIndexerResources) indexerLedgerConnector {
diff --git a/ledger/evalindexer_test.go b/ledger/evalindexer_test.go
index 8e4d4f9f7..23997afa5 100644
--- a/ledger/evalindexer_test.go
+++ b/ledger/evalindexer_test.go
@@ -47,6 +47,10 @@ func (il indexerLedgerForEvalImpl) LatestBlockHdr() (bookkeeping.BlockHeader, er
return il.l.BlockHdr(il.latestRound)
}
+func (il indexerLedgerForEvalImpl) BlockHdrCached(round basics.Round) (bookkeeping.BlockHeader, error) {
+ return il.l.BlockHdrCached(round)
+}
+
// The value of the returned map is nil iff the account was not found.
func (il indexerLedgerForEvalImpl) LookupWithoutRewards(addresses map[basics.Address]struct{}) (map[basics.Address]*ledgercore.AccountData, error) {
res := make(map[basics.Address]*ledgercore.AccountData)
@@ -318,6 +322,7 @@ func TestResourceCaching(t *testing.T) {
Timestamp: 0,
}
l := newTestLedger(t, genesisBalances)
+ defer l.Close()
genesisBlockHeader, err := l.BlockHdr(basics.Round(0))
require.NoError(t, err)
diff --git a/ledger/internal/appcow_test.go b/ledger/internal/appcow_test.go
index 07669d61e..76b4f1f8d 100644
--- a/ledger/internal/appcow_test.go
+++ b/ledger/internal/appcow_test.go
@@ -91,11 +91,15 @@ func (ml *emptyLedger) txnCounter() uint64 {
return 0
}
-func (ml *emptyLedger) blockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error) {
+func (ml *emptyLedger) BlockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error) {
return bookkeeping.BlockHeader{}, nil
}
-func (ml *emptyLedger) compactCertNext() basics.Round {
+func (ml *emptyLedger) blockHdrCached(rnd basics.Round) (bookkeeping.BlockHeader, error) {
+ return bookkeeping.BlockHeader{}, nil
+}
+
+func (ml *emptyLedger) GetStateProofNextRound() basics.Round {
return basics.Round(0)
}
diff --git a/ledger/internal/applications.go b/ledger/internal/applications.go
index 8c56331dd..27d306ac9 100644
--- a/ledger/internal/applications.go
+++ b/ledger/internal/applications.go
@@ -20,6 +20,7 @@ import (
"fmt"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger/apply"
@@ -48,6 +49,9 @@ type cowForLogicLedger interface {
allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error)
txnCounter() uint64
incTxnCount()
+
+ // The method should use the txtail to ensure MaxTxnLife+1 headers back are available
+ blockHdrCached(round basics.Round) (bookkeeping.BlockHeader, error)
}
func newLogicLedger(cow cowForLogicLedger) *logicLedger {
@@ -153,6 +157,10 @@ func (al *logicLedger) LatestTimestamp() int64 {
return al.cow.prevTimestamp()
}
+func (al *logicLedger) BlockHdrCached(round basics.Round) (bookkeeping.BlockHeader, error) {
+ return al.cow.blockHdrCached(round)
+}
+
func (al *logicLedger) OptedIn(addr basics.Address, appIdx basics.AppIndex) (bool, error) {
return al.cow.allocated(addr, appIdx, false)
}
@@ -274,9 +282,9 @@ func (al *logicLedger) Perform(gi int, ep *logic.EvalParams) error {
// We don't check min balances during in app txns.
- // func (eval *BlockEvaluator) checkMinBalance will take care of
- // it when the top-level txn concludes, as because cow will return
- // all changed accounts in modifiedAccounts().
+ // func (eval *BlockEvaluator) checkMinBalance will take care of it when the
+ // top-level txn concludes, because cow will return all changed accounts in
+ // modifiedAccounts().
return nil
diff --git a/ledger/internal/applications_test.go b/ledger/internal/applications_test.go
index 2b336970d..ea28712c9 100644
--- a/ledger/internal/applications_test.go
+++ b/ledger/internal/applications_test.go
@@ -24,6 +24,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger/ledgercore"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
@@ -145,12 +146,17 @@ func (c *mockCowForLogicLedger) allocated(addr basics.Address, aidx basics.AppIn
return found, nil
}
+func (c *mockCowForLogicLedger) txnCounter() uint64 {
+ return c.txc
+}
+
func (c *mockCowForLogicLedger) incTxnCount() {
c.txc++
}
-func (c *mockCowForLogicLedger) txnCounter() uint64 {
- return c.txc
+// No unit tests care about this yet, so this is a lame implementation
+func (c *mockCowForLogicLedger) blockHdrCached(round basics.Round) (bookkeeping.BlockHeader, error) {
+ return bookkeeping.BlockHeader{Round: round}, nil
}
func newCowMock(creatables []modsData) *mockCowForLogicLedger {
diff --git a/ledger/internal/apptxn_test.go b/ledger/internal/apptxn_test.go
index 94f72b0e2..fdbc64aca 100644
--- a/ledger/internal/apptxn_test.go
+++ b/ledger/internal/apptxn_test.go
@@ -300,7 +300,7 @@ submit: itxn_submit
require.True(t, in)
require.Equal(t, amount, uint64(0))
- // Now, suceed, because opted in.
+ // Now, succeed, because opted in.
eval = nextBlock(t, l)
txn(t, l, eval, &fundgold)
endBlock(t, l, eval)
@@ -1890,7 +1890,7 @@ func TestInnerAppVersionCalling(t *testing.T) {
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // 31 allowed inner appls. v33 lowered proto.MinInnerApplVersion
+ // 31 allowed inner appls. v34 lowered proto.MinInnerApplVersion
testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
defer dl.Close()
@@ -1977,7 +1977,7 @@ itxn_begin
itxn_submit`,
}
- if ver <= 32 {
+ if ver <= 33 {
dl.txn(&call, "inner app call with version v5 < v6")
call.ForeignApps[0] = v6id
dl.txn(&call, "overspend") // it tried to execute, but test doesn't bother funding
@@ -1986,7 +1986,7 @@ itxn_submit`,
createAndOptin.ApplicationArgs = [][]byte{three.Program, three.Program}
dl.txn(&createAndOptin, "inner app call with version v3 < v6")
- // nor v5 in proto ver 32
+ // nor v5 in proto ver 33
createAndOptin.ApplicationArgs = [][]byte{five.Program, five.Program}
dl.txn(&createAndOptin, "inner app call with version v5 < v6")
@@ -1994,7 +1994,7 @@ itxn_submit`,
createAndOptin.ApplicationArgs = [][]byte{six.Program, six.Program}
dl.txn(&createAndOptin, "overspend") // passed the checks, but is an overspend
} else {
- // after 32 proto.MinInnerApplVersion is lowered to 4, so calls and optins to v5 are ok
+ // after 33 proto.MinInnerApplVersion is lowered to 4, so calls and optins to v5 are ok
dl.txn(&call, "overspend") // it tried to execute, but test doesn't bother funding
dl.txn(&optin, "overspend") // it tried to execute, but test doesn't bother funding
optin.ForeignApps[0] = v5withv3csp // but we can't optin to a v5 if it has an old csp
@@ -2152,7 +2152,7 @@ func TestAppDowngrade(t *testing.T) {
// Downgrade (allowed for pre 6 programs until MinInnerApplVersion was lowered)
update.ClearStateProgram = four.Program
- if ver <= 32 {
+ if ver <= 33 {
dl.fullBlock(update.Noted("actually a repeat of first upgrade"))
} else {
dl.txn(update.Noted("actually a repeat of first upgrade"), "clearstate program version downgrade")
@@ -3146,7 +3146,7 @@ itxn_submit
}
dl.beginBlock()
- if ver <= 32 {
+ if ver <= 33 {
dl.txgroup("invalid Account reference", &fund0, &fund1, &callTx)
dl.endBlock()
return
diff --git a/ledger/internal/compactcert.go b/ledger/internal/compactcert.go
deleted file mode 100644
index ca2be6d19..000000000
--- a/ledger/internal/compactcert.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package internal
-
-import (
- "fmt"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto/compactcert"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/protocol"
-)
-
-// AcceptableCompactCertWeight computes the acceptable signed weight
-// of a compact cert if it were to appear in a transaction with a
-// particular firstValid round. Earlier rounds require a smaller cert.
-// votersHdr specifies the block that contains the Merkle commitment of
-// the voters for this compact cert (and thus the compact cert is for
-// votersHdr.Round() + CompactCertRounds).
-//
-// logger must not be nil; use at least logging.Base()
-func AcceptableCompactCertWeight(votersHdr bookkeeping.BlockHeader, firstValid basics.Round, logger logging.Logger) uint64 {
- proto := config.Consensus[votersHdr.CurrentProtocol]
- certRound := votersHdr.Round + basics.Round(proto.CompactCertRounds)
- total := votersHdr.CompactCert[protocol.CompactCertBasic].CompactCertVotersTotal
-
- // The acceptable weight depends on the elapsed time (in rounds)
- // from the block we are trying to construct a certificate for.
- // Start by subtracting the round number of the block being certified.
- // If that round hasn't even passed yet, require 100% votes in cert.
- offset := firstValid.SubSaturate(certRound)
- if offset == 0 {
- return total.ToUint64()
- }
-
- // During the first proto.CompactCertRound/2 + 1 + 1 blocks, the
- // signatures are still being broadcast, so, continue requiring
- // 100% votes.
- //
- // The first +1 comes from CompactCertWorker.broadcastSigs: it only
- // broadcasts signatures for round R starting with round R+1, to
- // ensure nodes have the block for round R already in their ledger,
- // to check the sig.
- //
- // The second +1 comes from the fact that, if we are checking this
- // acceptable weight to decide whether to allow this transaction in
- // a block, the transaction was sent out one round ago.
- offset = offset.SubSaturate(basics.Round(proto.CompactCertRounds/2 + 2))
- if offset == 0 {
- return total.ToUint64()
- }
-
- // In the next proto.CompactCertRounds/2 blocks, linearly scale
- // the acceptable weight from 100% to CompactCertWeightThreshold.
- // If we are outside of that window, accept any weight at or above
- // CompactCertWeightThreshold.
- provenWeight, overflowed := basics.Muldiv(total.ToUint64(), uint64(proto.CompactCertWeightThreshold), 1<<32)
- if overflowed || provenWeight > total.ToUint64() {
- // Shouldn't happen, but a safe fallback is to accept a larger cert.
- logger.Warnf("AcceptableCompactCertWeight(%d, %d, %d, %d) overflow provenWeight",
- total, proto.CompactCertRounds, certRound, firstValid)
- return 0
- }
-
- if offset >= basics.Round(proto.CompactCertRounds/2) {
- return provenWeight
- }
-
- scaledWeight, overflowed := basics.Muldiv(total.ToUint64()-provenWeight, proto.CompactCertRounds/2-uint64(offset), proto.CompactCertRounds/2)
- if overflowed {
- // Shouldn't happen, but a safe fallback is to accept a larger cert.
- logger.Warnf("AcceptableCompactCertWeight(%d, %d, %d, %d) overflow scaledWeight",
- total, proto.CompactCertRounds, certRound, firstValid)
- return 0
- }
-
- w, overflowed := basics.OAdd(provenWeight, scaledWeight)
- if overflowed {
- // Shouldn't happen, but a safe fallback is to accept a larger cert.
- logger.Warnf("AcceptableCompactCertWeight(%d, %d, %d, %d) overflow provenWeight (%d) + scaledWeight (%d)",
- total, proto.CompactCertRounds, certRound, firstValid, provenWeight, scaledWeight)
- return 0
- }
-
- return w
-}
-
-// CompactCertParams computes the parameters for building or verifying
-// a compact cert for block hdr, using voters from block votersHdr.
-func CompactCertParams(votersHdr bookkeeping.BlockHeader, hdr bookkeeping.BlockHeader) (res compactcert.Params, err error) {
- proto := config.Consensus[votersHdr.CurrentProtocol]
-
- if proto.CompactCertRounds == 0 {
- err = fmt.Errorf("compact certs not enabled")
- return
- }
-
- if votersHdr.Round%basics.Round(proto.CompactCertRounds) != 0 {
- err = fmt.Errorf("votersHdr %d not a multiple of %d",
- votersHdr.Round, proto.CompactCertRounds)
- return
- }
-
- if hdr.Round != votersHdr.Round+basics.Round(proto.CompactCertRounds) {
- err = fmt.Errorf("certifying block %d not %d ahead of voters %d",
- hdr.Round, proto.CompactCertRounds, votersHdr.Round)
- return
- }
-
- totalWeight := votersHdr.CompactCert[protocol.CompactCertBasic].CompactCertVotersTotal.ToUint64()
- provenWeight, overflowed := basics.Muldiv(totalWeight, uint64(proto.CompactCertWeightThreshold), 1<<32)
- if overflowed {
- err = fmt.Errorf("overflow computing provenWeight[%d]: %d * %d / (1<<32)",
- hdr.Round, totalWeight, proto.CompactCertWeightThreshold)
- return
- }
-
- res = compactcert.Params{
- Msg: hdr,
- ProvenWeight: provenWeight,
- SigRound: hdr.Round,
- SecKQ: proto.CompactCertSecKQ,
- }
- return
-}
-
-// validateCompactCert checks that a compact cert is valid.
-func validateCompactCert(certHdr bookkeeping.BlockHeader, cert compactcert.Cert, votersHdr bookkeeping.BlockHeader, nextCertRnd basics.Round, atRound basics.Round) error {
- proto := config.Consensus[certHdr.CurrentProtocol]
-
- if proto.CompactCertRounds == 0 {
- return fmt.Errorf("compact certs not enabled: rounds = %d", proto.CompactCertRounds)
- }
-
- if certHdr.Round%basics.Round(proto.CompactCertRounds) != 0 {
- return fmt.Errorf("cert at %d for non-multiple of %d", certHdr.Round, proto.CompactCertRounds)
- }
-
- votersRound := certHdr.Round.SubSaturate(basics.Round(proto.CompactCertRounds))
- if votersRound != votersHdr.Round {
- return fmt.Errorf("new cert is for %d (voters %d), but votersHdr from %d",
- certHdr.Round, votersRound, votersHdr.Round)
- }
-
- if nextCertRnd == 0 || nextCertRnd != certHdr.Round {
- return fmt.Errorf("expecting cert for %d, but new cert is for %d (voters %d)",
- nextCertRnd, certHdr.Round, votersRound)
- }
-
- acceptableWeight := AcceptableCompactCertWeight(votersHdr, atRound, logging.Base())
- if cert.SignedWeight < acceptableWeight {
- return fmt.Errorf("insufficient weight at %d: %d < %d",
- atRound, cert.SignedWeight, acceptableWeight)
- }
-
- ccParams, err := CompactCertParams(votersHdr, certHdr)
- if err != nil {
- return err
- }
-
- verif := compactcert.MkVerifier(ccParams, votersHdr.CompactCert[protocol.CompactCertBasic].CompactCertVoters)
- return verif.Verify(&cert)
-}
diff --git a/ledger/internal/compactcert_test.go b/ledger/internal/compactcert_test.go
deleted file mode 100644
index 9bf83361c..000000000
--- a/ledger/internal/compactcert_test.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package internal
-
-import (
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto/compactcert"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-func TestValidateCompactCert(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var certHdr bookkeeping.BlockHeader
- var cert compactcert.Cert
- var votersHdr bookkeeping.BlockHeader
- var nextCertRnd basics.Round
- var atRound basics.Round
-
- // will definitely fail with nothing set up
- err := validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
- t.Log(err)
- require.NotNil(t, err)
-
- certHdr.CurrentProtocol = "TestValidateCompactCert"
- certHdr.Round = 1
- proto := config.Consensus[certHdr.CurrentProtocol]
- proto.CompactCertRounds = 2
- config.Consensus[certHdr.CurrentProtocol] = proto
-
- err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
- // still err, but a different err case to cover
- t.Log(err)
- require.NotNil(t, err)
-
- certHdr.Round = 4
- votersHdr.Round = 4
- err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
- // still err, but a different err case to cover
- t.Log(err)
- require.NotNil(t, err)
-
- votersHdr.Round = 2
- err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
- // still err, but a different err case to cover
- t.Log(err)
- require.NotNil(t, err)
-
- nextCertRnd = 4
- err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
- // still err, but a different err case to cover
- t.Log(err)
- require.NotNil(t, err)
-
- votersHdr.CurrentProtocol = certHdr.CurrentProtocol
- err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
- // still err, but a different err case to cover
- t.Log(err)
- require.NotNil(t, err)
-
- votersHdr.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState)
- cc := votersHdr.CompactCert[protocol.CompactCertBasic]
- cc.CompactCertVotersTotal.Raw = 100
- votersHdr.CompactCert[protocol.CompactCertBasic] = cc
- err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
- // still err, but a different err case to cover
- t.Log(err)
- require.NotNil(t, err)
-
- cert.SignedWeight = 101
- err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
- // still err, but a different err case to cover
- t.Log(err)
- require.NotNil(t, err)
-
- // Above cases leave validateCompactCert() with 100% coverage.
- // crypto/compactcert.Verify has its own tests
-}
-
-func TestAcceptableCompactCertWeight(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var votersHdr bookkeeping.BlockHeader
- var firstValid basics.Round
- logger := logging.TestingLog(t)
-
- votersHdr.CurrentProtocol = "TestAcceptableCompactCertWeight"
- proto := config.Consensus[votersHdr.CurrentProtocol]
- proto.CompactCertRounds = 2
- config.Consensus[votersHdr.CurrentProtocol] = proto
- out := AcceptableCompactCertWeight(votersHdr, firstValid, logger)
- require.Equal(t, uint64(0), out)
-
- votersHdr.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState)
- cc := votersHdr.CompactCert[protocol.CompactCertBasic]
- cc.CompactCertVotersTotal.Raw = 100
- votersHdr.CompactCert[protocol.CompactCertBasic] = cc
- out = AcceptableCompactCertWeight(votersHdr, firstValid, logger)
- require.Equal(t, uint64(100), out)
-
- // this should exercise the second return case
- firstValid = basics.Round(5)
- out = AcceptableCompactCertWeight(votersHdr, firstValid, logger)
- require.Equal(t, uint64(100), out)
-
- firstValid = basics.Round(6)
- proto.CompactCertWeightThreshold = 999999999
- config.Consensus[votersHdr.CurrentProtocol] = proto
- out = AcceptableCompactCertWeight(votersHdr, firstValid, logger)
- require.Equal(t, uint64(0x17), out)
-
- proto.CompactCertRounds = 10000
- votersHdr.Round = 10000
- firstValid = basics.Round(29000)
- config.Consensus[votersHdr.CurrentProtocol] = proto
- cc.CompactCertVotersTotal.Raw = 0x7fffffffffffffff
- votersHdr.CompactCert[protocol.CompactCertBasic] = cc
- proto.CompactCertWeightThreshold = 0x7fffffff
- config.Consensus[votersHdr.CurrentProtocol] = proto
- out = AcceptableCompactCertWeight(votersHdr, firstValid, logger)
- require.Equal(t, uint64(0x4cd35a85213a92a2), out)
-
- // Covers everything except "overflow that shouldn't happen" branches
-}
-
-func TestCompactCertParams(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var votersHdr bookkeeping.BlockHeader
- var hdr bookkeeping.BlockHeader
-
- res, err := CompactCertParams(votersHdr, hdr)
- require.Error(t, err) // not enabled
-
- votersHdr.CurrentProtocol = "TestCompactCertParams"
- proto := config.Consensus[votersHdr.CurrentProtocol]
- proto.CompactCertRounds = 2
- config.Consensus[votersHdr.CurrentProtocol] = proto
- votersHdr.Round = 1
- res, err = CompactCertParams(votersHdr, hdr)
- require.Error(t, err) // wrong round
-
- votersHdr.Round = 2
- hdr.Round = 3
- res, err = CompactCertParams(votersHdr, hdr)
- require.Error(t, err) // wrong round
-
- hdr.Round = 4
- res, err = CompactCertParams(votersHdr, hdr)
- require.NoError(t, err)
- require.Equal(t, hdr.Round, res.SigRound)
-
- // Covers all cases except overflow
-}
diff --git a/ledger/internal/cow.go b/ledger/internal/cow.go
index f371bade7..0baaa9fc6 100644
--- a/ledger/internal/cow.go
+++ b/ledger/internal/cow.go
@@ -53,8 +53,9 @@ type roundCowParent interface {
checkDup(basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error
txnCounter() uint64
getCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error)
- compactCertNext() basics.Round
- blockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error)
+ GetStateProofNextRound() basics.Round
+ BlockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error)
+ blockHdrCached(rnd basics.Round) (bookkeeping.BlockHeader, error)
getStorageCounts(addr basics.Address, aidx basics.AppIndex, global bool) (basics.StateSchema, error)
// note: getStorageLimits is redundant with the other methods
// and is provided to optimize state schema lookups
@@ -216,15 +217,19 @@ func (cb *roundCowState) txnCounter() uint64 {
return cb.lookupParent.txnCounter() + cb.txnCount
}
-func (cb *roundCowState) compactCertNext() basics.Round {
- if cb.mods.CompactCertNext != 0 {
- return cb.mods.CompactCertNext
+func (cb *roundCowState) GetStateProofNextRound() basics.Round {
+ if cb.mods.StateProofNext != 0 {
+ return cb.mods.StateProofNext
}
- return cb.lookupParent.compactCertNext()
+ return cb.lookupParent.GetStateProofNextRound()
}
-func (cb *roundCowState) blockHdr(r basics.Round) (bookkeeping.BlockHeader, error) {
- return cb.lookupParent.blockHdr(r)
+func (cb *roundCowState) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) {
+ return cb.lookupParent.BlockHdr(r)
+}
+
+func (cb *roundCowState) blockHdrCached(r basics.Round) (bookkeeping.BlockHeader, error) {
+ return cb.lookupParent.blockHdrCached(r)
}
func (cb *roundCowState) incTxnCount() {
@@ -232,15 +237,15 @@ func (cb *roundCowState) incTxnCount() {
}
func (cb *roundCowState) addTx(txn transactions.Transaction, txid transactions.Txid) {
- cb.mods.Txids[txid] = txn.LastValid
+ cb.mods.Txids[txid] = ledgercore.IncludedTransactions{LastValid: txn.LastValid, Intra: uint64(len(cb.mods.Txids))}
cb.incTxnCount()
if txn.Lease != [32]byte{} {
cb.mods.Txleases[ledgercore.Txlease{Sender: txn.Sender, Lease: txn.Lease}] = txn.LastValid
}
}
-func (cb *roundCowState) setCompactCertNext(rnd basics.Round) {
- cb.mods.CompactCertNext = rnd
+func (cb *roundCowState) SetStateProofNextRound(rnd basics.Round) {
+ cb.mods.StateProofNext = rnd
}
func (cb *roundCowState) child(hint int) *roundCowState {
@@ -248,7 +253,7 @@ func (cb *roundCowState) child(hint int) *roundCowState {
lookupParent: cb,
commitParent: cb,
proto: cb.proto,
- mods: ledgercore.MakeStateDelta(cb.mods.Hdr, cb.mods.PrevTimestamp, hint, cb.mods.CompactCertNext),
+ mods: ledgercore.MakeStateDelta(cb.mods.Hdr, cb.mods.PrevTimestamp, hint, cb.mods.StateProofNext),
sdeltas: make(map[basics.Address]map[storagePtr]*storageDelta),
}
@@ -262,8 +267,9 @@ func (cb *roundCowState) child(hint int) *roundCowState {
func (cb *roundCowState) commitToParent() {
cb.commitParent.mods.Accts.MergeAccounts(cb.mods.Accts)
- for txid, lv := range cb.mods.Txids {
- cb.commitParent.mods.Txids[txid] = lv
+ commitParentBaseIdx := uint64(len(cb.commitParent.mods.Txids))
+ for txid, incTxn := range cb.mods.Txids {
+ cb.commitParent.mods.Txids[txid] = ledgercore.IncludedTransactions{LastValid: incTxn.LastValid, Intra: commitParentBaseIdx + incTxn.Intra}
}
cb.commitParent.txnCount += cb.txnCount
@@ -287,7 +293,7 @@ func (cb *roundCowState) commitToParent() {
}
}
}
- cb.commitParent.mods.CompactCertNext = cb.mods.CompactCertNext
+ cb.commitParent.mods.StateProofNext = cb.mods.StateProofNext
}
func (cb *roundCowState) modifiedAccounts() []basics.Address {
diff --git a/ledger/internal/cow_test.go b/ledger/internal/cow_test.go
index 9c5ffcafb..32e6a36e4 100644
--- a/ledger/internal/cow_test.go
+++ b/ledger/internal/cow_test.go
@@ -89,11 +89,11 @@ func (ml *mockLedger) txnCounter() uint64 {
return 0
}
-func (ml *mockLedger) compactCertNext() basics.Round {
+func (ml *mockLedger) GetStateProofNextRound() basics.Round {
return 0
}
-func (ml *mockLedger) blockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error) {
+func (ml *mockLedger) BlockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error) {
err, hit := ml.blockErr[rnd]
if hit {
return bookkeeping.BlockHeader{}, err
@@ -102,6 +102,10 @@ func (ml *mockLedger) blockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error
return hdr, nil
}
+func (ml *mockLedger) blockHdrCached(rnd basics.Round) (bookkeeping.BlockHeader, error) {
+ return ml.blockHdrCached(rnd)
+}
+
func checkCowByUpdate(t *testing.T, cow *roundCowState, delta ledgercore.AccountDeltas) {
for i := 0; i < delta.Len(); i++ {
addr, data := delta.GetByIdx(i)
diff --git a/ledger/internal/double_test.go b/ledger/internal/double_test.go
index 84f1f092b..212599e32 100644
--- a/ledger/internal/double_test.go
+++ b/ledger/internal/double_test.go
@@ -70,7 +70,14 @@ func (dl *DoubleLedger) txn(tx *txntest.Txn, problem ...string) {
dl.t.Helper()
if dl.eval == nil {
dl.beginBlock()
- defer dl.endBlock()
+ defer func() {
+ // only advance if the txn was supposed to succeed
+ if len(problem) > 0 {
+ dl.eval = nil
+ } else {
+ dl.endBlock()
+ }
+ }()
}
txn(dl.t, dl.generator, dl.eval, tx, problem...)
}
@@ -90,7 +97,14 @@ func (dl *DoubleLedger) txgroup(problem string, txns ...*txntest.Txn) {
dl.t.Helper()
if dl.eval == nil {
dl.beginBlock()
- defer dl.endBlock()
+ defer func() {
+ // only advance if the txgroup was supposed to succeed
+ if problem != "" {
+ dl.eval = nil
+ } else {
+ dl.endBlock()
+ }
+ }()
}
err := txgroup(dl.t, dl.generator, dl.eval, txns...)
if problem == "" {
diff --git a/ledger/internal/eval.go b/ledger/internal/eval.go
index 20c6f5d5a..1944c4a69 100644
--- a/ledger/internal/eval.go
+++ b/ledger/internal/eval.go
@@ -24,7 +24,6 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/compactcert"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
@@ -41,6 +40,7 @@ import (
// LedgerForCowBase represents subset of Ledger functionality needed for cow business
type LedgerForCowBase interface {
BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
+ BlockHdrCached(basics.Round) (bookkeeping.BlockHeader, error)
CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error
LookupWithoutRewards(basics.Round, basics.Address) (ledgercore.AccountData, basics.Round, error)
LookupAsset(basics.Round, basics.Address, basics.AssetIndex) (ledgercore.AssetResource, error)
@@ -107,11 +107,11 @@ type roundCowBase struct {
// TxnCounter from previous block header.
txnCount uint64
- // Round of the next expected compact cert. In the common case this
- // is CompactCertNextRound from previous block header, except when
- // compact certs are first enabled, in which case this gets set
- // appropriately at the first block where compact certs are enabled.
- compactCertNextRnd basics.Round
+ // Round of the next expected state proof. In the common case this
+ // is StateProofNextRound from previous block header, except when
+ // state proofs are first enabled, in which case this gets set
+ // appropriately at the first block where state proofs are enabled.
+ stateProofNextRnd basics.Round
// The current protocol consensus params.
proto config.ConsensusParams
@@ -134,19 +134,19 @@ type roundCowBase struct {
creators map[creatable]foundAddress
}
-func makeRoundCowBase(l LedgerForCowBase, rnd basics.Round, txnCount uint64, compactCertNextRnd basics.Round, proto config.ConsensusParams) *roundCowBase {
+func makeRoundCowBase(l LedgerForCowBase, rnd basics.Round, txnCount uint64, stateProofNextRnd basics.Round, proto config.ConsensusParams) *roundCowBase {
return &roundCowBase{
- l: l,
- rnd: rnd,
- txnCount: txnCount,
- compactCertNextRnd: compactCertNextRnd,
- proto: proto,
- accounts: make(map[basics.Address]ledgercore.AccountData),
- appParams: make(map[ledgercore.AccountApp]cachedAppParams),
- assetParams: make(map[ledgercore.AccountAsset]cachedAssetParams),
- appLocalStates: make(map[ledgercore.AccountApp]cachedAppLocalState),
- assets: make(map[ledgercore.AccountAsset]cachedAssetHolding),
- creators: make(map[creatable]foundAddress),
+ l: l,
+ rnd: rnd,
+ txnCount: txnCount,
+ stateProofNextRnd: stateProofNextRnd,
+ proto: proto,
+ accounts: make(map[basics.Address]ledgercore.AccountData),
+ appParams: make(map[ledgercore.AccountApp]cachedAppParams),
+ assetParams: make(map[ledgercore.AccountAsset]cachedAssetParams),
+ appLocalStates: make(map[ledgercore.AccountApp]cachedAppLocalState),
+ assets: make(map[ledgercore.AccountAsset]cachedAssetHolding),
+ creators: make(map[creatable]foundAddress),
}
}
@@ -324,14 +324,18 @@ func (x *roundCowBase) txnCounter() uint64 {
return x.txnCount
}
-func (x *roundCowBase) compactCertNext() basics.Round {
- return x.compactCertNextRnd
+func (x *roundCowBase) GetStateProofNextRound() basics.Round {
+ return x.stateProofNextRnd
}
-func (x *roundCowBase) blockHdr(r basics.Round) (bookkeeping.BlockHeader, error) {
+func (x *roundCowBase) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) {
return x.l.BlockHdr(r)
}
+func (x *roundCowBase) blockHdrCached(r basics.Round) (bookkeeping.BlockHeader, error) {
+ return x.l.BlockHdrCached(r)
+}
+
func (x *roundCowBase) allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error) {
// For global, check if app params exist
if global {
@@ -517,14 +521,17 @@ func (cs *roundCowState) Move(from basics.Address, to basics.Address, amt basics
*fromRewards = newFromRewards
}
- var overflowed bool
- fromBalNew.MicroAlgos, overflowed = basics.OSubA(fromBalNew.MicroAlgos, amt)
- if overflowed {
- return fmt.Errorf("overspend (account %v, data %+v, tried to spend %v)", from, fromBal, amt)
- }
- err = cs.putAccount(from, fromBalNew)
- if err != nil {
- return err
+ // Only write the change if it's meaningful (or required by old code).
+ if !amt.IsZero() || fromBal.MicroAlgos.RewardUnits(cs.proto) > 0 || !cs.proto.UnfundedSenders {
+ var overflowed bool
+ fromBalNew.MicroAlgos, overflowed = basics.OSubA(fromBalNew.MicroAlgos, amt)
+ if overflowed {
+ return fmt.Errorf("overspend (account %v, data %+v, tried to spend %v)", from, fromBal, amt)
+ }
+ err = cs.putAccount(from, fromBalNew)
+ if err != nil {
+ return err
+ }
}
toBal, err := cs.lookup(to)
@@ -542,53 +549,26 @@ func (cs *roundCowState) Move(from basics.Address, to basics.Address, amt basics
*toRewards = newToRewards
}
- toBalNew.MicroAlgos, overflowed = basics.OAddA(toBalNew.MicroAlgos, amt)
- if overflowed {
- return fmt.Errorf("balance overflow (account %v, data %+v, was going to receive %v)", to, toBal, amt)
- }
- err = cs.putAccount(to, toBalNew)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (cs *roundCowState) ConsensusParams() config.ConsensusParams {
- return cs.proto
-}
-
-func (cs *roundCowState) compactCert(certRnd basics.Round, certType protocol.CompactCertType, cert compactcert.Cert, atRound basics.Round, validate bool) error {
- if certType != protocol.CompactCertBasic {
- return fmt.Errorf("compact cert type %d not supported", certType)
- }
-
- nextCertRnd := cs.compactCertNext()
-
- certHdr, err := cs.blockHdr(certRnd)
- if err != nil {
- return err
- }
-
- proto := config.Consensus[certHdr.CurrentProtocol]
-
- if validate {
- votersRnd := certRnd.SubSaturate(basics.Round(proto.CompactCertRounds))
- votersHdr, err := cs.blockHdr(votersRnd)
- if err != nil {
- return err
+ // Only write the change if it's meaningful (or required by old code).
+ if !amt.IsZero() || toBal.MicroAlgos.RewardUnits(cs.proto) > 0 || !cs.proto.UnfundedSenders {
+ var overflowed bool
+ toBalNew.MicroAlgos, overflowed = basics.OAddA(toBalNew.MicroAlgos, amt)
+ if overflowed {
+ return fmt.Errorf("balance overflow (account %v, data %+v, was going to receive %v)", to, toBal, amt)
}
-
- err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound)
+ err = cs.putAccount(to, toBalNew)
if err != nil {
return err
}
}
- cs.setCompactCertNext(certRnd + basics.Round(proto.CompactCertRounds))
return nil
}
+func (cs *roundCowState) ConsensusParams() config.ConsensusParams {
+ return cs.proto
+}
+
// BlockEvaluator represents an in-progress evaluation of a block
// against the ledger.
type BlockEvaluator struct {
@@ -617,7 +597,7 @@ type LedgerForEvaluator interface {
GenesisHash() crypto.Digest
GenesisProto() config.ConsensusParams
LatestTotals() (basics.Round, ledgercore.AccountTotals, error)
- CompactCertVoters(basics.Round) (*ledgercore.VotersForRound, error)
+ VotersForStateProof(basics.Round) (*ledgercore.VotersForRound, error)
}
// EvaluatorOptions defines the evaluator creation options
@@ -697,18 +677,18 @@ func StartEvaluator(l LedgerForEvaluator, hdr bookkeeping.BlockHeader, evalOpts
eval.block.Payset = make([]transactions.SignedTxnInBlock, 0, evalOpts.PaysetHint)
}
- base.compactCertNextRnd = eval.prevHeader.CompactCert[protocol.CompactCertBasic].CompactCertNextRound
+ base.stateProofNextRnd = eval.prevHeader.StateProofTracking[protocol.StateProofBasic].StateProofNextRound
- // Check if compact certs are being enabled as of this block.
- if base.compactCertNextRnd == 0 && proto.CompactCertRounds != 0 {
- // Determine the first block that will contain a Merkle
+ // Check if state proofs are being enabled as of this block.
+ if base.stateProofNextRnd == 0 && proto.StateProofInterval != 0 {
+ // Determine the first block that will contain a Vector
// commitment to the voters. We need to account for the
- // fact that the voters come from CompactCertVotersLookback
+ // fact that the voters come from StateProofVotersLookback
// rounds ago.
- votersRound := (hdr.Round + basics.Round(proto.CompactCertVotersLookback)).RoundUpToMultipleOf(basics.Round(proto.CompactCertRounds))
+ votersRound := (hdr.Round + basics.Round(proto.StateProofVotersLookback)).RoundUpToMultipleOf(basics.Round(proto.StateProofInterval))
- // The first compact cert will appear CompactCertRounds after that.
- base.compactCertNextRnd = votersRound + basics.Round(proto.CompactCertRounds)
+ // The first state proof will appear StateProofInterval after that.
+ base.stateProofNextRnd = votersRound + basics.Round(proto.StateProofInterval)
}
latestRound, prevTotals, err := l.LatestTotals()
@@ -1006,12 +986,12 @@ func (eval *BlockEvaluator) transactionGroup(txgroup []transactions.SignedTxnWit
func (eval *BlockEvaluator) checkMinBalance(cow *roundCowState) error {
rewardlvl := cow.rewardsLevel()
for _, addr := range cow.modifiedAccounts() {
- // Skip FeeSink, RewardsPool, and CompactCertSender MinBalance checks here.
+ // Skip FeeSink, RewardsPool, and StateProofSender MinBalance checks here.
// There's only a few accounts, so space isn't an issue, and we don't
// expect them to have low balances, but if they do, it may cause
// surprises.
if addr == eval.block.FeeSink || addr == eval.block.RewardsPool ||
- addr == transactions.CompactCertSender {
+ addr == transactions.StateProofSender {
continue
}
@@ -1084,7 +1064,7 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, evalParams *
// Apply the transaction, updating the cow balances
applyData, err := eval.applyTransaction(txn.Txn, cow, evalParams, gi, cow.txnCounter())
if err != nil {
- return fmt.Errorf("transaction %v: %v", txid, err)
+ return fmt.Errorf("transaction %v: %w", txid, err)
}
// Validate applyData if we are validating an existing block.
@@ -1126,47 +1106,47 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, evalParams *
}
// applyTransaction changes the balances according to this transaction.
-func (eval *BlockEvaluator) applyTransaction(tx transactions.Transaction, balances *roundCowState, evalParams *logic.EvalParams, gi int, ctr uint64) (ad transactions.ApplyData, err error) {
- params := balances.ConsensusParams()
+func (eval *BlockEvaluator) applyTransaction(tx transactions.Transaction, cow *roundCowState, evalParams *logic.EvalParams, gi int, ctr uint64) (ad transactions.ApplyData, err error) {
+ params := cow.ConsensusParams()
// move fee to pool
- err = balances.Move(tx.Sender, eval.specials.FeeSink, tx.Fee, &ad.SenderRewards, nil)
+ err = cow.Move(tx.Sender, eval.specials.FeeSink, tx.Fee, &ad.SenderRewards, nil)
if err != nil {
return
}
- err = apply.Rekey(balances, &tx)
+ err = apply.Rekey(cow, &tx)
if err != nil {
return
}
switch tx.Type {
case protocol.PaymentTx:
- err = apply.Payment(tx.PaymentTxnFields, tx.Header, balances, eval.specials, &ad)
+ err = apply.Payment(tx.PaymentTxnFields, tx.Header, cow, eval.specials, &ad)
case protocol.KeyRegistrationTx:
- err = apply.Keyreg(tx.KeyregTxnFields, tx.Header, balances, eval.specials, &ad, balances.round())
+ err = apply.Keyreg(tx.KeyregTxnFields, tx.Header, cow, eval.specials, &ad, cow.round())
case protocol.AssetConfigTx:
- err = apply.AssetConfig(tx.AssetConfigTxnFields, tx.Header, balances, eval.specials, &ad, ctr)
+ err = apply.AssetConfig(tx.AssetConfigTxnFields, tx.Header, cow, eval.specials, &ad, ctr)
case protocol.AssetTransferTx:
- err = apply.AssetTransfer(tx.AssetTransferTxnFields, tx.Header, balances, eval.specials, &ad)
+ err = apply.AssetTransfer(tx.AssetTransferTxnFields, tx.Header, cow, eval.specials, &ad)
case protocol.AssetFreezeTx:
- err = apply.AssetFreeze(tx.AssetFreezeTxnFields, tx.Header, balances, eval.specials, &ad)
+ err = apply.AssetFreeze(tx.AssetFreezeTxnFields, tx.Header, cow, eval.specials, &ad)
case protocol.ApplicationCallTx:
- err = apply.ApplicationCall(tx.ApplicationCallTxnFields, tx.Header, balances, &ad, gi, evalParams, ctr)
+ err = apply.ApplicationCall(tx.ApplicationCallTxnFields, tx.Header, cow, &ad, gi, evalParams, ctr)
- case protocol.CompactCertTx:
- // in case of a CompactCertTx transaction, we want to "apply" it only in validate or generate mode. This will deviate the cow's CompactCertNext depending of
- // whether we're in validate/generate mode or not, however - given that this variable in only being used in these modes, it would be safe.
+ case protocol.StateProofTx:
+ // in case of a StateProofTx transaction, we want to "apply" it only in validate or generate mode. This will deviate the cow's StateProofNextRound depending on
+ // whether we're in validate/generate mode or not, however - given that this variable is only being used in these modes, it would be safe.
// The reason for making this into an exception is that during initialization time, the accounts update is "converting" the recent 320 blocks into deltas to
- // be stored in memory. These deltas don't care about the compact certificate, and so we can improve the node load time. Additionally, it save us from
+ // be stored in memory. These deltas don't care about the state proofs, and so we can improve the node load time. Additionally, it save us from
// performing the validation during catchup, which is another performance boost.
if eval.validate || eval.generate {
- err = balances.compactCert(tx.CertRound, tx.CertType, tx.Cert, tx.Header.FirstValid, eval.validate)
+ err = apply.StateProof(tx.StateProofTxnFields, tx.Header.FirstValid, cow, eval.validate)
}
default:
@@ -1197,28 +1177,24 @@ func (eval *BlockEvaluator) applyTransaction(tx transactions.Transaction, balanc
return
}
-// compactCertVotersAndTotal returns the expected values of CompactCertVoters
-// and CompactCertVotersTotal for a block.
-func (eval *BlockEvaluator) compactCertVotersAndTotal() (root crypto.GenericDigest, total basics.MicroAlgos, err error) {
- if eval.proto.CompactCertRounds == 0 {
+// stateProofVotersAndTotal returns the expected values of StateProofVotersCommitment
+// and StateProofOnlineTotalWeight for a block.
+func (eval *BlockEvaluator) stateProofVotersAndTotal() (root crypto.GenericDigest, total basics.MicroAlgos, err error) {
+ if eval.proto.StateProofInterval == 0 {
return
}
- if eval.block.Round()%basics.Round(eval.proto.CompactCertRounds) != 0 {
+ if eval.block.Round()%basics.Round(eval.proto.StateProofInterval) != 0 {
return
}
- lookback := eval.block.Round().SubSaturate(basics.Round(eval.proto.CompactCertVotersLookback))
- voters, err := eval.l.CompactCertVoters(lookback)
- if err != nil {
+ lookback := eval.block.Round().SubSaturate(basics.Round(eval.proto.StateProofVotersLookback))
+ voters, err := eval.l.VotersForStateProof(lookback)
+ if err != nil || voters == nil {
return
}
- if voters != nil {
- root, total = voters.Tree.Root(), voters.TotalWeight
- }
-
- return
+ return voters.Tree.Root(), voters.TotalWeight, nil
}
// TestingTxnCounter - the method returns the current evaluator transaction counter. The method is used for testing purposes only.
@@ -1243,17 +1219,17 @@ func (eval *BlockEvaluator) endOfBlock() error {
eval.generateExpiredOnlineAccountsList()
- if eval.proto.CompactCertRounds > 0 {
- var basicCompactCert bookkeeping.CompactCertState
- basicCompactCert.CompactCertVoters, basicCompactCert.CompactCertVotersTotal, err = eval.compactCertVotersAndTotal()
+ if eval.proto.StateProofInterval > 0 {
+ var basicStateProof bookkeeping.StateProofTrackingData
+ basicStateProof.StateProofVotersCommitment, basicStateProof.StateProofOnlineTotalWeight, err = eval.stateProofVotersAndTotal()
if err != nil {
return err
}
- basicCompactCert.CompactCertNextRound = eval.state.compactCertNext()
+ basicStateProof.StateProofNextRound = eval.state.GetStateProofNextRound()
- eval.block.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState)
- eval.block.CompactCert[protocol.CompactCertBasic] = basicCompactCert
+ eval.block.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
+ eval.block.StateProofTracking[protocol.StateProofBasic] = basicStateProof
}
}
@@ -1267,8 +1243,6 @@ func (eval *BlockEvaluator) endOfBlock() error {
return err
}
- eval.state.mods.OptimizeAllocatedMemory(eval.proto)
-
if eval.validate {
// check commitments
txnRoot, err := eval.block.PaysetCommit()
@@ -1287,22 +1261,22 @@ func (eval *BlockEvaluator) endOfBlock() error {
return fmt.Errorf("txn count wrong: %d != %d", eval.block.TxnCounter, expectedTxnCount)
}
- expectedVoters, expectedVotersWeight, err := eval.compactCertVotersAndTotal()
+ expectedVoters, expectedVotersWeight, err := eval.stateProofVotersAndTotal()
if err != nil {
return err
}
- if !eval.block.CompactCert[protocol.CompactCertBasic].CompactCertVoters.IsEqual(expectedVoters) {
- return fmt.Errorf("CompactCertVoters wrong: %v != %v", eval.block.CompactCert[protocol.CompactCertBasic].CompactCertVoters, expectedVoters)
+ if !eval.block.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment.IsEqual(expectedVoters) {
+ return fmt.Errorf("StateProofVotersCommitment wrong: %v != %v", eval.block.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment, expectedVoters)
}
- if eval.block.CompactCert[protocol.CompactCertBasic].CompactCertVotersTotal != expectedVotersWeight {
- return fmt.Errorf("CompactCertVotersTotal wrong: %v != %v", eval.block.CompactCert[protocol.CompactCertBasic].CompactCertVotersTotal, expectedVotersWeight)
+ if eval.block.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != expectedVotersWeight {
+ return fmt.Errorf("StateProofOnlineTotalWeight wrong: %v != %v", eval.block.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight, expectedVotersWeight)
}
- if eval.block.CompactCert[protocol.CompactCertBasic].CompactCertNextRound != eval.state.compactCertNext() {
- return fmt.Errorf("CompactCertNextRound wrong: %v != %v", eval.block.CompactCert[protocol.CompactCertBasic].CompactCertNextRound, eval.state.compactCertNext())
+ if eval.block.StateProofTracking[protocol.StateProofBasic].StateProofNextRound != eval.state.GetStateProofNextRound() {
+ return fmt.Errorf("StateProofNextRound wrong: %v != %v", eval.block.StateProofTracking[protocol.StateProofBasic].StateProofNextRound, eval.state.GetStateProofNextRound())
}
- for ccType := range eval.block.CompactCert {
- if ccType != protocol.CompactCertBasic {
- return fmt.Errorf("CompactCertType %d unexpected", ccType)
+ for ccType := range eval.block.StateProofTracking {
+ if ccType != protocol.StateProofBasic {
+ return fmt.Errorf("StateProofType %d unexpected", ccType)
}
}
}
@@ -1473,6 +1447,7 @@ type evalTxValidator struct {
txcache verify.VerifiedTransactionCache
block bookkeeping.Block
verificationPool execpool.BacklogPool
+ ledger logic.LedgerForSignature
ctx context.Context
txgroups [][]transactions.SignedTxnWithAD
@@ -1503,7 +1478,7 @@ func (validator *evalTxValidator) run() {
unverifiedTxnGroups = validator.txcache.GetUnverifiedTranscationGroups(unverifiedTxnGroups, specialAddresses, validator.block.BlockHeader.CurrentProtocol)
- err := verify.PaysetGroups(validator.ctx, unverifiedTxnGroups, validator.block.BlockHeader, validator.verificationPool, validator.txcache)
+ err := verify.PaysetGroups(validator.ctx, unverifiedTxnGroups, validator.block.BlockHeader, validator.verificationPool, validator.txcache, validator.ledger)
if err != nil {
validator.done <- err
}
@@ -1558,6 +1533,7 @@ func Eval(ctx context.Context, l LedgerForEvaluator, blk bookkeeping.Block, vali
txvalidator.txcache = txcache
txvalidator.block = blk
txvalidator.verificationPool = executionPool
+ txvalidator.ledger = l
txvalidator.ctx = validationCtx
txvalidator.txgroups = paysetgroups
diff --git a/ledger/internal/eval_blackbox_test.go b/ledger/internal/eval_blackbox_test.go
index 21240467c..5a533ba8f 100644
--- a/ledger/internal/eval_blackbox_test.go
+++ b/ledger/internal/eval_blackbox_test.go
@@ -20,6 +20,7 @@ import (
"context"
"fmt"
"reflect"
+ "strconv"
"strings"
"testing"
@@ -422,14 +423,14 @@ func txn(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txn
fillDefaults(t, ledger, eval, txn)
err := eval.Transaction(txn.SignedTxn(), transactions.ApplyData{})
if err != nil {
- if len(problem) == 1 {
+ if len(problem) == 1 && problem[0] != "" {
require.Contains(t, err.Error(), problem[0])
} else {
require.NoError(t, err) // Will obviously fail
}
return
}
- require.Len(t, problem, 0)
+ require.True(t, len(problem) == 0 || problem[0] == "")
}
func txgroup(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txns ...*txntest.Txn) error {
@@ -605,21 +606,39 @@ func TestRewardsInAD(t *testing.T) {
defer dl.Close()
payTxn := txntest.Txn{Type: protocol.PaymentTx, Sender: addrs[0], Receiver: addrs[1]}
+ nonpartTxn := txntest.Txn{Type: protocol.KeyRegistrationTx, Sender: addrs[2], Nonparticipation: true}
+ payNonPart := txntest.Txn{Type: protocol.PaymentTx, Sender: addrs[0], Receiver: addrs[2]}
+
+ if ver < 18 { // Nonpart reyreg happens in v18
+ dl.txn(&nonpartTxn, "tries to mark an account as nonparticipating")
+ } else {
+ dl.fullBlock(&nonpartTxn)
+ }
// Build up Residue in RewardsState so it's ready to pay
for i := 1; i < 10; i++ {
dl.fullBlock()
}
- vb := dl.fullBlock(&payTxn)
+ vb := dl.fullBlock(&payTxn, &payNonPart)
payInBlock := vb.Block().Payset[0]
+ nonPartInBlock := vb.Block().Payset[1]
if ver >= 15 {
require.Greater(t, payInBlock.ApplyData.SenderRewards.Raw, uint64(1000))
require.Greater(t, payInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
require.Equal(t, payInBlock.ApplyData.SenderRewards, payInBlock.ApplyData.ReceiverRewards)
+ // Sender is not due for more, and Receiver is nonpart
+ require.Zero(t, nonPartInBlock.ApplyData.SenderRewards)
+ if ver < 18 {
+ require.Greater(t, nonPartInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
+ } else {
+ require.Zero(t, nonPartInBlock.ApplyData.ReceiverRewards)
+ }
} else {
- require.EqualValues(t, 0, payInBlock.ApplyData.SenderRewards.Raw)
- require.EqualValues(t, 0, payInBlock.ApplyData.ReceiverRewards.Raw)
+ require.Zero(t, payInBlock.ApplyData.SenderRewards)
+ require.Zero(t, payInBlock.ApplyData.ReceiverRewards)
+ require.Zero(t, nonPartInBlock.ApplyData.SenderRewards)
+ require.Zero(t, nonPartInBlock.ApplyData.ReceiverRewards)
}
})
}
@@ -708,7 +727,7 @@ func TestDeleteNonExistantKeys(t *testing.T) {
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // teal v2 (apps)
+ // AVM v2 (apps)
testConsensusRange(t, 24, 0, func(t *testing.T, ver int) {
dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
defer dl.Close()
@@ -870,15 +889,17 @@ var consensusByNumber = []protocol.ConsensusVersion{
protocol.ConsensusV21,
protocol.ConsensusV22,
protocol.ConsensusV23,
- protocol.ConsensusV24, // teal v2 (apps)
+ protocol.ConsensusV24, // AVM v2 (apps)
protocol.ConsensusV25,
protocol.ConsensusV26,
protocol.ConsensusV27,
protocol.ConsensusV28,
protocol.ConsensusV29,
- protocol.ConsensusV30, // teal v5 (inner txs)
- protocol.ConsensusV31, // teal v6 (inner txs with appls)
+ protocol.ConsensusV30, // AVM v5 (inner txs)
+ protocol.ConsensusV31, // AVM v6 (inner txs with appls)
protocol.ConsensusV32, // unlimited assets and apps
+ protocol.ConsensusV33, // 320 rounds
+ protocol.ConsensusV34, // AVM v7, stateproofs
protocol.ConsensusFuture,
}
@@ -943,6 +964,58 @@ func benchConsensusRange(b *testing.B, start, stop int, bench func(t *testing.B,
}
}
+// TestHeaderAccess tests FirstValidTime and `block` which can access previous
+// block headers.
+func TestHeaderAccess(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // Added in v34
+ testConsensusRange(t, 34, 0, func(t *testing.T, ver int) {
+ cv := consensusByNumber[ver]
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ fvt := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ FirstValid: 0,
+ ApprovalProgram: "txn FirstValidTime",
+ }
+ dl.txn(&fvt, "round 0 is not available")
+
+ // advance current to 2
+ pay := txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]}
+ dl.fullBlock(&pay)
+
+ fvt.FirstValid = 1
+ dl.txn(&fvt, "round 0 is not available")
+
+ fvt.FirstValid = 2
+ dl.txn(&fvt) // current becomes 3
+
+ // Advance current round far enough to test access MaxTxnLife ago
+ for i := 0; i < int(config.Consensus[cv].MaxTxnLife); i++ {
+ dl.fullBlock()
+ }
+
+ // current should be 1003. Confirm.
+ require.EqualValues(t, 1002, dl.generator.Latest())
+ require.EqualValues(t, 1002, dl.validator.Latest())
+
+ fvt.FirstValid = 1003
+ fvt.LastValid = 1010
+ dl.txn(&fvt) // success advances the round
+ // now we're confident current is 1004, so construct a txn that is as
+ // old as possible, and confirm access.
+ fvt.FirstValid = 1004 - basics.Round(config.Consensus[cv].MaxTxnLife)
+ fvt.LastValid = 1004
+ dl.txn(&fvt)
+ })
+
+}
+
// TestLogsInBlock ensures that logs appear in the block properly
func TestLogsInBlock(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -989,133 +1062,184 @@ func TestLogsInBlock(t *testing.T) {
})
}
-// TestGhostTransactions confirms that accounts that don't even exist
+// TestUnfundedSenders confirms that accounts that don't even exist
// can be the Sender in some situations. If some other transaction
// covers the fee, and the transaction itself does not require an
// asset or a min balance, it's fine.
-func TestGhostTransactions(t *testing.T) {
- t.Skip("Behavior should be changed so test passes.")
-
+func TestUnfundedSenders(t *testing.T) {
/*
- I think we have a behavior we should fix. I’m going to call these
- transactions where the Sender has no account and the fee=0 “ghost”
- transactions. In a ghost transaction, we still call balances.Move to
- “pay” the fee. Further, Move does not short-circuit a Move of 0 (for
- good reason, allowing compounding). Therefore, in Move, we do rewards
- processing on the “ghost” account. That causes us to want to write a
- new accountdata for them. But if we do that, the minimum balance
- checker will catch it, and kill the transaction because the ghost isn’t
- allowed to have a balance of 0. I don’t think we can short-circuit
- Move(0) because a zero pay is a known way to get your rewards
- actualized. Instead, I advocate that we short-circuit the call to Move
- for 0 fees.
-
- // move fee to pool
- if !tx.Fee.IsZero() {
- err = balances.Move(tx.Sender, eval.specials.FeeSink, tx.Fee, &ad.SenderRewards, nil)
- if err != nil {
- return
- }
- }
-
- I think this must be controlled by consensus upgrade, but I would love
- to be told I’m wrong. The other option is to outlaw these
- transactions, but even that requires changing code if we want to be
- exactly correct, because they are currently allowed when there are no
- rewards to get paid out (as would happen in a new network, or if we
- stop participation rewards - notice that this test only fails on the
- 4th attempt, once rewards have accumulated).
-
- Will suggested that we could treat Ghost accounts as non-partipating.
- Maybe that would allow the Move code to avoid trying to update
- accountdata.
+ In a 0-fee transaction from unfunded sender, we still call balances.Move
+ to “pay” the fee. Move() does not short-circuit a Move of 0 (for good
+ reason, it allows compounding rewards). Therefore, in Move, we do
+ rewards processing on the unfunded account. Before
+ proto.UnfundedSenders, the rewards procesing would set the RewardsBase,
+ which would require the account be written to DB, and therefore the MBR
+ check would kick in (and fail). Now it skips the update if the account
+ has less than RewardsUnit, as the update is meaningless anyway.
*/
partitiontest.PartitionTest(t)
t.Parallel()
- genesisInitState, addrs, _ := ledgertesting.Genesis(10)
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l, err := ledger.OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
+ testConsensusRange(t, 24, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
- asaIndex := basics.AssetIndex(1)
+ asaIndex := basics.AssetIndex(1)
- asa := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- AssetParams: basics.AssetParams{
- Total: 1000000,
- Decimals: 3,
- UnitName: "oz",
- AssetName: "Gold",
- URL: "https://gold.rush/",
- Clawback: basics.Address{0x0c, 0x0b, 0x0a, 0x0c},
- Freeze: basics.Address{0x0f, 0x0e, 0xe, 0xe},
- Manager: basics.Address{0x0a, 0x0a, 0xe},
- },
- }
+ ghost := basics.Address{0x01}
- eval := nextBlock(t, l)
- txn(t, l, eval, &asa)
- endBlock(t, l, eval)
+ asa_create := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{
+ Total: 10,
+ Clawback: ghost,
+ Freeze: ghost,
+ Manager: ghost,
+ },
+ }
- benefactor := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: addrs[0],
- Fee: 2000,
- }
+ app_create := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ }
- ghost := basics.Address{0x01}
- ephemeral := []txntest.Txn{
- {
+ dl.fullBlock(&asa_create, &app_create)
+
+ // Advance so that rewardsLevel increases
+ for i := 1; i < 10; i++ {
+ dl.fullBlock()
+ }
+
+ fmt.Printf("addrs[0] = %+v\n", addrs[0])
+ fmt.Printf("addrs[1] = %+v\n", addrs[1])
+
+ benefactor := txntest.Txn{
Type: "pay",
- Amount: 0,
- Sender: ghost,
- Receiver: ghost,
- Fee: 0,
- },
- {
- Type: "axfer",
- AssetAmount: 0,
- Sender: ghost,
- AssetReceiver: basics.Address{0x02},
- XferAsset: basics.AssetIndex(1),
- Fee: 0,
- },
- {
- Type: "axfer",
- AssetAmount: 0,
- Sender: basics.Address{0x0c, 0x0b, 0x0a, 0x0c},
- AssetReceiver: addrs[0],
- AssetSender: addrs[1],
- XferAsset: asaIndex,
- Fee: 0,
- },
- {
- Type: "afrz",
- Sender: basics.Address{0x0f, 0x0e, 0xe, 0xe},
- FreezeAccount: addrs[0], // creator, therefore is opted in
- FreezeAsset: asaIndex,
- AssetFrozen: true,
- Fee: 0,
- },
- {
- Type: "afrz",
- Sender: basics.Address{0x0f, 0x0e, 0xe, 0xe},
- FreezeAccount: addrs[0], // creator, therefore is opted in
- FreezeAsset: asaIndex,
- AssetFrozen: false,
- Fee: 0,
- },
- }
+ Sender: addrs[0],
+ Receiver: addrs[0],
+ Fee: 2000,
+ }
- for i, e := range ephemeral {
- eval = nextBlock(t, l)
- err := txgroup(t, l, eval, &benefactor, &e)
- require.NoError(t, err, "i=%d %s", i, e.Type)
- endBlock(t, l, eval)
- }
+ ephemeral := []txntest.Txn{
+ {
+ Type: "pay",
+ Amount: 0,
+ Sender: ghost,
+ Receiver: ghost,
+ Fee: 0,
+ },
+ { // Axfer of 0
+ Type: "axfer",
+ AssetAmount: 0,
+ Sender: ghost,
+ AssetReceiver: basics.Address{0x02},
+ XferAsset: basics.AssetIndex(1),
+ Fee: 0,
+ },
+ { // Clawback
+ Type: "axfer",
+ AssetAmount: 0,
+ Sender: ghost,
+ AssetReceiver: addrs[0],
+ AssetSender: addrs[1],
+ XferAsset: asaIndex,
+ Fee: 0,
+ },
+ { // Freeze
+ Type: "afrz",
+ Sender: ghost,
+ FreezeAccount: addrs[0], // creator, therefore is opted in
+ FreezeAsset: asaIndex,
+ AssetFrozen: true,
+ Fee: 0,
+ },
+ { // Unfreeze
+ Type: "afrz",
+ Sender: ghost,
+ FreezeAccount: addrs[0], // creator, therefore is opted in
+ FreezeAsset: asaIndex,
+ AssetFrozen: false,
+ Fee: 0,
+ },
+ { // App call
+ Type: "appl",
+ Sender: ghost,
+ ApplicationID: basics.AppIndex(2),
+ Fee: 0,
+ },
+ { // App creation (only works because it's also deleted)
+ Type: "appl",
+ Sender: ghost,
+ OnCompletion: transactions.DeleteApplicationOC,
+ Fee: 0,
+ },
+ }
+
+ // v34 is the likely version for UnfundedSenders. Change if that doesn't happen.
+ var problem string
+ if ver < 34 {
+ // In the old days, balances.Move would try to increase the rewardsState on the unfunded account
+ problem = "balance 0 below min"
+ }
+ for i, e := range ephemeral {
+ dl.txgroup(problem, benefactor.Noted(strconv.Itoa(i)), &e)
+ }
+ })
+}
+
+// TestAppCallAppDuringInit is similar to TestUnfundedSenders test, but now the
+// unfunded sender is a newly created app. The fee has been paid by the outer
+// transaction, so the app should be able to make an app call as that requires
+// no min balance.
+func TestAppCallAppDuringInit(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
+
+ approve := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ }
+
+ // construct a simple app
+ vb := dl.fullBlock(&approve)
+
+ // now make a new app that calls it during init
+ approveID := vb.Block().Payset[0].ApplicationID
+
+ // Advance so that rewardsLevel increases
+ for i := 1; i < 10; i++ {
+ dl.fullBlock()
+ }
+
+ call_in_init := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: `
+ itxn_begin
+ int appl
+ itxn_field TypeEnum
+ txn Applications 1
+ itxn_field ApplicationID
+ itxn_submit
+ int 1
+ `,
+ ForeignApps: []basics.AppIndex{approveID},
+ Fee: 2000, // Enough to have the inner fee paid for
+ }
+ // v34 is the likely version for UnfundedSenders. Change if that doesn't happen.
+ var problem string
+ if ver < 34 {
+ // In the old days, balances.Move would try to increase the rewardsState on the unfunded account
+ problem = "balance 0 below min"
+ }
+ dl.txn(&call_in_init, problem)
+ })
}
diff --git a/ledger/internal/eval_test.go b/ledger/internal/eval_test.go
index b8bb438c2..8f07612be 100644
--- a/ledger/internal/eval_test.go
+++ b/ledger/internal/eval_test.go
@@ -30,13 +30,15 @@ import (
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/compactcert"
"github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/algorand/go-algorand/crypto/stateproof"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/verify"
+ "github.com/algorand/go-algorand/ledger/apply"
"github.com/algorand/go-algorand/ledger/ledgercore"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/protocol"
@@ -198,14 +200,15 @@ func TestEvalAppAllocStateWithTxnGroup(t *testing.T) {
require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addr[:])}, state["creator"])
}
-func TestCowCompactCert(t *testing.T) {
+func TestCowStateProof(t *testing.T) {
partitiontest.PartitionTest(t)
- var certRnd basics.Round
- var certType protocol.CompactCertType
- var cert compactcert.Cert
+ var spType protocol.StateProofType
+ var stateProof stateproof.StateProof
var atRound basics.Round
var validate bool
+ msg := stateproofmsg.Message{}
+
accts0 := ledgertesting.RandomAccounts(20, true)
blocks := make(map[basics.Round]bookkeeping.BlockHeader)
blockErr := make(map[basics.Round]error)
@@ -214,45 +217,59 @@ func TestCowCompactCert(t *testing.T) {
&ml, bookkeeping.BlockHeader{}, config.Consensus[protocol.ConsensusCurrentVersion],
0, ledgercore.AccountTotals{}, 0)
- certType = protocol.CompactCertType(1234) // bad cert type
- err := c0.compactCert(certRnd, certType, cert, atRound, validate)
- require.Error(t, err)
+ spType = protocol.StateProofType(1234) // bad stateproof type
+ stateProofTx := transactions.StateProofTxnFields{
+ StateProofType: spType,
+ StateProof: stateProof,
+ Message: msg,
+ }
+ err := apply.StateProof(stateProofTx, atRound, c0, validate)
+ require.ErrorIs(t, err, apply.ErrStateProofTypeNotSupported)
- // no certRnd block
- certType = protocol.CompactCertBasic
+ // no spRnd block
+ stateProofTx.StateProofType = protocol.StateProofBasic
noBlockErr := errors.New("no block")
blockErr[3] = noBlockErr
- certRnd = 3
- err = c0.compactCert(certRnd, certType, cert, atRound, validate)
- require.Error(t, err)
+ stateProofTx.Message.LastAttestedRound = 3
+ err = apply.StateProof(stateProofTx, atRound, c0, validate)
+ require.Contains(t, err.Error(), "no block")
+
+ // stateproof txn doesn't confirm the next state proof round. expected is in the past
+ validate = true
+ stateProofTx.Message.LastAttestedRound = uint64(16)
+ c0.SetStateProofNextRound(8)
+ err = apply.StateProof(stateProofTx, atRound, c0, validate)
+ require.ErrorIs(t, err, apply.ErrExpectedDifferentStateProofRound)
+
+ // stateproof txn doesn't confirm the next state proof round. expected is in the future
+ validate = true
+ stateProofTx.Message.LastAttestedRound = uint64(16)
+ c0.SetStateProofNextRound(32)
+ err = apply.StateProof(stateProofTx, atRound, c0, validate)
+ require.ErrorIs(t, err, apply.ErrExpectedDifferentStateProofRound)
// no votersRnd block
// this is slightly a mess of things that don't quite line up with likely usage
validate = true
- var certHdr bookkeeping.BlockHeader
- certHdr.CurrentProtocol = "TestCowCompactCert"
- certHdr.Round = 1
- proto := config.Consensus[certHdr.CurrentProtocol]
- proto.CompactCertRounds = 2
- config.Consensus[certHdr.CurrentProtocol] = proto
- blocks[certHdr.Round] = certHdr
-
- certHdr.Round = 15
- blocks[certHdr.Round] = certHdr
- certRnd = certHdr.Round
+ var spHdr bookkeeping.BlockHeader
+ spHdr.CurrentProtocol = "TestCowStateProof"
+ spHdr.Round = 1
+ proto := config.Consensus[spHdr.CurrentProtocol]
+ proto.StateProofInterval = 2
+ config.Consensus[spHdr.CurrentProtocol] = proto
+ blocks[spHdr.Round] = spHdr
+
+ spHdr.Round = 15
+ blocks[spHdr.Round] = spHdr
+ stateProofTx.Message.LastAttestedRound = uint64(spHdr.Round)
+ c0.SetStateProofNextRound(15)
blockErr[13] = noBlockErr
- err = c0.compactCert(certRnd, certType, cert, atRound, validate)
- require.Error(t, err)
-
- // validate fail
- certHdr.Round = 1
- certRnd = certHdr.Round
- err = c0.compactCert(certRnd, certType, cert, atRound, validate)
- require.Error(t, err)
+ err = apply.StateProof(stateProofTx, atRound, c0, validate)
+ require.Contains(t, err.Error(), "no block")
// fall through to no err
validate = false
- err = c0.compactCert(certRnd, certType, cert, atRound, validate)
+ err = apply.StateProof(stateProofTx, atRound, c0, validate)
require.NoError(t, err)
// 100% coverage
@@ -431,13 +448,14 @@ func newTestGenesis() (bookkeeping.GenesisBalances, []basics.Address, []*crypto.
}
type evalTestLedger struct {
- blocks map[basics.Round]bookkeeping.Block
- roundBalances map[basics.Round]map[basics.Address]basics.AccountData
- genesisHash crypto.Digest
- genesisProto config.ConsensusParams
- feeSink basics.Address
- rewardsPool basics.Address
- latestTotals ledgercore.AccountTotals
+ blocks map[basics.Round]bookkeeping.Block
+ roundBalances map[basics.Round]map[basics.Address]basics.AccountData
+ genesisHash crypto.Digest
+ genesisProto config.ConsensusParams
+ genesisProtoVersion protocol.ConsensusVersion
+ feeSink basics.Address
+ rewardsPool basics.Address
+ latestTotals ledgercore.AccountTotals
}
// newTestLedger creates a in memory Ledger that is as realistic as
@@ -464,6 +482,7 @@ func newTestLedger(t testing.TB, balances bookkeeping.GenesisBalances) *evalTest
l.latestTotals.AddAccount(proto, ledgercore.ToAccountData(acctData), &ot)
}
l.genesisProto = proto
+ l.genesisProtoVersion = protocol.ConsensusCurrentVersion
require.False(t, genBlock.FeeSink.IsZero())
require.False(t, genBlock.RewardsPool.IsZero())
@@ -563,9 +582,14 @@ func (ledger *evalTestLedger) GenesisHash() crypto.Digest {
return ledger.genesisHash
}
-// GenesisProto returns the genesis hash for this ledger.
+// GenesisProto returns the genesis consensus params for this ledger.
func (ledger *evalTestLedger) GenesisProto() config.ConsensusParams {
- return ledger.genesisProto
+ return config.Consensus[ledger.genesisProtoVersion]
+}
+
+// GenesisProto returns the genesis consensus version for this ledger.
+func (ledger *evalTestLedger) GenesisProtoVersion() protocol.ConsensusVersion {
+ return ledger.genesisProtoVersion
}
// Latest returns the latest known block round added to the ledger.
@@ -623,7 +647,11 @@ func (ledger *evalTestLedger) BlockHdr(rnd basics.Round) (bookkeeping.BlockHeade
return block.BlockHeader, nil
}
-func (ledger *evalTestLedger) CompactCertVoters(rnd basics.Round) (*ledgercore.VotersForRound, error) {
+func (ledger *evalTestLedger) BlockHdrCached(rnd basics.Round) (bookkeeping.BlockHeader, error) {
+ return ledger.BlockHdrCached(rnd)
+}
+
+func (ledger *evalTestLedger) VotersForStateProof(rnd basics.Round) (*ledgercore.VotersForRound, error) {
return nil, errors.New("untested code path")
}
@@ -720,6 +748,10 @@ func (l *testCowBaseLedger) BlockHdr(basics.Round) (bookkeeping.BlockHeader, err
return bookkeeping.BlockHeader{}, errors.New("not implemented")
}
+func (l *testCowBaseLedger) BlockHdrCached(rnd basics.Round) (bookkeeping.BlockHeader, error) {
+ return l.BlockHdrCached(rnd)
+}
+
func (l *testCowBaseLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
return errors.New("not implemented")
}
@@ -870,7 +902,7 @@ func TestEvalFunctionForExpiredAccounts(t *testing.T) {
acctData, _ := blkEval.state.lookup(recvAddr)
- require.Equal(t, merklesignature.Verifier{}, acctData.StateProofID)
+ require.Equal(t, merklesignature.Verifier{}.Commitment, acctData.StateProofID)
require.Equal(t, crypto.VRFVerifier{}, acctData.SelectionID)
badBlock := *validatedBlock
@@ -1112,11 +1144,11 @@ func TestExpiredAccountGeneration(t *testing.T) {
recvAcct, err := eval.state.lookup(recvAddr)
require.NoError(t, err)
- require.Equal(t, recvAcct.Status, basics.Offline)
- require.Equal(t, recvAcct.VoteFirstValid, basics.Round(0))
- require.Equal(t, recvAcct.VoteLastValid, basics.Round(0))
- require.Equal(t, recvAcct.VoteKeyDilution, uint64(0))
- require.Equal(t, recvAcct.VoteID, crypto.OneTimeSignatureVerifier{})
- require.Equal(t, recvAcct.SelectionID, crypto.VRFVerifier{})
- require.Equal(t, recvAcct.StateProofID, merklesignature.Verifier{})
+ require.Equal(t, basics.Offline, recvAcct.Status)
+ require.Equal(t, basics.Round(0), recvAcct.VoteFirstValid)
+ require.Equal(t, basics.Round(0), recvAcct.VoteLastValid)
+ require.Equal(t, uint64(0), recvAcct.VoteKeyDilution)
+ require.Equal(t, crypto.OneTimeSignatureVerifier{}, recvAcct.VoteID)
+ require.Equal(t, crypto.VRFVerifier{}, recvAcct.SelectionID)
+ require.Equal(t, merklesignature.Verifier{}.Commitment, recvAcct.StateProofID)
}
diff --git a/ledger/internal/prefetcher/prefetcher.go b/ledger/internal/prefetcher/prefetcher.go
index e011ad23e..82e0d830c 100644
--- a/ledger/internal/prefetcher/prefetcher.go
+++ b/ledger/internal/prefetcher/prefetcher.go
@@ -295,7 +295,7 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) {
case protocol.AssetTransferTx:
case protocol.AssetFreezeTx:
case protocol.ApplicationCallTx:
- case protocol.CompactCertTx:
+ case protocol.StateProofTx:
case protocol.KeyRegistrationTx:
}
// If you add new addresses here, also add them in getTxnAddresses().
diff --git a/ledger/internal/prefetcher/prefetcher_alignment_test.go b/ledger/internal/prefetcher/prefetcher_alignment_test.go
index ba14fbe03..05a672d32 100644
--- a/ledger/internal/prefetcher/prefetcher_alignment_test.go
+++ b/ledger/internal/prefetcher/prefetcher_alignment_test.go
@@ -94,6 +94,11 @@ func (l *prefetcherAlignmentTestLedger) BlockHdr(round basics.Round) (bookkeepin
return bookkeeping.BlockHeader{},
fmt.Errorf("BlockHdr() round %d not supported", round)
}
+
+func (l *prefetcherAlignmentTestLedger) BlockHdrCached(round basics.Round) (bookkeeping.BlockHeader, error) {
+ return l.BlockHdr(round)
+}
+
func (l *prefetcherAlignmentTestLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
return nil
}
@@ -162,7 +167,7 @@ func (l *prefetcherAlignmentTestLedger) GenesisProto() config.ConsensusParams {
func (l *prefetcherAlignmentTestLedger) LatestTotals() (basics.Round, ledgercore.AccountTotals, error) {
return 0, ledgercore.AccountTotals{}, nil
}
-func (l *prefetcherAlignmentTestLedger) CompactCertVoters(basics.Round) (*ledgercore.VotersForRound, error) {
+func (l *prefetcherAlignmentTestLedger) VotersForStateProof(basics.Round) (*ledgercore.VotersForRound, error) {
return nil, nil
}
@@ -390,6 +395,7 @@ func TestEvaluatorPrefetcherAlignmentCreateAsset(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentReconfigAsset(t *testing.T) {
+ t.Skip("disabled")
partitiontest.PartitionTest(t)
addr := makeAddress(1)
@@ -442,6 +448,7 @@ func TestEvaluatorPrefetcherAlignmentReconfigAsset(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentAssetOptIn(t *testing.T) {
+ t.Skip("disabled")
partitiontest.PartitionTest(t)
assetID := basics.AssetIndex(5)
@@ -497,6 +504,7 @@ func TestEvaluatorPrefetcherAlignmentAssetOptIn(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentAssetTransfer(t *testing.T) {
+ t.Skip("disabled")
partitiontest.PartitionTest(t)
assetID := basics.AssetIndex(5)
@@ -563,6 +571,7 @@ func TestEvaluatorPrefetcherAlignmentAssetTransfer(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentAssetClawback(t *testing.T) {
+ t.Skip("disabled")
partitiontest.PartitionTest(t)
assetID := basics.AssetIndex(5)
@@ -737,7 +746,7 @@ func TestEvaluatorPrefetcherAlignmentKeyreg(t *testing.T) {
var selectionPK crypto.VRFVerifier
selectionPK[0] = 2
var stateProofPK merklesignature.Verifier
- stateProofPK[0] = 3
+ stateProofPK.Commitment[0] = 3
txn := transactions.Transaction{
Type: protocol.KeyRegistrationTx,
@@ -748,7 +757,7 @@ func TestEvaluatorPrefetcherAlignmentKeyreg(t *testing.T) {
KeyregTxnFields: transactions.KeyregTxnFields{
VotePK: votePK,
SelectionPK: selectionPK,
- StateProofPK: stateProofPK,
+ StateProofPK: stateProofPK.Commitment,
VoteLast: 9,
VoteKeyDilution: 10,
},
@@ -802,6 +811,7 @@ func TestEvaluatorPrefetcherAlignmentCreateApplication(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentDeleteApplication(t *testing.T) {
+ t.Skip("disabled")
partitiontest.PartitionTest(t)
addr := makeAddress(1)
@@ -856,6 +866,7 @@ func TestEvaluatorPrefetcherAlignmentDeleteApplication(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentApplicationOptIn(t *testing.T) {
+ t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -914,6 +925,7 @@ func TestEvaluatorPrefetcherAlignmentApplicationOptIn(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentApplicationCloseOut(t *testing.T) {
+ t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -978,6 +990,7 @@ func TestEvaluatorPrefetcherAlignmentApplicationCloseOut(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentApplicationClearState(t *testing.T) {
+ t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -1042,6 +1055,7 @@ func TestEvaluatorPrefetcherAlignmentApplicationClearState(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentApplicationCallAccountsDeclaration(t *testing.T) {
+ t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -1110,6 +1124,7 @@ func TestEvaluatorPrefetcherAlignmentApplicationCallAccountsDeclaration(t *testi
}
func TestEvaluatorPrefetcherAlignmentApplicationCallForeignAppsDeclaration(t *testing.T) {
+ t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -1178,6 +1193,7 @@ func TestEvaluatorPrefetcherAlignmentApplicationCallForeignAppsDeclaration(t *te
}
func TestEvaluatorPrefetcherAlignmentApplicationCallForeignAssetsDeclaration(t *testing.T) {
+ t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -1245,7 +1261,7 @@ func TestEvaluatorPrefetcherAlignmentApplicationCallForeignAssetsDeclaration(t *
require.Equal(t, requested, prefetched)
}
-func TestEvaluatorPrefetcherAlignmentCompactCert(t *testing.T) {
+func TestEvaluatorPrefetcherAlignmentStateProof(t *testing.T) {
partitiontest.PartitionTest(t)
addr := makeAddress(1)
@@ -1265,12 +1281,12 @@ func TestEvaluatorPrefetcherAlignmentCompactCert(t *testing.T) {
}
txn := transactions.Transaction{
- Type: protocol.CompactCertTx,
+ Type: protocol.StateProofTx,
Header: transactions.Header{
Sender: addr,
GenesisHash: genesisHash(),
},
- CompactCertTxnFields: transactions.CompactCertTxnFields{},
+ StateProofTxnFields: transactions.StateProofTxnFields{},
}
requested, prefetched := run(t, l, txn)
diff --git a/ledger/internal/prefetcher/prefetcher_test.go b/ledger/internal/prefetcher/prefetcher_test.go
index 87a2e9d63..40fe6949b 100644
--- a/ledger/internal/prefetcher/prefetcher_test.go
+++ b/ledger/internal/prefetcher/prefetcher_test.go
@@ -120,7 +120,7 @@ func (l *prefetcherTestLedger) GenesisProto() config.ConsensusParams {
func (l *prefetcherTestLedger) LatestTotals() (basics.Round, ledgercore.AccountTotals, error) {
return l.round, ledgercore.AccountTotals{}, nil
}
-func (l *prefetcherTestLedger) CompactCertVoters(basics.Round) (*ledgercore.VotersForRound, error) {
+func (l *prefetcherTestLedger) VotersForStateProof(basics.Round) (*ledgercore.VotersForRound, error) {
return nil, nil
}
@@ -209,6 +209,7 @@ func TestEvaluatorPrefetcher(t *testing.T) {
ledger := getPrefetcherTestLedger(rnd)
type testCase struct {
name string
+ skip bool
signedTxn transactions.SignedTxn
accounts []prefetcher.LoadedAccountDataEntry
resources []prefetcher.LoadedResourcesEntry
@@ -258,6 +259,7 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
{
name: "asset config transaction for a non-existing asset",
+ skip: true,
signedTxn: transactions.SignedTxn{
Txn: transactions.Transaction{
Type: protocol.AssetConfigTx,
@@ -294,6 +296,7 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
{
name: "asset config transaction for an existing asset",
+ skip: true,
signedTxn: transactions.SignedTxn{
Txn: transactions.Transaction{
Type: protocol.AssetConfigTx,
@@ -330,6 +333,7 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
{
name: "asset transfer transaction",
+ skip: true,
signedTxn: transactions.SignedTxn{
Txn: transactions.Transaction{
Type: protocol.AssetTransferTx,
@@ -381,6 +385,7 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
{
name: "asset freeze transaction",
+ skip: true,
signedTxn: transactions.SignedTxn{
Txn: transactions.Transaction{
Type: protocol.AssetFreezeTx,
@@ -430,6 +435,7 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
{
name: "application transaction",
+ skip: true,
signedTxn: transactions.SignedTxn{
Txn: transactions.Transaction{
Type: protocol.ApplicationCallTx,
@@ -516,6 +522,9 @@ func TestEvaluatorPrefetcher(t *testing.T) {
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
+ if testCase.skip {
+ t.Skip()
+ }
groups := make([][]transactions.SignedTxnWithAD, 1)
groups[0] = make([]transactions.SignedTxnWithAD, 1)
groups[0][0].SignedTxn = testCase.signedTxn
@@ -536,6 +545,7 @@ func TestEvaluatorPrefetcher(t *testing.T) {
// Test for error from LookupAsset
func TestAssetLookupError(t *testing.T) {
+ t.Skip("disabled")
partitiontest.PartitionTest(t)
rnd := basics.Round(5)
@@ -585,6 +595,7 @@ func TestAssetLookupError(t *testing.T) {
// Test for error from GetCreatorForRound
func TestGetCreatorForRoundError(t *testing.T) {
+ t.Skip("disabled")
partitiontest.PartitionTest(t)
rnd := basics.Round(5)
diff --git a/ledger/ledger.go b/ledger/ledger.go
index a48385ff0..f5e4b9a8f 100644
--- a/ledger/ledger.go
+++ b/ledger/ledger.go
@@ -28,7 +28,6 @@ import (
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/compactcert"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
@@ -72,25 +71,29 @@ type Ledger struct {
genesisAccounts map[basics.Address]basics.AccountData
- genesisProto config.ConsensusParams
+ genesisProto config.ConsensusParams
+ genesisProtoVersion protocol.ConsensusVersion
// State-machine trackers
- accts accountUpdates
- catchpoint catchpointTracker
- txTail txTail
- bulletin bulletin
- notifier blockNotifier
- metrics metricsTracker
+ accts accountUpdates
+ acctsOnline onlineAccounts
+ catchpoint catchpointTracker
+ txTail txTail
+ bulletin bulletin
+ notifier blockNotifier
+ metrics metricsTracker
trackers trackerRegistry
trackerMu deadlock.RWMutex
- headerCache heapLRUCache
+ headerCache blockHeaderCache
// verifiedTxnCache holds all the verified transactions state
verifiedTxnCache verify.VerifiedTransactionCache
cfg config.Local
+
+ dbPathPrefix string
}
// OpenLedger creates a Ledger object, using SQLite database filenames
@@ -113,13 +116,15 @@ func OpenLedger(
genesisHash: genesisInitState.GenesisHash,
genesisAccounts: genesisInitState.Accounts,
genesisProto: config.Consensus[genesisInitState.Block.CurrentProtocol],
+ genesisProtoVersion: genesisInitState.Block.CurrentProtocol,
synchronousMode: db.SynchronousMode(cfg.LedgerSynchronousMode),
accountsRebuildSynchronousMode: db.SynchronousMode(cfg.AccountsRebuildSynchronousMode),
verifiedTxnCache: verify.MakeVerifiedTransactionCache(verifiedCacheSize),
cfg: cfg,
+ dbPathPrefix: dbPathPrefix,
}
- l.headerCache.maxEntries = 10
+ l.headerCache.initialize()
defer func() {
if err != nil {
@@ -154,9 +159,6 @@ func OpenLedger(
l.genesisAccounts = make(map[basics.Address]basics.AccountData)
}
- l.accts.initialize(cfg)
- l.catchpoint.initialize(cfg, dbPathPrefix)
-
err = l.reloadLedger()
if err != nil {
return nil, err
@@ -197,14 +199,19 @@ func (l *Ledger) reloadLedger() error {
// set account updates tracker as a driver to calculate tracker db round and committing offsets
trackers := []ledgerTracker{
- &l.accts, // update the balances
- &l.catchpoint, // catchpoints tracker : update catchpoint labels, create catchpoint files
- &l.txTail, // update the transaction tail, tracking the recent 1000 txn
- &l.bulletin, // provide closed channel signaling support for completed rounds
- &l.notifier, // send OnNewBlocks to subscribers
- &l.metrics, // provides metrics reporting support
+ &l.accts, // update the balances
+ &l.catchpoint, // catchpoints tracker : update catchpoint labels, create catchpoint files
+ &l.acctsOnline, // update online account balances history
+ &l.txTail, // update the transaction tail, tracking the recent 1000 txn
+ &l.bulletin, // provide closed channel signaling support for completed rounds
+ &l.notifier, // send OnNewBlocks to subscribers
+ &l.metrics, // provides metrics reporting support
}
+ l.accts.initialize(l.cfg)
+ l.acctsOnline.initialize(l.cfg)
+ l.catchpoint.initialize(l.cfg, l.dbPathPrefix)
+
err = l.trackers.initialize(l, trackers, l.cfg)
if err != nil {
return err
@@ -430,13 +437,13 @@ func (l *Ledger) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableTy
return l.accts.GetCreatorForRound(l.blockQ.latest(), cidx, ctype)
}
-// CompactCertVoters returns the top online accounts at round rnd.
+// VotersForStateProof returns the top online accounts at round rnd.
// The result might be nil, even with err=nil, if there are no voters
-// for that round because compact certs were not enabled.
-func (l *Ledger) CompactCertVoters(rnd basics.Round) (*ledgercore.VotersForRound, error) {
+// for that round because state proofs were not enabled.
+func (l *Ledger) VotersForStateProof(rnd basics.Round) (*ledgercore.VotersForRound, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- return l.accts.voters.getVoters(rnd)
+ return l.acctsOnline.voters.getVoters(rnd)
}
// ListAssets takes a maximum asset index and maximum result length, and
@@ -469,7 +476,6 @@ func (l *Ledger) LookupLatest(addr basics.Address) (basics.AccountData, basics.R
if err != nil {
return basics.AccountData{}, basics.Round(0), basics.MicroAlgos{}, err
}
-
return data, rnd, withoutRewards, nil
}
@@ -491,7 +497,6 @@ func (l *Ledger) LookupAccount(round basics.Round, addr basics.Address) (data le
// Intentionally apply (pending) rewards up to rnd, remembering the old value
withoutRewards = data.MicroAlgos
data = data.WithUpdatedRewards(config.Consensus[rewardsVersion], rewardsLevel)
-
return data, rnd, withoutRewards, nil
}
@@ -527,7 +532,7 @@ func (l *Ledger) LookupAgreement(rnd basics.Round, addr basics.Address) (basics.
defer l.trackerMu.RUnlock()
// Intentionally apply (pending) rewards up to rnd.
- data, err := l.accts.LookupOnlineAccountData(rnd, addr)
+ data, err := l.acctsOnline.LookupOnlineAccountData(rnd, addr)
if err != nil {
return basics.OnlineAccountData{}, err
}
@@ -541,12 +546,14 @@ func (l *Ledger) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (le
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- data, validThrough, err := l.accts.LookupWithoutRewards(rnd, addr)
+ var result ledgercore.AccountData
+
+ result, validThrough, err := l.accts.LookupWithoutRewards(rnd, addr)
if err != nil {
return ledgercore.AccountData{}, basics.Round(0), err
}
- return data, validThrough, nil
+ return result, validThrough, nil
}
// LatestTotals returns the totals of all accounts for the most recent round, as well as the round number.
@@ -560,7 +567,7 @@ func (l *Ledger) LatestTotals() (basics.Round, ledgercore.AccountTotals, error)
func (l *Ledger) OnlineTotals(rnd basics.Round) (basics.MicroAlgos, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- return l.accts.OnlineTotals(rnd)
+ return l.acctsOnline.onlineTotals(rnd)
}
// CheckDup return whether a transaction is a duplicate one.
@@ -591,15 +598,14 @@ func (l *Ledger) Block(rnd basics.Round) (blk bookkeeping.Block, err error) {
// BlockHdr returns the BlockHeader of the block for round rnd.
func (l *Ledger) BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err error) {
- value, exists := l.headerCache.Get(rnd)
+ blk, exists := l.headerCache.get(rnd)
if exists {
- blk = value.(bookkeeping.BlockHeader)
return
}
blk, err = l.blockQ.getBlockHdr(rnd)
if err == nil {
- l.headerCache.Put(rnd, blk)
+ l.headerCache.put(blk)
}
return
}
@@ -629,6 +635,7 @@ func (l *Ledger) AddBlock(blk bookkeeping.Block, cert agreement.Certificate) err
}
return err
}
+ updates.OptimizeAllocatedMemory(l.cfg.MaxAcctLookback)
vb := ledgercore.MakeValidatedBlock(blk, updates)
return l.AddValidatedBlock(vb, cert)
@@ -649,7 +656,7 @@ func (l *Ledger) AddValidatedBlock(vb ledgercore.ValidatedBlock, cert agreement.
if err != nil {
return err
}
- l.headerCache.Put(blk.Round(), blk.BlockHeader)
+ l.headerCache.put(blk.BlockHeader)
l.trackers.newBlock(blk, vb.Delta())
l.log.Debugf("ledger.AddValidatedBlock: added blk %d", blk.Round())
return nil
@@ -682,11 +689,37 @@ func (l *Ledger) GenesisProto() config.ConsensusParams {
return l.genesisProto
}
+// GenesisProtoVersion returns the initial protocol version for this ledger.
+func (l *Ledger) GenesisProtoVersion() protocol.ConsensusVersion {
+ return l.genesisProtoVersion
+}
+
// GenesisAccounts returns initial accounts for this ledger.
func (l *Ledger) GenesisAccounts() map[basics.Address]basics.AccountData {
return l.genesisAccounts
}
+// BlockHdrCached returns the block header if available.
+// Expected availability range is [Latest - MaxTxnLife, Latest]
+// allowing (MaxTxnLife + 1) = 1001 rounds back loopback.
+// The depth besides the MaxTxnLife is controlled by DeeperBlockHeaderHistory parameter
+// and currently set to 1.
+// Explanation:
+// Clients are expected to query blocks at rounds (txn.LastValid - (MaxTxnLife + 1)),
+// and because a txn is alive when the current round <= txn.LastValid
+// and valid if txn.LastValid - txn.FirstValid <= MaxTxnLife
+// the deepest lookup happens when txn.LastValid == current => txn.LastValid == Latest + 1
+// that gives Latest + 1 - (MaxTxnLife + 1) = Latest - MaxTxnLife as the first round to be accessible.
+func (l *Ledger) BlockHdrCached(rnd basics.Round) (hdr bookkeeping.BlockHeader, err error) {
+ l.trackerMu.RLock()
+ defer l.trackerMu.RUnlock()
+ hdr, ok := l.txTail.blockHeader(rnd)
+ if !ok {
+ err = fmt.Errorf("no cached header data for round %d", rnd)
+ }
+ return hdr, err
+}
+
// GetCatchpointCatchupState returns the current state of the catchpoint catchup.
func (l *Ledger) GetCatchpointCatchupState(ctx context.Context) (state CatchpointCatchupState, err error) {
return MakeCatchpointCatchupAccessor(l, l.log).GetState(ctx)
@@ -725,12 +758,13 @@ func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger int
return internal.Eval(context.Background(), accUpdatesLedger, blk, false, l.verifiedTxnCache, nil)
}
-// IsWritingCatchpointFile returns true when a catchpoint file is being generated. The function is used by the catchup service
-// to avoid memory pressure until the catchpoint file writing is complete.
-func (l *Ledger) IsWritingCatchpointFile() bool {
+// IsWritingCatchpointDataFile returns true when a catchpoint file is being generated.
+// The function is used by the catchup service to avoid memory pressure until the
+// catchpoint data file writing is complete.
+func (l *Ledger) IsWritingCatchpointDataFile() bool {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- return l.catchpoint.IsWritingCatchpointFile()
+ return l.catchpoint.IsWritingCatchpointDataFile()
}
// VerifiedTransactionCache returns the verify.VerifiedTransactionCache
@@ -769,24 +803,6 @@ func (l *Ledger) Validate(ctx context.Context, blk bookkeeping.Block, executionP
return &vb, nil
}
-// CompactCertParams computes the parameters for building or verifying
-// a compact cert for block hdr, using voters from block votersHdr.
-func CompactCertParams(votersHdr bookkeeping.BlockHeader, hdr bookkeeping.BlockHeader) (res compactcert.Params, err error) {
- return internal.CompactCertParams(votersHdr, hdr)
-}
-
-// AcceptableCompactCertWeight computes the acceptable signed weight
-// of a compact cert if it were to appear in a transaction with a
-// particular firstValid round. Earlier rounds require a smaller cert.
-// votersHdr specifies the block that contains the Merkle commitment of
-// the voters for this compact cert (and thus the compact cert is for
-// votersHdr.Round() + CompactCertRounds).
-//
-// logger must not be nil; use at least logging.Base()
-func AcceptableCompactCertWeight(votersHdr bookkeeping.BlockHeader, firstValid basics.Round, logger logging.Logger) uint64 {
- return internal.AcceptableCompactCertWeight(votersHdr, firstValid, logger)
-}
-
// DebuggerLedger defines the minimal set of method required for creating a debug balances.
type DebuggerLedger = internal.LedgerForCowBase
diff --git a/ledger/ledger_perf_test.go b/ledger/ledger_perf_test.go
index 133386f9e..a855f70de 100644
--- a/ledger/ledger_perf_test.go
+++ b/ledger/ledger_perf_test.go
@@ -20,8 +20,6 @@ import (
"context"
"crypto/rand"
"fmt"
- "io/ioutil"
- "os"
"path/filepath"
"strings"
"testing"
@@ -138,11 +136,9 @@ func benchmarkFullBlocks(params testParams, b *testing.B) {
deadlock.Opts.Disable = deadlockDisable
}()
- dbTempDir, err := ioutil.TempDir("", "testdir"+b.Name())
- require.NoError(b, err)
+ dbTempDir := b.TempDir()
dbName := fmt.Sprintf("%s.%d", b.Name(), crypto.RandUint64())
dbPrefix := filepath.Join(dbTempDir, dbName)
- defer os.RemoveAll(dbTempDir)
genesisInitState := getInitState()
@@ -153,7 +149,7 @@ func benchmarkFullBlocks(params testParams, b *testing.B) {
genesisInitState.Block.BlockHeader.GenesisHash = crypto.Digest{1}
creator := basics.Address{}
- _, err = rand.Read(creator[:])
+ _, err := rand.Read(creator[:])
require.NoError(b, err)
genesisInitState.Accounts[creator] = basics.MakeAccountData(basics.Offline, basics.MicroAlgos{Raw: 1234567890})
diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index 53e475980..114057f50 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -17,22 +17,24 @@
package ledger
import (
+ "bytes"
"context"
+ "database/sql"
+ "errors"
"fmt"
- "io/ioutil"
"math/rand"
"os"
- "runtime/pprof"
+ "runtime"
+ "sort"
"testing"
- "github.com/algorand/go-algorand/data/account"
- "github.com/algorand/go-algorand/util/db"
-
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/stateproof"
+ "github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
@@ -43,7 +45,9 @@ import (
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/db"
"github.com/algorand/go-algorand/util/execpool"
+ "github.com/algorand/go-deadlock"
)
func sign(secrets map[basics.Address]*crypto.SignatureSecrets, t transactions.Transaction) transactions.SignedTxn {
@@ -80,15 +84,15 @@ func initNextBlockHeader(correctHeader *bookkeeping.BlockHeader, lastBlock bookk
correctHeader.TxnCounter = lastBlock.TxnCounter
}
- if proto.CompactCertRounds > 0 {
- var ccBasic bookkeeping.CompactCertState
- if lastBlock.CompactCert[protocol.CompactCertBasic].CompactCertNextRound == 0 {
- ccBasic.CompactCertNextRound = (correctHeader.Round + basics.Round(proto.CompactCertVotersLookback)).RoundUpToMultipleOf(basics.Round(proto.CompactCertRounds)) + basics.Round(proto.CompactCertRounds)
+ if proto.StateProofInterval > 0 {
+ var ccBasic bookkeeping.StateProofTrackingData
+ if lastBlock.StateProofTracking[protocol.StateProofBasic].StateProofNextRound == 0 {
+ ccBasic.StateProofNextRound = (correctHeader.Round + basics.Round(proto.StateProofVotersLookback)).RoundUpToMultipleOf(basics.Round(proto.StateProofInterval)) + basics.Round(proto.StateProofInterval)
} else {
- ccBasic.CompactCertNextRound = lastBlock.CompactCert[protocol.CompactCertBasic].CompactCertNextRound
+ ccBasic.StateProofNextRound = lastBlock.StateProofTracking[protocol.StateProofBasic].StateProofNextRound
}
- correctHeader.CompactCert = map[protocol.CompactCertType]bookkeeping.CompactCertState{
- protocol.CompactCertBasic: ccBasic,
+ correctHeader.StateProofTracking = map[protocol.StateProofType]bookkeeping.StateProofTrackingData{
+ protocol.StateProofBasic: ccBasic,
}
}
}
@@ -141,6 +145,18 @@ func makeNewEmptyBlock(t *testing.T, l *Ledger, GenesisID string, initAccounts m
blk.RewardsPool = testPoolAddr
blk.FeeSink = testSinkAddr
blk.CurrentProtocol = lastBlock.CurrentProtocol
+
+ if proto.StateProofInterval != 0 && uint64(blk.Round())%proto.StateProofInterval == 0 && uint64(blk.Round()) != 0 {
+ voters, err := l.VotersForStateProof(blk.Round() - basics.Round(proto.StateProofVotersLookback))
+ require.NoError(t, err)
+ stateProofTracking := bookkeeping.StateProofTrackingData{
+ StateProofVotersCommitment: voters.Tree.Root(),
+ StateProofOnlineTotalWeight: voters.TotalWeight,
+ StateProofNextRound: blk.BlockHeader.StateProofTracking[protocol.StateProofBasic].StateProofNextRound,
+ }
+ blk.BlockHeader.StateProofTracking[protocol.StateProofBasic] = stateProofTracking
+ }
+
return
}
@@ -1091,7 +1107,7 @@ func testLedgerSingleTxApplyData(t *testing.T, version protocol.ConsensusVersion
signer := p.Participation.StateProofSecrets
require.NoError(t, err)
- correctKeyregFields.StateProofPK = *(signer.GetVerifier())
+ correctKeyregFields.StateProofPK = signer.GetVerifier().Commitment
}
correctKeyreg := transactions.Transaction{
@@ -1364,6 +1380,7 @@ func testLedgerRegressionFaultyLeaseFirstValidCheck2f3880f7(t *testing.T, versio
func TestLedgerBlockHdrCaching(t *testing.T) {
partitiontest.PartitionTest(t)
+ a := require.New(t)
dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
genesisInitState := getInitState()
@@ -1372,20 +1389,89 @@ func TestLedgerBlockHdrCaching(t *testing.T) {
cfg.Archival = true
log := logging.TestingLog(t)
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
- require.NoError(t, err)
+ a.NoError(err)
defer l.Close()
blk := genesisInitState.Block
- for i := 0; i < 128; i++ {
+ for i := 0; i < 1024; i++ {
blk.BlockHeader.Round++
blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000)
err := l.AddBlock(blk, agreement.Certificate{})
- require.NoError(t, err)
+ a.NoError(err)
hdr, err := l.BlockHdr(blk.BlockHeader.Round)
- require.NoError(t, err)
- require.Equal(t, blk.BlockHeader, hdr)
+ a.NoError(err)
+ a.Equal(blk.BlockHeader, hdr)
+ }
+
+ rnd := basics.Round(128)
+ hdr, err := l.BlockHdr(rnd) // should update LRU cache but not latestBlockHeaderCache
+ a.NoError(err)
+ a.Equal(rnd, hdr.Round)
+
+ _, exists := l.headerCache.lruCache.Get(rnd)
+ a.True(exists)
+
+ _, exists = l.headerCache.latestHeaderCache.get(rnd)
+ a.False(exists)
+}
+
+func BenchmarkLedgerBlockHdrCaching(b *testing.B) {
+ benchLedgerCache(b, 1024-256+1)
+}
+
+func BenchmarkLedgerBlockHdrWithoutCaching(b *testing.B) {
+ benchLedgerCache(b, 100)
+}
+
+type nullWriter struct{} // logging output not required
+
+func (w nullWriter) Write(data []byte) (n int, err error) {
+ return len(data), nil
+}
+
+func benchLedgerCache(b *testing.B, startRound basics.Round) {
+ a := require.New(b)
+
+ dbName := fmt.Sprintf("%s.%d", b.Name(), crypto.RandUint64())
+ genesisInitState := getInitState()
+ const inMem = false // benchmark actual DB stored in disk instead of on memory
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ log := logging.TestingLog(b)
+ log.SetOutput(nullWriter{})
+ l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ a.NoError(err)
+ defer func() { // close ledger and remove temporary DB file
+ l.Close()
+ err := os.Remove(dbName + ".tracker.sqlite")
+ if err != nil {
+ fmt.Printf("os.Remove: %v \n", err)
+ }
+ err = os.Remove(dbName + ".block.sqlite")
+ if err != nil {
+ fmt.Printf("os.Remove: %v \n", err)
+ }
+
+ }()
+
+ blk := genesisInitState.Block
+
+ // Fill ledger (and its cache) with blocks
+ for i := 0; i < 1024; i++ {
+ blk.BlockHeader.Round++
+ blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000)
+ err := l.AddBlock(blk, agreement.Certificate{})
+ a.NoError(err)
+ }
+
+ for i := 0; i < b.N; i++ {
+ for j := startRound; j < startRound+256; j++ { // these rounds should be in cache
+ hdr, err := l.BlockHdr(j)
+ a.NoError(err)
+ a.Equal(j, hdr.Round)
+ }
}
}
@@ -1564,6 +1650,146 @@ func TestListAssetsAndApplications(t *testing.T) {
require.Equal(t, appCount, len(results))
}
+// TestLedgerKeepsOldBlocksForStateProof test that if stateproof chain is delayed for X intervals, the ledger will not
+// remove old blocks from the database. When verifying old stateproof transaction, nodes must have the header of the corresponding
+// voters round, if this won't be available the verification would fail.
+// the voter tracker should prevent the remove needed blocks from the database.
+func TestLedgerKeepsOldBlocksForStateProof(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // since the first state proof is expected to happen on stateproofInterval*2 we would start give-up on state proofs we would
+ // give up on old state proofs only after stateproofInterval*3
+ maxBlocks := int((config.Consensus[protocol.ConsensusCurrentVersion].StateProofMaxRecoveryIntervals + 2) * config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval)
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 10000000000)
+
+ // place real values on the participation period, so we would create a commitment with some stake.
+ accountsWithValid := make(map[basics.Address]basics.AccountData)
+ for addr, elem := range genesisInitState.Accounts {
+ newAccount := elem
+ newAccount.Status = basics.Online
+ newAccount.VoteFirstValid = 1
+ newAccount.VoteLastValid = 10000
+ newAccount.VoteKeyDilution = 10
+ crypto.RandBytes(newAccount.VoteID[:])
+ crypto.RandBytes(newAccount.SelectionID[:])
+ crypto.RandBytes(newAccount.StateProofID[:])
+ accountsWithValid[addr] = newAccount
+ }
+ genesisInitState.Accounts = accountsWithValid
+
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = false
+ log := logging.TestingLog(t)
+ l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer l.Close()
+
+ lastBlock, err := l.Block(l.Latest())
+ proto := config.Consensus[lastBlock.CurrentProtocol]
+ accounts := make(map[basics.Address]basics.AccountData, len(genesisInitState.Accounts)+maxBlocks)
+ keys := make(map[basics.Address]*crypto.SignatureSecrets, len(initKeys)+maxBlocks)
+ // regular addresses: all init accounts minus pools
+
+ addresses := make([]basics.Address, len(genesisInitState.Accounts)-2, len(genesisInitState.Accounts)+maxBlocks)
+ i := 0
+ for addr := range genesisInitState.Accounts {
+ if addr != testPoolAddr && addr != testSinkAddr {
+ addresses[i] = addr
+ i++
+ }
+ accounts[addr] = genesisInitState.Accounts[addr]
+ keys[addr] = initKeys[addr]
+ }
+
+ for i := 0; i < maxBlocks; i++ {
+ addDummyBlock(t, addresses, proto, l, initKeys, genesisInitState)
+ }
+ backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
+ defer backlogPool.Shutdown()
+
+ // On this round there is no give up on any state proof - so we would be able to verify an old state proof txn.
+
+ // We now create block with stateproof transaction. since we don't want to complicate the test and create
+ // a cryptographically correct stateproof we would make sure that only the crypto part of the verification fails.
+ blk := createBlkWithStateproof(t, maxBlocks, proto, genesisInitState, l, accounts)
+ _, err = l.Validate(context.Background(), blk, backlogPool)
+ require.ErrorContains(t, err, "state proof crypto error")
+
+ for i := uint64(0); i < proto.StateProofInterval; i++ {
+ addDummyBlock(t, addresses, proto, l, initKeys, genesisInitState)
+ }
+
+ l.WaitForCommit(l.Latest())
+ // at the point the ledger would remove the voters round for the database.
+ // that will cause the stateproof transaction verification to fail because there are
+ // missing blocks
+ blk = createBlkWithStateproof(t, maxBlocks, proto, genesisInitState, l, accounts)
+ _, err = l.Validate(context.Background(), blk, backlogPool)
+ expectedErr := &ledgercore.ErrNoEntry{}
+ require.True(t, errors.As(err, expectedErr), fmt.Sprintf("got error %s", err))
+}
+
+func createBlkWithStateproof(t *testing.T, maxBlocks int, proto config.ConsensusParams, genesisInitState ledgercore.InitState, l *Ledger, accounts map[basics.Address]basics.AccountData) bookkeeping.Block {
+ sp := stateproof.StateProof{SignedWeight: 5000000000000000}
+ var stxn transactions.SignedTxn
+ stxn.Txn.Type = protocol.StateProofTx
+ stxn.Txn.Sender = transactions.StateProofSender
+ stxn.Txn.FirstValid = basics.Round(uint64(maxBlocks) - proto.StateProofInterval)
+ stxn.Txn.LastValid = stxn.Txn.FirstValid + basics.Round(proto.MaxTxnLife)
+ stxn.Txn.GenesisHash = genesisInitState.GenesisHash
+ stxn.Txn.StateProofType = protocol.StateProofBasic
+ stxn.Txn.Message.LastAttestedRound = 512
+ stxn.Txn.StateProof = sp
+
+ blk := makeNewEmptyBlock(t, l, t.Name(), accounts)
+ proto = config.Consensus[blk.CurrentProtocol]
+ for _, stx := range []transactions.SignedTxn{stxn} {
+ txib, err := blk.EncodeSignedTxn(stx, transactions.ApplyData{})
+ require.NoError(t, err)
+ if proto.TxnCounter {
+ blk.TxnCounter = blk.TxnCounter + 1
+ }
+ blk.Payset = append(blk.Payset, txib)
+ }
+
+ var err error
+ blk.TxnCommitments, err = blk.PaysetCommit()
+ require.NoError(t, err)
+ return blk
+}
+
+func addDummyBlock(t *testing.T, addresses []basics.Address, proto config.ConsensusParams, l *Ledger, initKeys map[basics.Address]*crypto.SignatureSecrets, genesisInitState ledgercore.InitState) {
+ stxns := make([]transactions.SignedTxn, 2)
+ for j := 0; j < 2; j++ {
+ txHeader := transactions.Header{
+ Sender: addresses[0],
+ Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2},
+ FirstValid: l.Latest() + 1,
+ LastValid: l.Latest() + 10,
+ GenesisID: t.Name(),
+ GenesisHash: crypto.Hash([]byte(t.Name())),
+ Note: []byte{uint8(j)},
+ }
+
+ payment := transactions.PaymentTxnFields{
+ Receiver: addresses[0],
+ Amount: basics.MicroAlgos{Raw: 1000},
+ }
+
+ tx := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: txHeader,
+ PaymentTxnFields: payment,
+ }
+ stxns[j] = sign(initKeys, tx)
+ }
+ err := l.addBlockTxns(t, genesisInitState.Accounts, stxns, transactions.ApplyData{})
+ require.NoError(t, err)
+
+}
+
func TestLedgerMemoryLeak(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -1574,13 +1800,16 @@ func TestLedgerMemoryLeak(t *testing.T) {
cfg := config.GetDefaultLocal()
cfg.Archival = true
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info) // prevent spamming with ledger.AddValidatedBlock debug message
+ deadlock.Opts.Disable = true // catchpoint writing might take long
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
require.NoError(t, err)
defer l.Close()
- maxBlocks := 10000
+ const maxBlocks = 1_000_000
nftPerAcct := make(map[basics.Address]int)
lastBlock, err := l.Block(l.Latest())
+ require.NoError(t, err)
proto := config.Consensus[lastBlock.CurrentProtocol]
accounts := make(map[basics.Address]basics.AccountData, len(genesisInitState.Accounts)+maxBlocks)
keys := make(map[basics.Address]*crypto.SignatureSecrets, len(initKeys)+maxBlocks)
@@ -1596,6 +1825,9 @@ func TestLedgerMemoryLeak(t *testing.T) {
keys[addr] = initKeys[addr]
}
+ fmt.Printf("%s\t%s\t%s\t%s\n", "Round", "TotalAlloc, MB", "HeapAlloc, MB", "LiveObj")
+ fmt.Printf("%s\t%s\t%s\t%s\n", "-----", "--------------", "-------------", "-------")
+
curAddressIdx := 0
// run for maxBlocks rounds
// generate 1000 txn per block
@@ -1673,18 +1905,28 @@ func TestLedgerMemoryLeak(t *testing.T) {
}
err = l.addBlockTxns(t, genesisInitState.Accounts, stxns, transactions.ApplyData{})
require.NoError(t, err)
- if i%100 == 0 {
- l.WaitForCommit(l.Latest())
- fmt.Printf("block: %d\n", l.Latest())
+
+ latest := l.Latest()
+ if latest%100 == 0 {
+ l.WaitForCommit(latest)
}
- if i%1000 == 0 && i > 0 {
- memprofile := fmt.Sprintf("%s-memprof-%d", t.Name(), i)
- f, err := os.Create(memprofile)
- require.NoError(t, err)
- err = pprof.WriteHeapProfile(f)
- require.NoError(t, err)
- f.Close()
- fmt.Printf("Profile %s created\n", memprofile)
+ if latest%1000 == 0 || i%1000 == 0 && i > 0 {
+ // pct := debug.SetGCPercent(-1) // prevent CG in between memory stats reading and heap profiling
+
+ var rtm runtime.MemStats
+ runtime.ReadMemStats(&rtm)
+ const meg = 1024 * 1024
+ fmt.Printf("%5d\t%14d\t%13d\t%7d\n", latest, rtm.TotalAlloc/meg, rtm.HeapAlloc/meg, rtm.Mallocs-rtm.Frees)
+
+ // Use the code below to generate memory profile if needed for debugging
+ // memprofile := fmt.Sprintf("%s-memprof-%d", t.Name(), latest)
+ // f, err := os.Create(memprofile)
+ // require.NoError(t, err)
+ // err = pprof.WriteHeapProfile(f)
+ // require.NoError(t, err)
+ // f.Close()
+
+ // debug.SetGCPercent(pct)
}
}
}
@@ -1737,8 +1979,7 @@ func TestLookupAgreement(t *testing.T) {
func BenchmarkLedgerStartup(b *testing.B) {
log := logging.TestingLog(b)
- tmpDir, err := ioutil.TempDir(os.TempDir(), "BenchmarkLedgerStartup")
- require.NoError(b, err)
+ tmpDir := b.TempDir()
genesisInitState, _ := ledgertesting.GenerateInitState(b, protocol.ConsensusCurrentVersion, 100)
cfg := config.GetDefaultLocal()
@@ -1770,5 +2011,811 @@ func BenchmarkLedgerStartup(b *testing.B) {
b.Run("DiskDatabase/Archival", func(b *testing.B) {
testOpenLedger(b, false, cfg)
})
- os.RemoveAll(tmpDir)
+}
+
+// TestLedgerReloadShrinkDeltas checks the ledger has correct account state
+// after reloading with new configuration with shorter in-memory deltas for trackers
+func TestLedgerReloadShrinkDeltas(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 10_000_000_000)
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ const inMem = false
+ cfg := config.GetDefaultLocal()
+ cfg.MaxAcctLookback = proto.MaxBalLookback
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Info) // prevent spamming with ledger.AddValidatedBlock debug message
+ l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer func() {
+ l.Close()
+ os.Remove(dbName + ".block.sqlite")
+ os.Remove(dbName + ".tracker.sqlite")
+ }()
+
+ maxBlocks := int(proto.MaxBalLookback * 2)
+ accounts := make(map[basics.Address]basics.AccountData, len(genesisInitState.Accounts))
+ keys := make(map[basics.Address]*crypto.SignatureSecrets, len(initKeys))
+ // regular addresses: all init accounts minus pools
+ addresses := make([]basics.Address, len(genesisInitState.Accounts)-2, len(genesisInitState.Accounts))
+ i := 0
+ for addr := range genesisInitState.Accounts {
+ if addr != testPoolAddr && addr != testSinkAddr {
+ addresses[i] = addr
+ i++
+ }
+ accounts[addr] = genesisInitState.Accounts[addr]
+ keys[addr] = initKeys[addr]
+ }
+ sort.SliceStable(addresses, func(i, j int) bool { return bytes.Compare(addresses[i][:], addresses[j][:]) == -1 })
+
+ onlineTotals := make([]basics.MicroAlgos, maxBlocks+1)
+ curAddressIdx := 0
+ maxValidity := basics.Round(20) // some number different from number of txns in blocks
+ txnIDs := make(map[basics.Round]map[transactions.Txid]struct{})
+ // run for maxBlocks rounds with random payment transactions
+ // generate 1000 txn per block
+ for i := 0; i < maxBlocks; i++ {
+ stxns := make([]transactions.SignedTxn, 10)
+ latest := l.Latest()
+ txnIDs[latest+1] = make(map[transactions.Txid]struct{})
+ for j := 0; j < 10; j++ {
+ feeMult := rand.Intn(5) + 1
+ amountMult := rand.Intn(1000) + 1
+ receiver := ledgertesting.RandomAddress()
+ txHeader := transactions.Header{
+ Sender: addresses[curAddressIdx],
+ Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * uint64(feeMult)},
+ FirstValid: latest + 1,
+ LastValid: latest + maxValidity,
+ GenesisID: t.Name(),
+ GenesisHash: crypto.Hash([]byte(t.Name())),
+ }
+
+ correctPayFields := transactions.PaymentTxnFields{
+ Receiver: receiver,
+ Amount: basics.MicroAlgos{Raw: uint64(100 * amountMult)},
+ }
+
+ tx := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: txHeader,
+ PaymentTxnFields: correctPayFields,
+ }
+
+ stxns[j] = sign(initKeys, tx)
+ curAddressIdx = (curAddressIdx + 1) % len(addresses)
+ txnIDs[latest+1][tx.ID()] = struct{}{}
+ }
+ err = l.addBlockTxns(t, genesisInitState.Accounts, stxns, transactions.ApplyData{})
+ require.NoError(t, err)
+ if i%100 == 0 || i == maxBlocks-1 {
+ l.WaitForCommit(latest + 1)
+ }
+ onlineTotals[i+1], err = l.accts.onlineTotals(basics.Round(i + 1))
+ require.NoError(t, err)
+ }
+
+ latest := l.Latest()
+ nextRound := latest + 1
+ balancesRound := nextRound.SubSaturate(basics.Round(proto.MaxBalLookback))
+
+ origBalances := make([]basics.MicroAlgos, len(addresses))
+ origRewardsBalances := make([]basics.MicroAlgos, len(addresses))
+ origAgreementBalances := make([]basics.MicroAlgos, len(addresses))
+ for i, addr := range addresses {
+ ad, rnd, err := l.LookupWithoutRewards(latest, addr)
+ require.NoError(t, err)
+ require.Equal(t, latest, rnd)
+ origBalances[i] = ad.MicroAlgos
+
+ acct, rnd, wo, err := l.LookupAccount(latest, addr)
+ require.NoError(t, err)
+ require.Equal(t, latest, rnd)
+ require.Equal(t, origBalances[i], wo)
+ origRewardsBalances[i] = acct.MicroAlgos
+
+ oad, err := l.LookupAgreement(balancesRound, addr)
+ require.NoError(t, err)
+ origAgreementBalances[i] = oad.MicroAlgosWithRewards
+ }
+
+ var nonZeros int
+ for _, bal := range origAgreementBalances {
+ if bal.Raw > 0 {
+ nonZeros++
+ }
+ }
+ require.Greater(t, nonZeros, 0)
+
+ // at round "maxBlocks" the ledger must have maxValidity blocks of transactions
+ for i := latest; i <= latest+maxValidity; i++ {
+ for txid := range txnIDs[i] {
+ require.NoError(t, l.CheckDup(proto, nextRound, i-maxValidity, i, txid, ledgercore.Txlease{}))
+ }
+ }
+
+ // check an error latest-1
+ for txid := range txnIDs[latest-1] {
+ require.Error(t, l.CheckDup(proto, nextRound, latest-maxValidity, latest-1, txid, ledgercore.Txlease{}))
+ }
+
+ shorterLookback := config.GetDefaultLocal().MaxAcctLookback
+ require.Less(t, shorterLookback, cfg.MaxAcctLookback)
+ cfg.MaxAcctLookback = shorterLookback
+ l.cfg = cfg
+ l.reloadLedger()
+
+ _, err = l.OnlineTotals(basics.Round(proto.MaxBalLookback - shorterLookback))
+ require.Error(t, err)
+ for i := basics.Round(proto.MaxBalLookback - shorterLookback + 1); i <= l.Latest(); i++ {
+ online, err := l.OnlineTotals(i)
+ require.NoError(t, err)
+ require.Equal(t, onlineTotals[i], online)
+ }
+
+ for i, addr := range addresses {
+ ad, rnd, err := l.LookupWithoutRewards(latest, addr)
+ require.NoError(t, err)
+ require.Equal(t, latest, rnd)
+ require.Equal(t, origBalances[i], ad.MicroAlgos)
+
+ acct, rnd, wo, err := l.LookupAccount(latest, addr)
+ require.NoError(t, err)
+ require.Equal(t, latest, rnd)
+ require.Equal(t, origRewardsBalances[i], acct.MicroAlgos)
+ require.Equal(t, origBalances[i], wo)
+
+ oad, err := l.LookupAgreement(balancesRound, addr)
+ require.NoError(t, err)
+ require.Equal(t, origAgreementBalances[i], oad.MicroAlgosWithRewards)
+
+ // TODO:
+ // add a test checking all committed pre-reload entries are gone
+ // add as a tracker test
+ }
+
+ // at round maxBlocks the ledger must have maxValidity blocks of transactions, check
+ for i := latest; i <= latest+maxValidity; i++ {
+ for txid := range txnIDs[i] {
+ require.NoError(t, l.CheckDup(proto, nextRound, i-maxValidity, i, txid, ledgercore.Txlease{}))
+ }
+ }
+
+ // check an error latest-1
+ for txid := range txnIDs[latest-1] {
+ require.Error(t, l.CheckDup(proto, nextRound, latest-maxValidity, latest-1, txid, ledgercore.Txlease{}))
+ }
+}
+
+// TestLedgerMigrateV6ShrinkDeltas opens a ledger + dbV6, submits a bunch of txns,
+// then migrates db and reopens ledger, and checks that the state is correct
+func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ accountDBVersion = 6
+ defer func() {
+ accountDBVersion = 7
+ }()
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-migrate-shrink-deltas")
+ proto := config.Consensus[protocol.ConsensusV31]
+ proto.RewardsRateRefreshInterval = 500
+ config.Consensus[testProtocolVersion] = proto
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, testProtocolVersion, 10_000_000_000)
+ const inMem = false
+ cfg := config.GetDefaultLocal()
+ cfg.MaxAcctLookback = proto.MaxBalLookback
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Info) // prevent spamming with ledger.AddValidatedBlock debug message
+ trackerDB, blockDB, err := openLedgerDB(dbName, inMem)
+ require.NoError(t, err)
+ defer func() {
+ trackerDB.Close()
+ blockDB.Close()
+ }()
+ // create tables so online accounts can still be written
+ err = trackerDB.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ if err := accountsCreateOnlineAccountsTable(ctx, tx); err != nil {
+ return err
+ }
+ if err := accountsCreateTxTailTable(ctx, tx); err != nil {
+ return err
+ }
+ if err := accountsCreateOnlineRoundParamsTable(ctx, tx); err != nil {
+ return err
+ }
+ if err := accountsCreateCatchpointFirstStageInfoTable(ctx, tx); err != nil {
+ return err
+ }
+ return nil
+ })
+ require.NoError(t, err)
+
+ l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer func() {
+ l.Close()
+ os.Remove(dbName + ".block.sqlite")
+ os.Remove(dbName + ".tracker.sqlite")
+ os.Remove(dbName + ".block.sqlite-shm")
+ os.Remove(dbName + ".tracker.sqlite-shm")
+ os.Remove(dbName + ".block.sqlite-wal")
+ os.Remove(dbName + ".tracker.sqlite-wal")
+ }()
+
+ // remove online tracker in order to make v6 schema work
+ for i := range l.trackers.trackers {
+ if l.trackers.trackers[i] == l.trackers.acctsOnline {
+ l.trackers.trackers = append(l.trackers.trackers[:i], l.trackers.trackers[i+1:]...)
+ break
+ }
+ }
+ l.trackers.acctsOnline = nil
+ l.acctsOnline = onlineAccounts{}
+
+ maxBlocks := 2000
+ accounts := make(map[basics.Address]basics.AccountData, len(genesisInitState.Accounts))
+ keys := make(map[basics.Address]*crypto.SignatureSecrets, len(initKeys))
+ // regular addresses: all init accounts minus pools
+ addresses := make([]basics.Address, len(genesisInitState.Accounts)-2, len(genesisInitState.Accounts))
+ i := 0
+ for addr := range genesisInitState.Accounts {
+ if addr != testPoolAddr && addr != testSinkAddr {
+ addresses[i] = addr
+ i++
+ }
+ accounts[addr] = genesisInitState.Accounts[addr]
+ keys[addr] = initKeys[addr]
+ }
+ sort.SliceStable(addresses, func(i, j int) bool { return bytes.Compare(addresses[i][:], addresses[j][:]) == -1 })
+
+ onlineTotals := make([]basics.MicroAlgos, maxBlocks+1)
+ curAddressIdx := 0
+ maxValidity := basics.Round(20) // some number different from number of txns in blocks
+ txnIDs := make(map[basics.Round]map[transactions.Txid]struct{})
+ // run for maxBlocks rounds with random payment transactions
+ // generate numTxns txn per block
+ for i := 0; i < maxBlocks; i++ {
+ numTxns := crypto.RandUint64()%9 + 7
+ stxns := make([]transactions.SignedTxn, numTxns)
+ latest := l.Latest()
+ txnIDs[latest+1] = make(map[transactions.Txid]struct{})
+ for j := 0; j < int(numTxns); j++ {
+ feeMult := rand.Intn(5) + 1
+ amountMult := rand.Intn(1000) + 1
+ receiver := ledgertesting.RandomAddress()
+ txHeader := transactions.Header{
+ Sender: addresses[curAddressIdx],
+ Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * uint64(feeMult)},
+ FirstValid: latest + 1,
+ LastValid: latest + maxValidity,
+ GenesisID: t.Name(),
+ GenesisHash: crypto.Hash([]byte(t.Name())),
+ }
+
+ tx := transactions.Transaction{
+ Header: txHeader,
+ }
+
+ // have one txn be a keyreg txn that flips online to offline
+ // have all other txns be random payment txns
+ if j == 0 {
+ var keyregTxnFields transactions.KeyregTxnFields
+ // keep low accounts online, high accounts offline
+ // otherwise all accounts become offline eventually and no agreement balances to check
+ if curAddressIdx < len(addresses)/2 {
+ keyregTxnFields = transactions.KeyregTxnFields{
+ VoteFirst: latest + 1,
+ VoteLast: latest + 100_000,
+ }
+ var votepk crypto.OneTimeSignatureVerifier
+ votepk[0] = byte(j % 256)
+ votepk[1] = byte(i % 256)
+ votepk[2] = byte(254)
+ var selpk crypto.VRFVerifier
+ selpk[0] = byte(j % 256)
+ selpk[1] = byte(i % 256)
+ selpk[2] = byte(255)
+
+ keyregTxnFields.VotePK = votepk
+ keyregTxnFields.SelectionPK = selpk
+ }
+ tx.Type = protocol.KeyRegistrationTx
+ tx.KeyregTxnFields = keyregTxnFields
+ } else {
+ correctPayFields := transactions.PaymentTxnFields{
+ Receiver: receiver,
+ Amount: basics.MicroAlgos{Raw: uint64(100 * amountMult)},
+ }
+ tx.Type = protocol.PaymentTx
+ tx.PaymentTxnFields = correctPayFields
+ }
+
+ stxns[j] = sign(initKeys, tx)
+ curAddressIdx = (curAddressIdx + 1) % len(addresses)
+ txnIDs[latest+1][tx.ID()] = struct{}{}
+ }
+ err = l.addBlockTxns(t, genesisInitState.Accounts, stxns, transactions.ApplyData{})
+ require.NoError(t, err)
+ if i%100 == 0 || i == maxBlocks-1 {
+ l.WaitForCommit(latest + 1)
+ }
+ onlineTotals[i+1], err = l.accts.onlineTotals(basics.Round(i + 1))
+ require.NoError(t, err)
+ }
+
+ latest := l.Latest()
+ nextRound := latest + 1
+ balancesRound := nextRound.SubSaturate(basics.Round(proto.MaxBalLookback))
+
+ origBalances := make([]basics.MicroAlgos, len(addresses))
+ origRewardsBalances := make([]basics.MicroAlgos, len(addresses))
+ origAgreementBalances := make([]basics.MicroAlgos, len(addresses))
+ for i, addr := range addresses {
+ ad, rnd, err := l.LookupWithoutRewards(latest, addr)
+ require.NoError(t, err)
+ require.Equal(t, latest, rnd)
+ origBalances[i] = ad.MicroAlgos
+
+ acct, rnd, wo, err := l.LookupAccount(latest, addr)
+ require.NoError(t, err)
+ require.Equal(t, latest, rnd)
+ require.Equal(t, origBalances[i], wo)
+ origRewardsBalances[i] = acct.MicroAlgos
+
+ acct, rnd, _, err = l.LookupAccount(balancesRound, addr)
+ require.NoError(t, err)
+ require.Equal(t, balancesRound, rnd)
+ if acct.Status == basics.Online {
+ origAgreementBalances[i] = acct.MicroAlgos
+ } else {
+ origAgreementBalances[i] = basics.MicroAlgos{}
+ }
+ }
+
+ var nonZeros int
+ for _, bal := range origAgreementBalances {
+ if bal.Raw > 0 {
+ nonZeros++
+ }
+ }
+ require.Greater(t, nonZeros, 0)
+
+ // at round "maxBlocks" the ledger must have maxValidity blocks of transactions
+ for i := latest; i <= latest+maxValidity; i++ {
+ for txid := range txnIDs[i] {
+ require.NoError(t, l.CheckDup(proto, nextRound, i-maxValidity, i, txid, ledgercore.Txlease{}))
+ }
+ }
+
+ // check an error latest-1
+ for txid := range txnIDs[latest-1] {
+ require.Error(t, l.CheckDup(proto, nextRound, latest-maxValidity, latest-1, txid, ledgercore.Txlease{}))
+ }
+
+ shorterLookback := config.GetDefaultLocal().MaxAcctLookback
+ require.Less(t, shorterLookback, cfg.MaxAcctLookback)
+ l.Close()
+
+ cfg.MaxAcctLookback = shorterLookback
+ accountDBVersion = 7
+ // delete tables since we want to check they can be made from other data
+ err = trackerDB.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ if _, err := tx.ExecContext(ctx, "DROP TABLE IF EXISTS onlineaccounts"); err != nil {
+ return err
+ }
+ if _, err := tx.ExecContext(ctx, "DROP TABLE IF EXISTS txtail"); err != nil {
+ return err
+ }
+ if _, err = tx.ExecContext(ctx, "DROP TABLE IF EXISTS onlineroundparamstail"); err != nil {
+ return err
+ }
+ return nil
+ })
+ require.NoError(t, err)
+
+ l2, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer func() {
+ l2.Close()
+ }()
+
+ _, err = l2.OnlineTotals(basics.Round(proto.MaxBalLookback - shorterLookback))
+ require.Error(t, err)
+ for i := l2.Latest() - basics.Round(proto.MaxBalLookback-1); i <= l2.Latest(); i++ {
+ online, err := l2.OnlineTotals(i)
+ require.NoError(t, err)
+ require.Equal(t, onlineTotals[i], online)
+ }
+
+ for i, addr := range addresses {
+ ad, rnd, err := l2.LookupWithoutRewards(latest, addr)
+ require.NoError(t, err)
+ require.Equal(t, latest, rnd)
+ require.Equal(t, origBalances[i], ad.MicroAlgos)
+
+ acct, rnd, wo, err := l2.LookupAccount(latest, addr)
+ require.NoError(t, err)
+ require.Equal(t, latest, rnd)
+ require.Equal(t, origRewardsBalances[i], acct.MicroAlgos)
+ require.Equal(t, origBalances[i], wo)
+
+ oad, err := l2.LookupAgreement(balancesRound, addr)
+ require.NoError(t, err)
+ require.Equal(t, origAgreementBalances[i], oad.MicroAlgosWithRewards)
+ }
+
+ // at round maxBlocks the ledger must have maxValidity blocks of transactions, check
+ for i := latest; i <= latest+maxValidity; i++ {
+ for txid := range txnIDs[i] {
+ require.NoError(t, l2.CheckDup(proto, nextRound, i-maxValidity, i, txid, ledgercore.Txlease{}))
+ }
+ }
+
+ // check an error latest-1
+ for txid := range txnIDs[latest-1] {
+ require.Error(t, l2.CheckDup(proto, nextRound, latest-maxValidity, latest-1, txid, ledgercore.Txlease{}))
+ }
+}
+
+// TestLedgerTxTailCachedBlockHeaders checks [Latest - MaxTxnLife...Latest] block headers
+// are available via txTail
+func TestLedgerTxTailCachedBlockHeaders(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusFuture, 10_000_000_000)
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Info) // prevent spamming with ledger.AddValidatedBlock debug message
+ l, err := OpenLedger(log, t.Name(), inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer l.Close()
+
+ proto := config.Consensus[protocol.ConsensusFuture]
+ maxBlocks := 2 * proto.MaxTxnLife
+ for i := uint64(0); i < maxBlocks; i++ {
+ err = l.addBlockTxns(t, genesisInitState.Accounts, []transactions.SignedTxn{}, transactions.ApplyData{})
+ require.NoError(t, err)
+ if i%100 == 0 || i == maxBlocks-1 {
+ l.WaitForCommit(l.Latest())
+ }
+ }
+
+ latest := l.Latest()
+ for i := latest - basics.Round(proto.MaxTxnLife); i <= latest; i++ {
+ blk, err := l.BlockHdrCached(i)
+ require.NoError(t, err)
+ require.Equal(t, blk.Round, i)
+ }
+
+ // additional checks: the txTail should have additional blocks:
+ // dbRound - (MaxTxnLife+1) is expected to be deleted and dbRound - (MaxTxnLife) is earliest available
+ l.trackerMu.RLock()
+ dbRound := l.trackers.dbRound
+ l.trackerMu.RUnlock()
+
+ start := dbRound - basics.Round(proto.MaxTxnLife)
+ end := latest - basics.Round(proto.MaxTxnLife)
+ for i := start; i < end; i++ {
+ blk, err := l.BlockHdrCached(i)
+ require.NoError(t, err)
+ require.Equal(t, blk.Round, i)
+ }
+
+ _, err = l.BlockHdrCached(start - 1)
+ require.Error(t, err)
+}
+
+// TestLedgerKeyregFlip generates keyreg transactions for flipping genesis accounts state.
+// It checks 1) lookup returns correct values 2) lookup agreement returns correct values
+func TestLedgerKeyregFlip(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 10_000_000_000)
+ const inMem = false
+ cfg := config.GetDefaultLocal()
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Info) // prevent spamming with ledger.AddValidatedBlock debug message
+ l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer func() {
+ l.Close()
+ os.Remove(dbName + ".block.sqlite")
+ os.Remove(dbName + ".tracker.sqlite")
+ }()
+
+ const numFullBlocks = 1000
+ const numEmptyBlocks = 500
+
+ require.Equal(t, len(genesisInitState.Accounts), 12)
+ const numAccounts = 10 // 12 - pool and sink
+
+ // preallocate data for saving account info
+ var accounts [numFullBlocks][numAccounts]ledgercore.AccountData
+
+ lastBlock, err := l.Block(l.Latest())
+ require.NoError(t, err)
+ proto := config.Consensus[lastBlock.CurrentProtocol]
+
+ // regular addresses: all init accounts minus pools
+ addresses := make([]basics.Address, numAccounts)
+ i := 0
+ for addr := range genesisInitState.Accounts {
+ if addr != testPoolAddr && addr != testSinkAddr {
+ addresses[i] = addr
+ i++
+ }
+ }
+
+ isOnline := func(rndIdx, acctIdx, seed int) bool {
+ return (rndIdx+acctIdx+seed)%4 == 1
+ }
+ // run for numFullBlocks rounds
+ // generate 10 txn per block
+ for i := 0; i < numFullBlocks; i++ {
+ stxns := make([]transactions.SignedTxn, numAccounts)
+ latest := l.Latest()
+ require.Equal(t, basics.Round(i), latest)
+ seed := int(crypto.RandUint63() % 1_000_000)
+ for j := 0; j < numAccounts; j++ {
+ txHeader := transactions.Header{
+ Sender: addresses[j],
+ Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2},
+ FirstValid: latest + 1,
+ LastValid: latest + 10,
+ GenesisID: t.Name(),
+ GenesisHash: crypto.Hash([]byte(t.Name())),
+ }
+
+ keyregFields := transactions.KeyregTxnFields{
+ VoteFirst: latest + 1,
+ VoteLast: latest + 100_000,
+ }
+ if isOnline(i, j, seed) {
+ var votepk crypto.OneTimeSignatureVerifier
+ votepk[0] = byte(j % 256)
+ votepk[1] = byte(i % 256)
+ votepk[2] = byte(254)
+ var selpk crypto.VRFVerifier
+ selpk[0] = byte(j % 256)
+ selpk[1] = byte(i % 256)
+ selpk[2] = byte(255)
+
+ keyregFields.VotePK = votepk
+ keyregFields.SelectionPK = selpk
+ }
+
+ tx := transactions.Transaction{
+ Type: protocol.KeyRegistrationTx,
+ Header: txHeader,
+ KeyregTxnFields: keyregFields,
+ }
+ stxns[j] = sign(initKeys, tx)
+ }
+ err = l.addBlockTxns(t, genesisInitState.Accounts, stxns, transactions.ApplyData{})
+ require.NoError(t, err)
+ for k := 0; k < numAccounts; k++ {
+ data, rnd, _, err := l.LookupAccount(basics.Round(i+1), addresses[k])
+ require.NoError(t, err)
+ require.Equal(t, rnd, basics.Round(i+1))
+ online := isOnline(i, k, seed)
+ require.Equal(t, online, data.Status == basics.Online)
+ if online {
+ require.Equal(t, byte(k%256), data.VoteID[0])
+ require.Equal(t, byte(i%256), data.VoteID[1])
+ require.Equal(t, byte(254), data.VoteID[2])
+ require.Equal(t, byte(k%256), data.SelectionID[0])
+ require.Equal(t, byte(i%256), data.SelectionID[1])
+ require.Equal(t, byte(255), data.SelectionID[2])
+ accounts[i][k] = data
+ }
+ }
+ }
+ l.WaitForCommit(l.Latest())
+ require.Equal(t, basics.Round(numFullBlocks), l.Latest())
+
+ for i := 0; i < numEmptyBlocks; i++ {
+ nextRound := basics.Round(numFullBlocks + i + 1)
+ balancesRound := nextRound.SubSaturate(basics.Round(proto.MaxBalLookback))
+ acctRoundIdx := int(balancesRound) - 1
+ if acctRoundIdx >= len(accounts) {
+ // checked all saved history, stop
+ break
+ }
+ for k := 0; k < numAccounts; k++ {
+ od, err := l.LookupAgreement(balancesRound, addresses[k])
+ require.NoError(t, err)
+ data := accounts[acctRoundIdx][k]
+ require.Equal(t, data.MicroAlgos, od.MicroAlgosWithRewards)
+ require.Equal(t, data.VoteFirstValid, od.VoteFirstValid)
+ require.Equal(t, data.VoteLastValid, od.VoteLastValid)
+ require.Equal(t, data.VoteID, od.VoteID)
+ }
+ err = l.addBlockTxns(t, genesisInitState.Accounts, []transactions.SignedTxn{}, transactions.ApplyData{})
+ require.NoError(t, err)
+ }
+ l.WaitForCommit(l.Latest())
+}
+
+func verifyVotersContent(t *testing.T, expected map[basics.Round]*ledgercore.VotersForRound, actual map[basics.Round]*ledgercore.VotersForRound) {
+ require.Equal(t, len(expected), len(actual))
+ for k, v := range actual {
+ require.NoError(t, v.Wait())
+ require.Equal(t, expected[k].Tree, v.Tree)
+ require.Equal(t, expected[k].Participants, v.Participants)
+ }
+}
+
+func TestVotersReloadFromDisk(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ genesisInitState := getInitState()
+ genesisInitState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = false
+ cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10
+ log := logging.TestingLog(t)
+ l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer l.Close()
+
+ blk := genesisInitState.Block
+ var sp bookkeeping.StateProofTrackingData
+ sp.StateProofNextRound = basics.Round(proto.StateProofInterval * 2)
+ blk.BlockHeader.StateProofTracking = map[protocol.StateProofType]bookkeeping.StateProofTrackingData{
+ protocol.StateProofBasic: sp,
+ }
+
+ // we add blocks to the ledger to test reload from disk. we would like the history of the acctonline to extend.
+ // but we don't want to go behind stateproof recovery interval
+ for i := uint64(0); i < (proto.StateProofInterval*(proto.StateProofMaxRecoveryIntervals-2) - proto.StateProofVotersLookback); i++ {
+ blk.BlockHeader.Round++
+ blk.BlockHeader.TimeStamp += 10
+ err = l.AddBlock(blk, agreement.Certificate{})
+ require.NoError(t, err)
+ }
+
+ // at this point the database should contain the voter for round 256 but the voters for round 512 should be in deltas
+ l.WaitForCommit(blk.BlockHeader.Round)
+ vtSnapshot := l.acctsOnline.voters.votersForRoundCache
+
+ // ensuring no tree was evicted.
+ for _, round := range []basics.Round{240, 496} {
+ require.Contains(t, vtSnapshot, round)
+ }
+
+ err = l.reloadLedger()
+ require.NoError(t, err)
+
+ verifyVotersContent(t, vtSnapshot, l.acctsOnline.voters.votersForRoundCache)
+}
+
+func TestVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ genesisInitState := getInitState()
+ genesisInitState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = false
+ cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10
+ log := logging.TestingLog(t)
+ l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer l.Close()
+
+ blk := genesisInitState.Block
+
+ sp := bookkeeping.StateProofTrackingData{
+ StateProofNextRound: basics.Round(proto.StateProofInterval * 2),
+ }
+
+ blk.BlockHeader.StateProofTracking = map[protocol.StateProofType]bookkeeping.StateProofTrackingData{
+ protocol.StateProofBasic: sp,
+ }
+
+ for i := uint64(0); i < (proto.StateProofInterval*3 - proto.StateProofVotersLookback); i++ {
+ blk.BlockHeader.Round++
+ blk.BlockHeader.TimeStamp += 10
+ err = l.AddBlock(blk, agreement.Certificate{})
+ require.NoError(t, err)
+ }
+
+ // we simulate that the stateproof for round 512 is confirmed on chain, and we can move to the next one.
+ sp.StateProofNextRound = basics.Round(proto.StateProofInterval * 3)
+ blk.BlockHeader.StateProofTracking = map[protocol.StateProofType]bookkeeping.StateProofTrackingData{
+ protocol.StateProofBasic: sp,
+ }
+
+ for i := uint64(0); i < proto.StateProofInterval; i++ {
+ blk.BlockHeader.Round++
+ blk.BlockHeader.TimeStamp += 10
+ err = l.AddBlock(blk, agreement.Certificate{})
+ require.NoError(t, err)
+ }
+
+ l.WaitForCommit(blk.BlockHeader.Round)
+ vtSnapshot := l.acctsOnline.voters.votersForRoundCache
+
+ // verifying that the tree for round 512 is still in the cache, but the tree for round 256 is evicted.
+ require.Contains(t, vtSnapshot, basics.Round(496))
+ require.NotContains(t, vtSnapshot, basics.Round(240))
+
+ err = l.reloadLedger()
+ require.NoError(t, err)
+
+ verifyVotersContent(t, vtSnapshot, l.acctsOnline.voters.votersForRoundCache)
+}
+
+func TestVotersReloadFromDiskPassRecoveryPeriod(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ genesisInitState := getInitState()
+ genesisInitState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = false
+ cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10
+ log := logging.TestingLog(t)
+ l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer l.Close()
+
+ blk := genesisInitState.Block
+ var sp bookkeeping.StateProofTrackingData
+ sp.StateProofNextRound = basics.Round(proto.StateProofInterval * 2)
+ blk.BlockHeader.StateProofTracking = map[protocol.StateProofType]bookkeeping.StateProofTrackingData{
+ protocol.StateProofBasic: sp,
+ }
+
+ // we push proto.StateProofInterval * (proto.StateProofMaxRecoveryIntervals + 2) block into the ledger
+ // the reason for + 2 is the first state proof is on 2*stateproofinterval.
+ for i := uint64(0); i < (proto.StateProofInterval * (proto.StateProofMaxRecoveryIntervals + 2)); i++ {
+ blk.BlockHeader.Round++
+ blk.BlockHeader.TimeStamp += 10
+ err = l.AddBlock(blk, agreement.Certificate{})
+ require.NoError(t, err)
+ }
+
+ // the voters tracker should contain all the voters for each stateproof round. nothing should be removed
+ l.WaitForCommit(blk.BlockHeader.Round)
+ vtSnapshot := l.acctsOnline.voters.votersForRoundCache
+ beforeRemoveVotersLen := len(vtSnapshot)
+ err = l.reloadLedger()
+ require.NoError(t, err)
+ _, found := l.acctsOnline.voters.votersForRoundCache[basics.Round(proto.StateProofInterval-proto.StateProofVotersLookback)]
+ require.True(t, found)
+ verifyVotersContent(t, vtSnapshot, l.acctsOnline.voters.votersForRoundCache)
+
+ for i := uint64(0); i < proto.StateProofInterval; i++ {
+ blk.BlockHeader.Round++
+ blk.BlockHeader.TimeStamp += 10
+ err = l.AddBlock(blk, agreement.Certificate{})
+ require.NoError(t, err)
+ }
+
+ // the voters tracker should give up on voters for round 512
+ l.WaitForCommit(blk.BlockHeader.Round)
+ vtSnapshot = l.acctsOnline.voters.votersForRoundCache
+ err = l.reloadLedger()
+ require.NoError(t, err)
+
+ verifyVotersContent(t, vtSnapshot, l.acctsOnline.voters.votersForRoundCache)
+ _, found = l.acctsOnline.voters.votersForRoundCache[basics.Round(proto.StateProofInterval-proto.StateProofVotersLookback)]
+ require.False(t, found)
+ require.Equal(t, beforeRemoveVotersLen, len(l.acctsOnline.voters.votersForRoundCache))
}
diff --git a/ledger/ledgercore/accountdata.go b/ledger/ledgercore/accountdata.go
index 5f141cdbb..eb09706ff 100644
--- a/ledger/ledgercore/accountdata.go
+++ b/ledger/ledgercore/accountdata.go
@@ -17,8 +17,6 @@
package ledgercore
import (
- "reflect"
-
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklesignature"
@@ -54,13 +52,17 @@ type AccountBaseData struct {
type VotingData struct {
VoteID crypto.OneTimeSignatureVerifier
SelectionID crypto.VRFVerifier
- StateProofID merklesignature.Verifier
+ StateProofID merklesignature.Commitment
VoteFirstValid basics.Round
VoteLastValid basics.Round
VoteKeyDilution uint64
+}
- // MicroAlgosWithReward basics.MicroAlgos
+// OnlineAccountData holds MicroAlgosWithRewards and VotingData as needed for agreement
+type OnlineAccountData struct {
+ MicroAlgosWithRewards basics.MicroAlgos
+ VotingData
}
// ToAccountData returns ledgercore.AccountData from basics.AccountData
@@ -141,7 +143,7 @@ func (u AccountData) MinBalance(proto *config.ConsensusParams) (res basics.Micro
// IsZero checks if an AccountData value is the same as its zero value.
func (u AccountData) IsZero() bool {
- return reflect.DeepEqual(u, AccountData{})
+ return u == AccountData{}
}
// Money is similar to basics account data Money function
@@ -151,26 +153,29 @@ func (u AccountData) Money(proto config.ConsensusParams, rewardsLevel uint64) (m
}
// OnlineAccountData calculates the online account data given an AccountData, by adding the rewards.
-func (u *AccountData) OnlineAccountData(proto config.ConsensusParams, rewardsLevel uint64) basics.OnlineAccountData {
+func (u AccountData) OnlineAccountData(proto config.ConsensusParams, rewardsLevel uint64) OnlineAccountData {
if u.Status != basics.Online {
// if the account is not Online and agreement requests it for some reason, clear it out
- return basics.OnlineAccountData{}
+ return OnlineAccountData{}
}
microAlgos, _, _ := basics.WithUpdatedRewards(
proto, u.Status, u.MicroAlgos, u.RewardedMicroAlgos, u.RewardsBase, rewardsLevel,
)
- return basics.OnlineAccountData{
+ return OnlineAccountData{
MicroAlgosWithRewards: microAlgos,
- VoteID: u.VoteID,
- SelectionID: u.SelectionID,
- VoteFirstValid: u.VoteFirstValid,
- VoteLastValid: u.VoteLastValid,
- VoteKeyDilution: u.VoteKeyDilution,
+ VotingData: VotingData{
+ VoteID: u.VoteID,
+ SelectionID: u.SelectionID,
+ StateProofID: u.StateProofID,
+ VoteFirstValid: u.VoteFirstValid,
+ VoteLastValid: u.VoteLastValid,
+ VoteKeyDilution: u.VoteKeyDilution,
+ },
}
}
// NormalizedOnlineBalance wraps basics.NormalizedOnlineAccountBalance
-func (u *AccountData) NormalizedOnlineBalance(proto config.ConsensusParams) uint64 {
- return basics.NormalizedOnlineAccountBalance(u.Status, u.RewardsBase, u.MicroAlgos, proto)
+func (u *AccountData) NormalizedOnlineBalance(genesisProto config.ConsensusParams) uint64 {
+ return basics.NormalizedOnlineAccountBalance(u.Status, u.RewardsBase, u.MicroAlgos, genesisProto)
}
diff --git a/ledger/ledgercore/msgp_gen.go b/ledger/ledgercore/msgp_gen.go
index da59e0dae..0d6447a80 100644
--- a/ledger/ledgercore/msgp_gen.go
+++ b/ledger/ledgercore/msgp_gen.go
@@ -23,6 +23,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// OnlineRoundParamsData
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// MarshalMsg implements msgp.Marshaler
func (z *AccountTotals) MarshalMsg(b []byte) (o []byte) {
@@ -777,3 +785,155 @@ func (z *AlgoCount) Msgsize() (s int) {
func (z *AlgoCount) MsgIsZero() bool {
return ((*z).Money.MsgIsZero()) && ((*z).RewardUnits == 0)
}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *OnlineRoundParamsData) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(3)
+ var zb0001Mask uint8 /* 4 bits */
+ if (*z).OnlineSupply == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if (*z).CurrentProtocol.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ if (*z).RewardsLevel == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x8
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "online"
+ o = append(o, 0xa6, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65)
+ o = msgp.AppendUint64(o, (*z).OnlineSupply)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "proto"
+ o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f)
+ o = (*z).CurrentProtocol.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x8) == 0 { // if not empty
+ // string "rwdlvl"
+ o = append(o, 0xa6, 0x72, 0x77, 0x64, 0x6c, 0x76, 0x6c)
+ o = msgp.AppendUint64(o, (*z).RewardsLevel)
+ }
+ }
+ return
+}
+
+func (_ *OnlineRoundParamsData) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*OnlineRoundParamsData)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *OnlineRoundParamsData) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).OnlineSupply, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "OnlineSupply")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).RewardsLevel, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "RewardsLevel")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).CurrentProtocol.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CurrentProtocol")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = OnlineRoundParamsData{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "online":
+ (*z).OnlineSupply, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "OnlineSupply")
+ return
+ }
+ case "rwdlvl":
+ (*z).RewardsLevel, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "RewardsLevel")
+ return
+ }
+ case "proto":
+ bts, err = (*z).CurrentProtocol.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "CurrentProtocol")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *OnlineRoundParamsData) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*OnlineRoundParamsData)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *OnlineRoundParamsData) Msgsize() (s int) {
+ s = 1 + 7 + msgp.Uint64Size + 7 + msgp.Uint64Size + 6 + (*z).CurrentProtocol.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *OnlineRoundParamsData) MsgIsZero() bool {
+ return ((*z).OnlineSupply == 0) && ((*z).RewardsLevel == 0) && ((*z).CurrentProtocol.MsgIsZero())
+}
diff --git a/ledger/ledgercore/msgp_gen_test.go b/ledger/ledgercore/msgp_gen_test.go
index 3dc6a7409..1f3c3a3d0 100644
--- a/ledger/ledgercore/msgp_gen_test.go
+++ b/ledger/ledgercore/msgp_gen_test.go
@@ -133,3 +133,63 @@ func BenchmarkUnmarshalAlgoCount(b *testing.B) {
}
}
}
+
+func TestMarshalUnmarshalOnlineRoundParamsData(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := OnlineRoundParamsData{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingOnlineRoundParamsData(t *testing.T) {
+ protocol.RunEncodingTest(t, &OnlineRoundParamsData{})
+}
+
+func BenchmarkMarshalMsgOnlineRoundParamsData(b *testing.B) {
+ v := OnlineRoundParamsData{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgOnlineRoundParamsData(b *testing.B) {
+ v := OnlineRoundParamsData{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalOnlineRoundParamsData(b *testing.B) {
+ v := OnlineRoundParamsData{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/ledger/ledgercore/onlineacct.go b/ledger/ledgercore/onlineacct.go
index d73fe876f..765067bb2 100644
--- a/ledger/ledgercore/onlineacct.go
+++ b/ledger/ledgercore/onlineacct.go
@@ -24,7 +24,7 @@ import (
// An OnlineAccount corresponds to an account whose AccountData.Status
// is Online. This is used for a Merkle tree commitment of online
// accounts, which is subsequently used to validate participants for
-// a compact certificate.
+// a state proof.
type OnlineAccount struct {
// These are a subset of the fields from the corresponding AccountData.
Address basics.Address
@@ -33,5 +33,5 @@ type OnlineAccount struct {
NormalizedOnlineBalance uint64
VoteFirstValid basics.Round
VoteLastValid basics.Round
- StateProofID merklesignature.Verifier
+ StateProofID merklesignature.Commitment
}
diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go
index 7bea4e833..57bbbb607 100644
--- a/ledger/ledgercore/statedelta.go
+++ b/ledger/ledgercore/statedelta.go
@@ -19,7 +19,6 @@ package ledgercore
import (
"fmt"
- "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
@@ -68,6 +67,12 @@ type Txlease struct {
Lease [32]byte
}
+// IncludedTransactions defines the transactions included in a block, their index and last valid round.
+type IncludedTransactions struct {
+ LastValid basics.Round
+ Intra uint64 // the index of the transaction in the block
+}
+
// StateDelta describes the delta between a given round to the previous round
type StateDelta struct {
// modified accounts
@@ -77,7 +82,7 @@ type StateDelta struct {
Accts AccountDeltas
// new Txids for the txtail and TxnCounter, mapped to txn.LastValid
- Txids map[transactions.Txid]basics.Round
+ Txids map[transactions.Txid]IncludedTransactions
// new txleases for the txtail mapped to expiration
Txleases map[Txlease]basics.Round
@@ -88,9 +93,9 @@ type StateDelta struct {
// new block header; read-only
Hdr *bookkeeping.BlockHeader
- // next round for which we expect a compact cert.
- // zero if no compact cert is expected.
- CompactCertNext basics.Round
+ // next round for which we expect a state proof.
+ // zero if no state proof is expected.
+ StateProofNext basics.Round
// previous block timestamp
PrevTimestamp int64
@@ -173,15 +178,15 @@ type AccountDeltas struct {
// MakeStateDelta creates a new instance of StateDelta.
// hint is amount of transactions for evaluation, 2 * hint is for sender and receiver balance records.
// This does not play well for AssetConfig and ApplicationCall transactions on scale
-func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int, compactCertNext basics.Round) StateDelta {
+func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int, stateProofNext basics.Round) StateDelta {
return StateDelta{
Accts: MakeAccountDeltas(hint),
- Txids: make(map[transactions.Txid]basics.Round, hint),
- Txleases: make(map[Txlease]basics.Round, hint),
+ Txids: make(map[transactions.Txid]IncludedTransactions, hint),
+ Txleases: make(map[Txlease]basics.Round),
// asset or application creation are considered as rare events so do not pre-allocate space for them
Creatables: make(map[basics.CreatableIndex]ModifiedCreatable),
Hdr: hdr,
- CompactCertNext: compactCertNext,
+ StateProofNext: stateProofNext,
PrevTimestamp: prevTimestamp,
initialTransactionsCount: hint,
}
@@ -388,10 +393,11 @@ func (ad *AccountDeltas) UpsertAssetResource(addr basics.Address, aidx basics.As
// OptimizeAllocatedMemory by reallocating maps to needed capacity
// For each data structure, reallocate if it would save us at least 50MB aggregate
-func (sd *StateDelta) OptimizeAllocatedMemory(proto config.ConsensusParams) {
+// If provided maxBalLookback or maxTxnLife are zero, dependent optimizations will not occur.
+func (sd *StateDelta) OptimizeAllocatedMemory(maxBalLookback uint64) {
// accts takes up 232 bytes per entry, and is saved for 320 rounds
- if uint64(cap(sd.Accts.accts)-len(sd.Accts.accts))*accountArrayEntrySize*proto.MaxBalLookback > stateDeltaTargetOptimizationThreshold {
- accts := make([]NewBalanceRecord, len(sd.Accts.acctsCache))
+ if uint64(cap(sd.Accts.accts)-len(sd.Accts.accts))*accountArrayEntrySize*maxBalLookback > stateDeltaTargetOptimizationThreshold {
+ accts := make([]NewBalanceRecord, len(sd.Accts.accts))
copy(accts, sd.Accts.accts)
sd.Accts.accts = accts
}
@@ -399,32 +405,13 @@ func (sd *StateDelta) OptimizeAllocatedMemory(proto config.ConsensusParams) {
// acctsCache takes up 64 bytes per entry, and is saved for 320 rounds
// realloc if original allocation capacity greater than length of data, and space difference is significant
if 2*sd.initialTransactionsCount > len(sd.Accts.acctsCache) &&
- uint64(2*sd.initialTransactionsCount-len(sd.Accts.acctsCache))*accountMapCacheEntrySize*proto.MaxBalLookback > stateDeltaTargetOptimizationThreshold {
+ uint64(2*sd.initialTransactionsCount-len(sd.Accts.acctsCache))*accountMapCacheEntrySize*maxBalLookback > stateDeltaTargetOptimizationThreshold {
acctsCache := make(map[basics.Address]int, len(sd.Accts.acctsCache))
for k, v := range sd.Accts.acctsCache {
acctsCache[k] = v
}
sd.Accts.acctsCache = acctsCache
}
-
- // TxLeases takes up 112 bytes per entry, and is saved for 1000 rounds
- if sd.initialTransactionsCount > len(sd.Txleases) &&
- uint64(sd.initialTransactionsCount-len(sd.Txleases))*txleasesEntrySize*proto.MaxTxnLife > stateDeltaTargetOptimizationThreshold {
- txLeases := make(map[Txlease]basics.Round, len(sd.Txleases))
- for k, v := range sd.Txleases {
- txLeases[k] = v
- }
- sd.Txleases = txLeases
- }
-
- // Creatables takes up 100 bytes per entry, and is saved for 320 rounds
- if uint64(len(sd.Creatables))*creatablesEntrySize*proto.MaxBalLookback > stateDeltaTargetOptimizationThreshold {
- creatableDeltas := make(map[basics.CreatableIndex]ModifiedCreatable, len(sd.Creatables))
- for k, v := range sd.Creatables {
- creatableDeltas[k] = v
- }
- sd.Creatables = creatableDeltas
- }
}
// GetBasicsAccountData returns basics account data for some specific address
diff --git a/ledger/ledgercore/totals.go b/ledger/ledgercore/totals.go
index 3f62e077b..291db77a2 100644
--- a/ledger/ledgercore/totals.go
+++ b/ledger/ledgercore/totals.go
@@ -20,6 +20,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
)
// AlgoCount represents a total of algos of a certain class
@@ -52,6 +53,15 @@ type AccountTotals struct {
RewardsLevel uint64 `codec:"rwdlvl"`
}
+// OnlineRoundParamsData keeps track of parameters needed for agreement from maxBalLookback ago
+type OnlineRoundParamsData struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ OnlineSupply uint64 `codec:"online"`
+ RewardsLevel uint64 `codec:"rwdlvl"`
+ CurrentProtocol protocol.ConsensusVersion `codec:"proto"`
+}
+
func (at *AccountTotals) statusField(status basics.Status) *AlgoCount {
switch status {
case basics.Online:
diff --git a/ledger/ledgercore/votersForRound.go b/ledger/ledgercore/votersForRound.go
index ced0d35ed..4dfb39da2 100644
--- a/ledger/ledgercore/votersForRound.go
+++ b/ledger/ledgercore/votersForRound.go
@@ -18,18 +18,27 @@ package ledgercore
import (
"fmt"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
"sync"
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/compactcert"
"github.com/algorand/go-algorand/crypto/merklearray"
+ "github.com/algorand/go-algorand/crypto/stateproof"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
)
+// OnlineAccountsFetcher captures the functionality of querying online accounts status
+type OnlineAccountsFetcher interface {
+ // TopOnlineAccounts returns the top n online accounts, sorted by their normalized
+ // balance and address, whose voting keys are valid in voteRnd. See the
+ // normalization description in AccountData.NormalizedOnlineBalance().
+ TopOnlineAccounts(rnd basics.Round, voteRnd basics.Round, n uint64) (topOnlineAccounts []*OnlineAccount, totalOnlineStake basics.MicroAlgos, err error)
+}
+
// VotersForRound tracks the top online voting accounts as of a particular
// round, along with a Merkle tree commitment to those voting accounts.
type VotersForRound struct {
@@ -51,7 +60,7 @@ type VotersForRound struct {
// in participants.
Proto config.ConsensusParams
- // Participants is the array of top #CompactCertVoters online accounts
+ // Participants is the array of top StateProofTopVoters online accounts
// in this round, sorted by normalized balance (to make sure heavyweight
// accounts are biased to the front).
Participants basics.ParticipantsArray
@@ -68,9 +77,6 @@ type VotersForRound struct {
TotalWeight basics.MicroAlgos
}
-// TopOnlineAccounts is the function signature for a method that would return the top online accounts.
-type TopOnlineAccounts func(rnd basics.Round, voteRnd basics.Round, n uint64) ([]*OnlineAccount, error)
-
// MakeVotersForRound create a new VotersForRound object and initialize it's cond.
func MakeVotersForRound() *VotersForRound {
vr := &VotersForRound{}
@@ -78,22 +84,42 @@ func MakeVotersForRound() *VotersForRound {
return vr
}
-// LoadTree todo
-func (tr *VotersForRound) LoadTree(onlineTop TopOnlineAccounts, hdr bookkeeping.BlockHeader) error {
+func createStateProofParticipant(stateProofID *merklesignature.Commitment, money basics.MicroAlgos) basics.Participant {
+ var retPart basics.Participant
+ retPart.Weight = money.ToUint64()
+ // Some accounts might not have StateProof keys commitment. As a result,
+ // the commitment would be an array filled with zeroes: [0x0...0x0].
+ // Since the commitment is created using the subset-sum hash function, for which the
+ // value [0x0..0x0] might be known, we avoid using such empty commitments.
+ // We replace it with a commitment for zero keys..
+ if stateProofID.IsEmpty() {
+ copy(retPart.PK.Commitment[:], merklesignature.NoKeysCommitment[:])
+ } else {
+ copy(retPart.PK.Commitment[:], stateProofID[:])
+
+ }
+ // KeyLifetime is set as a default value here (256) as the currently registered StateProof keys do not have a KeyLifetime value associated with them.
+ // In order to support changing the KeyLifetime in the future, we would need to update the Keyreg transaction and replace the value here with the one
+ // registered by the Account.
+ retPart.PK.KeyLifetime = merklesignature.KeyLifetimeDefault
+ return retPart
+}
+
+// LoadTree loads the participation tree and other required fields, using the provided OnlineAccountsFetcher.
+func (tr *VotersForRound) LoadTree(onlineAccountsFetcher OnlineAccountsFetcher, hdr bookkeeping.BlockHeader) error {
r := hdr.Round
- // certRound is the block that we expect to form a compact certificate for,
+ // stateProofRound is the block that we expect to form a state proof for,
// using the balances from round r.
- certRound := r + basics.Round(tr.Proto.CompactCertVotersLookback+tr.Proto.CompactCertRounds)
+ stateProofRound := r + basics.Round(tr.Proto.StateProofVotersLookback+tr.Proto.StateProofInterval)
- top, err := onlineTop(r, certRound, tr.Proto.CompactCertTopVoters)
+ top, totalOnlineWeight, err := onlineAccountsFetcher.TopOnlineAccounts(r, stateProofRound, tr.Proto.StateProofTopVoters)
if err != nil {
return err
}
participants := make(basics.ParticipantsArray, len(top))
addrToPos := make(map[basics.Address]uint64)
- var totalWeight basics.MicroAlgos
for i, acct := range top {
var ot basics.OverflowTracker
@@ -103,19 +129,11 @@ func (tr *VotersForRound) LoadTree(onlineTop TopOnlineAccounts, hdr bookkeeping.
return fmt.Errorf("votersTracker.LoadTree: overflow adding rewards %d + %d", acct.MicroAlgos, rewards)
}
- totalWeight = ot.AddA(totalWeight, money)
- if ot.Overflowed {
- return fmt.Errorf("votersTracker.LoadTree: overflow computing totalWeight %d + %d", totalWeight.ToUint64(), money.ToUint64())
- }
-
- participants[i] = basics.Participant{
- PK: acct.StateProofID,
- Weight: money.ToUint64(),
- }
+ participants[i] = createStateProofParticipant(&acct.StateProofID, money)
addrToPos[acct.Address] = uint64(i)
}
- tree, err := merklearray.BuildVectorCommitmentTree(participants, crypto.HashFactory{HashType: compactcert.HashType})
+ tree, err := merklearray.BuildVectorCommitmentTree(participants, crypto.HashFactory{HashType: stateproof.HashType})
if err != nil {
return err
}
@@ -123,7 +141,7 @@ func (tr *VotersForRound) LoadTree(onlineTop TopOnlineAccounts, hdr bookkeeping.
tr.mu.Lock()
tr.AddrToPos = addrToPos
tr.Participants = participants
- tr.TotalWeight = totalWeight
+ tr.TotalWeight = totalOnlineWeight
tr.Tree = tree
tr.cond.Broadcast()
tr.mu.Unlock()
diff --git a/ledger/lruonlineaccts.go b/ledger/lruonlineaccts.go
new file mode 100644
index 000000000..ae05c497c
--- /dev/null
+++ b/ledger/lruonlineaccts.go
@@ -0,0 +1,121 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging"
+)
+
+// lruAccounts provides a storage class for the most recently used accounts data.
+// It doesn't have any synchronization primitive on its own and so is required to be
+// synchronized by the caller.
+type lruOnlineAccounts struct {
+ // accountsList contain the list of persistedAccountData, where the front ones are the most "fresh"
+ // and the ones on the back are the oldest.
+ accountsList *persistedOnlineAccountDataList
+ // accounts provides fast access to the various elements in the list by using the account address
+ accounts map[basics.Address]*persistedOnlineAccountDataListNode
+ // pendingAccounts are used as a way to avoid taking a write-lock. When the caller needs to "materialize" these,
+ // it would call flushPendingWrites and these would be merged into the accounts/accountsList
+ pendingAccounts chan persistedOnlineAccountData
+ // log interface; used for logging the threshold event.
+ log logging.Logger
+ // pendingWritesWarnThreshold is the threshold beyond we would write a warning for exceeding the number of pendingAccounts entries
+ pendingWritesWarnThreshold int
+}
+
+// init initializes the lruAccounts for use.
+// thread locking semantics : write lock
+func (m *lruOnlineAccounts) init(log logging.Logger, pendingWrites int, pendingWritesWarnThreshold int) {
+ m.accountsList = newPersistedOnlineAccountList().allocateFreeNodes(pendingWrites)
+ m.accounts = make(map[basics.Address]*persistedOnlineAccountDataListNode, pendingWrites)
+ m.pendingAccounts = make(chan persistedOnlineAccountData, pendingWrites)
+ m.log = log
+ m.pendingWritesWarnThreshold = pendingWritesWarnThreshold
+}
+
+// read the persistedAccountData object that the lruAccounts has for the given address.
+// thread locking semantics : read lock
+func (m *lruOnlineAccounts) read(addr basics.Address) (data persistedOnlineAccountData, has bool) {
+ if el := m.accounts[addr]; el != nil {
+ return *el.Value, true
+ }
+ return persistedOnlineAccountData{}, false
+}
+
+// flushPendingWrites flushes the pending writes to the main lruAccounts cache.
+// thread locking semantics : write lock
+func (m *lruOnlineAccounts) flushPendingWrites() {
+ pendingEntriesCount := len(m.pendingAccounts)
+ if pendingEntriesCount >= m.pendingWritesWarnThreshold {
+ m.log.Warnf("lruOnlineAccounts: number of entries in pendingAccounts(%d) exceed the warning threshold of %d", pendingEntriesCount, m.pendingWritesWarnThreshold)
+ }
+ for ; pendingEntriesCount > 0; pendingEntriesCount-- {
+ select {
+ case pendingAccountData := <-m.pendingAccounts:
+ m.write(pendingAccountData)
+ default:
+ return
+ }
+ }
+}
+
+// writePending write a single persistedOnlineAccountData entry to the pendingAccounts buffer.
+// the function doesn't block, and in case of a buffer overflow the entry would not be added.
+// thread locking semantics : no lock is required.
+func (m *lruOnlineAccounts) writePending(acct persistedOnlineAccountData) {
+ select {
+ case m.pendingAccounts <- acct:
+ default:
+ }
+}
+
+// write a single persistedAccountData to the lruAccounts cache.
+// when writing the entry, the round number would be used to determine if it's a newer
+// version of what's already on the cache or not. In all cases, the entry is going
+// to be promoted to the front of the list.
+// thread locking semantics : write lock
+func (m *lruOnlineAccounts) write(acctData persistedOnlineAccountData) {
+ if el := m.accounts[acctData.addr]; el != nil {
+ // already exists; is it a newer ?
+ if el.Value.before(&acctData) {
+ // we update with a newer version.
+ el.Value = &acctData
+ }
+ m.accountsList.moveToFront(el)
+ } else {
+ // new entry.
+ m.accounts[acctData.addr] = m.accountsList.pushFront(&acctData)
+ }
+}
+
+// prune adjust the current size of the lruAccounts cache, by dropping the least
+// recently used entries.
+// thread locking semantics : write lock
+func (m *lruOnlineAccounts) prune(newSize int) (removed int) {
+ for {
+ if len(m.accounts) <= newSize {
+ break
+ }
+ back := m.accountsList.back()
+ delete(m.accounts, back.Value.addr)
+ m.accountsList.remove(back)
+ removed++
+ }
+ return
+}
diff --git a/ledger/lruonlineaccts_test.go b/ledger/lruonlineaccts_test.go
new file mode 100644
index 000000000..ca846f6c5
--- /dev/null
+++ b/ledger/lruonlineaccts_test.go
@@ -0,0 +1,196 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestLRUOnlineAccountsBasic(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var baseOnlineAcct lruOnlineAccounts
+ baseOnlineAcct.init(logging.TestingLog(t), 10, 5)
+
+ accountsNum := 50
+ // write 50 accounts
+ for i := 0; i < accountsNum; i++ {
+ acct := persistedOnlineAccountData{
+ addr: basics.Address(crypto.Hash([]byte{byte(i)})),
+ round: basics.Round(i),
+ rowid: int64(i),
+ accountData: baseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}},
+ }
+ baseOnlineAcct.write(acct)
+ }
+
+ // verify that all these accounts are truly there.
+ for i := 0; i < accountsNum; i++ {
+ addr := basics.Address(crypto.Hash([]byte{byte(i)}))
+ acct, has := baseOnlineAcct.read(addr)
+ require.True(t, has)
+ require.Equal(t, basics.Round(i), acct.round)
+ require.Equal(t, addr, acct.addr)
+ require.Equal(t, uint64(i), acct.accountData.MicroAlgos.Raw)
+ require.Equal(t, int64(i), acct.rowid)
+ }
+
+ // verify expected missing entries
+ for i := accountsNum; i < accountsNum*2; i++ {
+ addr := basics.Address(crypto.Hash([]byte{byte(i)}))
+ acct, has := baseOnlineAcct.read(addr)
+ require.False(t, has)
+ require.Equal(t, persistedOnlineAccountData{}, acct)
+ }
+
+ baseOnlineAcct.prune(accountsNum / 2)
+
+ // verify expected (missing/existing) entries
+ for i := 0; i < accountsNum*2; i++ {
+ addr := basics.Address(crypto.Hash([]byte{byte(i)}))
+ acct, has := baseOnlineAcct.read(addr)
+
+ if i >= accountsNum/2 && i < accountsNum {
+ // expected to have it.
+ require.True(t, has)
+ require.Equal(t, basics.Round(i), acct.round)
+ require.Equal(t, addr, acct.addr)
+ require.Equal(t, uint64(i), acct.accountData.MicroAlgos.Raw)
+ require.Equal(t, int64(i), acct.rowid)
+ } else {
+ require.False(t, has)
+ require.Equal(t, persistedOnlineAccountData{}, acct)
+ }
+ }
+}
+
+func TestLRUOnlineAccountsPendingWrites(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var baseOnlineAcct lruOnlineAccounts
+ accountsNum := 250
+ baseOnlineAcct.init(logging.TestingLog(t), accountsNum*2, accountsNum)
+
+ for i := 0; i < accountsNum; i++ {
+ go func(i int) {
+ time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond)
+ acct := persistedOnlineAccountData{
+ addr: basics.Address(crypto.Hash([]byte{byte(i)})),
+ round: basics.Round(i),
+ rowid: int64(i),
+ accountData: baseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}},
+ }
+ baseOnlineAcct.writePending(acct)
+ }(i)
+ }
+ testStarted := time.Now()
+ for {
+ baseOnlineAcct.flushPendingWrites()
+ // check if all accounts were loaded into "main" cache.
+ allAccountsLoaded := true
+ for i := 0; i < accountsNum; i++ {
+ addr := basics.Address(crypto.Hash([]byte{byte(i)}))
+ _, has := baseOnlineAcct.read(addr)
+ if !has {
+ allAccountsLoaded = false
+ break
+ }
+ }
+ if allAccountsLoaded {
+ break
+ }
+ if time.Since(testStarted).Seconds() > 20 {
+ require.Fail(t, "failed after waiting for 20 second")
+ }
+ // not yet, keep looping.
+ }
+}
+
+func TestLRUOnlineAccountsPendingWritesWarning(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var baseOnlineAcct lruOnlineAccounts
+ pendingWritesBuffer := 50
+ pendingWritesThreshold := 40
+ log := &lruAccountsTestLogger{Logger: logging.TestingLog(t)}
+ baseOnlineAcct.init(log, pendingWritesBuffer, pendingWritesThreshold)
+ for j := 0; j < 50; j++ {
+ for i := 0; i < j; i++ {
+ acct := persistedOnlineAccountData{
+ addr: basics.Address(crypto.Hash([]byte{byte(i)})),
+ round: basics.Round(i),
+ rowid: int64(i),
+ accountData: baseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}},
+ }
+ baseOnlineAcct.writePending(acct)
+ }
+ baseOnlineAcct.flushPendingWrites()
+ if j >= pendingWritesThreshold {
+ // expect a warning in the log
+ require.Equal(t, 1+j-pendingWritesThreshold, log.warnMsgCount)
+ }
+ }
+}
+
+func TestLRUOnlineAccountsOmittedPendingWrites(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var baseOnlineAcct lruOnlineAccounts
+ pendingWritesBuffer := 50
+ pendingWritesThreshold := 40
+ log := &lruAccountsTestLogger{Logger: logging.TestingLog(t)}
+ baseOnlineAcct.init(log, pendingWritesBuffer, pendingWritesThreshold)
+
+ for i := 0; i < pendingWritesBuffer*2; i++ {
+ acct := persistedOnlineAccountData{
+ addr: basics.Address(crypto.Hash([]byte{byte(i)})),
+ round: basics.Round(i),
+ rowid: int64(i),
+ accountData: baseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}},
+ }
+ baseOnlineAcct.writePending(acct)
+ }
+
+ baseOnlineAcct.flushPendingWrites()
+
+ // verify that all these accounts are truly there.
+ for i := 0; i < pendingWritesBuffer; i++ {
+ addr := basics.Address(crypto.Hash([]byte{byte(i)}))
+ acct, has := baseOnlineAcct.read(addr)
+ require.True(t, has)
+ require.Equal(t, basics.Round(i), acct.round)
+ require.Equal(t, addr, acct.addr)
+ require.Equal(t, uint64(i), acct.accountData.MicroAlgos.Raw)
+ require.Equal(t, int64(i), acct.rowid)
+ }
+
+ // verify expected missing entries
+ for i := pendingWritesBuffer; i < pendingWritesBuffer*2; i++ {
+ addr := basics.Address(crypto.Hash([]byte{byte(i)}))
+ acct, has := baseOnlineAcct.read(addr)
+ require.False(t, has)
+ require.Equal(t, persistedOnlineAccountData{}, acct)
+ }
+}
diff --git a/ledger/msgp_gen.go b/ledger/msgp_gen.go
index 31c0c8224..58824b2ae 100644
--- a/ledger/msgp_gen.go
+++ b/ledger/msgp_gen.go
@@ -9,6 +9,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
)
// The following msgp objects are implemented in this file:
@@ -44,6 +45,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// baseVotingData
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// catchpointFileBalancesChunkV5
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -60,6 +69,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// catchpointFirstStageInfo
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// catchpointState
// |-----> MarshalMsg
// |-----> CanMarshalMsg
@@ -100,6 +117,22 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// txTailRound
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// txTailRoundLease
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// MarshalMsg implements msgp.Marshaler
func (z CatchpointCatchupState) MarshalMsg(b []byte) (o []byte) {
@@ -420,27 +453,27 @@ func (z *baseAccountData) MarshalMsg(b []byte) (o []byte) {
// omitempty: check for empty values
zb0001Len := uint32(19)
var zb0001Mask uint32 /* 21 bits */
- if (*z).baseOnlineAccountData.VoteID.MsgIsZero() {
+ if (*z).baseVotingData.VoteID.MsgIsZero() {
zb0001Len--
zb0001Mask |= 0x1
}
- if (*z).baseOnlineAccountData.SelectionID.MsgIsZero() {
+ if (*z).baseVotingData.SelectionID.MsgIsZero() {
zb0001Len--
zb0001Mask |= 0x2
}
- if (*z).baseOnlineAccountData.VoteFirstValid.MsgIsZero() {
+ if (*z).baseVotingData.VoteFirstValid.MsgIsZero() {
zb0001Len--
zb0001Mask |= 0x4
}
- if (*z).baseOnlineAccountData.VoteLastValid.MsgIsZero() {
+ if (*z).baseVotingData.VoteLastValid.MsgIsZero() {
zb0001Len--
zb0001Mask |= 0x8
}
- if (*z).baseOnlineAccountData.VoteKeyDilution == 0 {
+ if (*z).baseVotingData.VoteKeyDilution == 0 {
zb0001Len--
zb0001Mask |= 0x10
}
- if (*z).baseOnlineAccountData.StateProofID.MsgIsZero() {
+ if (*z).baseVotingData.StateProofID.MsgIsZero() {
zb0001Len--
zb0001Mask |= 0x20
}
@@ -502,32 +535,32 @@ func (z *baseAccountData) MarshalMsg(b []byte) (o []byte) {
if (zb0001Mask & 0x1) == 0 { // if not empty
// string "A"
o = append(o, 0xa1, 0x41)
- o = (*z).baseOnlineAccountData.VoteID.MarshalMsg(o)
+ o = (*z).baseVotingData.VoteID.MarshalMsg(o)
}
if (zb0001Mask & 0x2) == 0 { // if not empty
// string "B"
o = append(o, 0xa1, 0x42)
- o = (*z).baseOnlineAccountData.SelectionID.MarshalMsg(o)
+ o = (*z).baseVotingData.SelectionID.MarshalMsg(o)
}
if (zb0001Mask & 0x4) == 0 { // if not empty
// string "C"
o = append(o, 0xa1, 0x43)
- o = (*z).baseOnlineAccountData.VoteFirstValid.MarshalMsg(o)
+ o = (*z).baseVotingData.VoteFirstValid.MarshalMsg(o)
}
if (zb0001Mask & 0x8) == 0 { // if not empty
// string "D"
o = append(o, 0xa1, 0x44)
- o = (*z).baseOnlineAccountData.VoteLastValid.MarshalMsg(o)
+ o = (*z).baseVotingData.VoteLastValid.MarshalMsg(o)
}
if (zb0001Mask & 0x10) == 0 { // if not empty
// string "E"
o = append(o, 0xa1, 0x45)
- o = msgp.AppendUint64(o, (*z).baseOnlineAccountData.VoteKeyDilution)
+ o = msgp.AppendUint64(o, (*z).baseVotingData.VoteKeyDilution)
}
if (zb0001Mask & 0x20) == 0 { // if not empty
// string "F"
o = append(o, 0xa1, 0x46)
- o = (*z).baseOnlineAccountData.StateProofID.MarshalMsg(o)
+ o = (*z).baseVotingData.StateProofID.MarshalMsg(o)
}
if (zb0001Mask & 0x100) == 0 { // if not empty
// string "a"
@@ -714,7 +747,7 @@ func (z *baseAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0001 > 0 {
zb0001--
- bts, err = (*z).baseOnlineAccountData.VoteID.UnmarshalMsg(bts)
+ bts, err = (*z).baseVotingData.VoteID.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "VoteID")
return
@@ -722,7 +755,7 @@ func (z *baseAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0001 > 0 {
zb0001--
- bts, err = (*z).baseOnlineAccountData.SelectionID.UnmarshalMsg(bts)
+ bts, err = (*z).baseVotingData.SelectionID.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "SelectionID")
return
@@ -730,7 +763,7 @@ func (z *baseAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0001 > 0 {
zb0001--
- bts, err = (*z).baseOnlineAccountData.VoteFirstValid.UnmarshalMsg(bts)
+ bts, err = (*z).baseVotingData.VoteFirstValid.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "VoteFirstValid")
return
@@ -738,7 +771,7 @@ func (z *baseAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0001 > 0 {
zb0001--
- bts, err = (*z).baseOnlineAccountData.VoteLastValid.UnmarshalMsg(bts)
+ bts, err = (*z).baseVotingData.VoteLastValid.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "VoteLastValid")
return
@@ -746,7 +779,7 @@ func (z *baseAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0001 > 0 {
zb0001--
- (*z).baseOnlineAccountData.VoteKeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
+ (*z).baseVotingData.VoteKeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution")
return
@@ -754,7 +787,7 @@ func (z *baseAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0001 > 0 {
zb0001--
- bts, err = (*z).baseOnlineAccountData.StateProofID.UnmarshalMsg(bts)
+ bts, err = (*z).baseVotingData.StateProofID.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProofID")
return
@@ -864,37 +897,37 @@ func (z *baseAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "A":
- bts, err = (*z).baseOnlineAccountData.VoteID.UnmarshalMsg(bts)
+ bts, err = (*z).baseVotingData.VoteID.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "VoteID")
return
}
case "B":
- bts, err = (*z).baseOnlineAccountData.SelectionID.UnmarshalMsg(bts)
+ bts, err = (*z).baseVotingData.SelectionID.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "SelectionID")
return
}
case "C":
- bts, err = (*z).baseOnlineAccountData.VoteFirstValid.UnmarshalMsg(bts)
+ bts, err = (*z).baseVotingData.VoteFirstValid.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "VoteFirstValid")
return
}
case "D":
- bts, err = (*z).baseOnlineAccountData.VoteLastValid.UnmarshalMsg(bts)
+ bts, err = (*z).baseVotingData.VoteLastValid.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "VoteLastValid")
return
}
case "E":
- (*z).baseOnlineAccountData.VoteKeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
+ (*z).baseVotingData.VoteKeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "VoteKeyDilution")
return
}
case "F":
- bts, err = (*z).baseOnlineAccountData.StateProofID.UnmarshalMsg(bts)
+ bts, err = (*z).baseVotingData.StateProofID.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "StateProofID")
return
@@ -925,19 +958,286 @@ func (_ *baseAccountData) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *baseAccountData) Msgsize() (s int) {
- s = 3 + 2 + (*z).Status.Msgsize() + 2 + (*z).MicroAlgos.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).RewardedMicroAlgos.Msgsize() + 2 + (*z).AuthAddr.Msgsize() + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint32Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + (*z).baseOnlineAccountData.VoteID.Msgsize() + 2 + (*z).baseOnlineAccountData.SelectionID.Msgsize() + 2 + (*z).baseOnlineAccountData.VoteFirstValid.Msgsize() + 2 + (*z).baseOnlineAccountData.VoteLastValid.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).baseOnlineAccountData.StateProofID.Msgsize() + 2 + msgp.Uint64Size
+ s = 3 + 2 + (*z).Status.Msgsize() + 2 + (*z).MicroAlgos.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).RewardedMicroAlgos.Msgsize() + 2 + (*z).AuthAddr.Msgsize() + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint32Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + (*z).baseVotingData.VoteID.Msgsize() + 2 + (*z).baseVotingData.SelectionID.Msgsize() + 2 + (*z).baseVotingData.VoteFirstValid.Msgsize() + 2 + (*z).baseVotingData.VoteLastValid.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).baseVotingData.StateProofID.Msgsize() + 2 + msgp.Uint64Size
return
}
// MsgIsZero returns whether this is a zero value
func (z *baseAccountData) MsgIsZero() bool {
- return ((*z).Status.MsgIsZero()) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0) && ((*z).RewardedMicroAlgos.MsgIsZero()) && ((*z).AuthAddr.MsgIsZero()) && ((*z).TotalAppSchemaNumUint == 0) && ((*z).TotalAppSchemaNumByteSlice == 0) && ((*z).TotalExtraAppPages == 0) && ((*z).TotalAssetParams == 0) && ((*z).TotalAssets == 0) && ((*z).TotalAppParams == 0) && ((*z).TotalAppLocalStates == 0) && ((*z).baseOnlineAccountData.VoteID.MsgIsZero()) && ((*z).baseOnlineAccountData.SelectionID.MsgIsZero()) && ((*z).baseOnlineAccountData.VoteFirstValid.MsgIsZero()) && ((*z).baseOnlineAccountData.VoteLastValid.MsgIsZero()) && ((*z).baseOnlineAccountData.VoteKeyDilution == 0) && ((*z).baseOnlineAccountData.StateProofID.MsgIsZero()) && ((*z).UpdateRound == 0)
+ return ((*z).Status.MsgIsZero()) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0) && ((*z).RewardedMicroAlgos.MsgIsZero()) && ((*z).AuthAddr.MsgIsZero()) && ((*z).TotalAppSchemaNumUint == 0) && ((*z).TotalAppSchemaNumByteSlice == 0) && ((*z).TotalExtraAppPages == 0) && ((*z).TotalAssetParams == 0) && ((*z).TotalAssets == 0) && ((*z).TotalAppParams == 0) && ((*z).TotalAppLocalStates == 0) && ((*z).baseVotingData.VoteID.MsgIsZero()) && ((*z).baseVotingData.SelectionID.MsgIsZero()) && ((*z).baseVotingData.VoteFirstValid.MsgIsZero()) && ((*z).baseVotingData.VoteLastValid.MsgIsZero()) && ((*z).baseVotingData.VoteKeyDilution == 0) && ((*z).baseVotingData.StateProofID.MsgIsZero()) && ((*z).UpdateRound == 0)
}
// MarshalMsg implements msgp.Marshaler
func (z *baseOnlineAccountData) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
+ zb0001Len := uint32(8)
+ var zb0001Mask uint16 /* 10 bits */
+ if (*z).baseVotingData.VoteID.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x1
+ }
+ if (*z).baseVotingData.SelectionID.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if (*z).baseVotingData.VoteFirstValid.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ if (*z).baseVotingData.VoteLastValid.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x8
+ }
+ if (*z).baseVotingData.VoteKeyDilution == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x10
+ }
+ if (*z).baseVotingData.StateProofID.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x20
+ }
+ if (*z).MicroAlgos.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x40
+ }
+ if (*z).RewardsBase == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x80
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x1) == 0 { // if not empty
+ // string "A"
+ o = append(o, 0xa1, 0x41)
+ o = (*z).baseVotingData.VoteID.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "B"
+ o = append(o, 0xa1, 0x42)
+ o = (*z).baseVotingData.SelectionID.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "C"
+ o = append(o, 0xa1, 0x43)
+ o = (*z).baseVotingData.VoteFirstValid.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x8) == 0 { // if not empty
+ // string "D"
+ o = append(o, 0xa1, 0x44)
+ o = (*z).baseVotingData.VoteLastValid.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x10) == 0 { // if not empty
+ // string "E"
+ o = append(o, 0xa1, 0x45)
+ o = msgp.AppendUint64(o, (*z).baseVotingData.VoteKeyDilution)
+ }
+ if (zb0001Mask & 0x20) == 0 { // if not empty
+ // string "F"
+ o = append(o, 0xa1, 0x46)
+ o = (*z).baseVotingData.StateProofID.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x40) == 0 { // if not empty
+ // string "Y"
+ o = append(o, 0xa1, 0x59)
+ o = (*z).MicroAlgos.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x80) == 0 { // if not empty
+ // string "Z"
+ o = append(o, 0xa1, 0x5a)
+ o = msgp.AppendUint64(o, (*z).RewardsBase)
+ }
+ }
+ return
+}
+
+func (_ *baseOnlineAccountData) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*baseOnlineAccountData)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *baseOnlineAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).baseVotingData.VoteID.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteID")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).baseVotingData.SelectionID.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "SelectionID")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).baseVotingData.VoteFirstValid.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteFirstValid")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).baseVotingData.VoteLastValid.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteLastValid")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).baseVotingData.VoteKeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).baseVotingData.StateProofID.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "StateProofID")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).MicroAlgos.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "MicroAlgos")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).RewardsBase, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "RewardsBase")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = baseOnlineAccountData{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "A":
+ bts, err = (*z).baseVotingData.VoteID.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteID")
+ return
+ }
+ case "B":
+ bts, err = (*z).baseVotingData.SelectionID.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "SelectionID")
+ return
+ }
+ case "C":
+ bts, err = (*z).baseVotingData.VoteFirstValid.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteFirstValid")
+ return
+ }
+ case "D":
+ bts, err = (*z).baseVotingData.VoteLastValid.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteLastValid")
+ return
+ }
+ case "E":
+ (*z).baseVotingData.VoteKeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteKeyDilution")
+ return
+ }
+ case "F":
+ bts, err = (*z).baseVotingData.StateProofID.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "StateProofID")
+ return
+ }
+ case "Y":
+ bts, err = (*z).MicroAlgos.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "MicroAlgos")
+ return
+ }
+ case "Z":
+ (*z).RewardsBase, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "RewardsBase")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *baseOnlineAccountData) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*baseOnlineAccountData)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *baseOnlineAccountData) Msgsize() (s int) {
+ s = 1 + 2 + (*z).baseVotingData.VoteID.Msgsize() + 2 + (*z).baseVotingData.SelectionID.Msgsize() + 2 + (*z).baseVotingData.VoteFirstValid.Msgsize() + 2 + (*z).baseVotingData.VoteLastValid.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).baseVotingData.StateProofID.Msgsize() + 2 + (*z).MicroAlgos.Msgsize() + 2 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *baseOnlineAccountData) MsgIsZero() bool {
+ return ((*z).baseVotingData.VoteID.MsgIsZero()) && ((*z).baseVotingData.SelectionID.MsgIsZero()) && ((*z).baseVotingData.VoteFirstValid.MsgIsZero()) && ((*z).baseVotingData.VoteLastValid.MsgIsZero()) && ((*z).baseVotingData.VoteKeyDilution == 0) && ((*z).baseVotingData.StateProofID.MsgIsZero()) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *baseVotingData) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
zb0001Len := uint32(6)
var zb0001Mask uint8 /* 7 bits */
if (*z).VoteID.MsgIsZero() {
@@ -1001,13 +1301,13 @@ func (z *baseOnlineAccountData) MarshalMsg(b []byte) (o []byte) {
return
}
-func (_ *baseOnlineAccountData) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*baseOnlineAccountData)
+func (_ *baseVotingData) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*baseVotingData)
return ok
}
// UnmarshalMsg implements msgp.Unmarshaler
-func (z *baseOnlineAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
+func (z *baseVotingData) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 int
@@ -1080,7 +1380,7 @@ func (z *baseOnlineAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
if zb0002 {
- (*z) = baseOnlineAccountData{}
+ (*z) = baseVotingData{}
}
for zb0001 > 0 {
zb0001--
@@ -1139,19 +1439,19 @@ func (z *baseOnlineAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
-func (_ *baseOnlineAccountData) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*baseOnlineAccountData)
+func (_ *baseVotingData) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*baseVotingData)
return ok
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *baseOnlineAccountData) Msgsize() (s int) {
+func (z *baseVotingData) Msgsize() (s int) {
s = 1 + 2 + (*z).VoteID.Msgsize() + 2 + (*z).SelectionID.Msgsize() + 2 + (*z).VoteFirstValid.Msgsize() + 2 + (*z).VoteLastValid.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).StateProofID.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
-func (z *baseOnlineAccountData) MsgIsZero() bool {
+func (z *baseVotingData) MsgIsZero() bool {
return ((*z).VoteID.MsgIsZero()) && ((*z).SelectionID.MsgIsZero()) && ((*z).VoteFirstValid.MsgIsZero()) && ((*z).VoteLastValid.MsgIsZero()) && ((*z).VoteKeyDilution == 0) && ((*z).StateProofID.MsgIsZero())
}
@@ -1622,6 +1922,204 @@ func (z *catchpointFileBalancesChunkV6) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *catchpointFirstStageInfo) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(5)
+ var zb0001Mask uint8 /* 6 bits */
+ if (*z).Totals.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if (*z).TotalAccounts == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ if (*z).BiggestChunkLen == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x8
+ }
+ if (*z).TotalChunks == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x10
+ }
+ if (*z).TrieBalancesHash.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x20
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "accountTotals"
+ o = append(o, 0xad, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x73)
+ o = (*z).Totals.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "accountsCount"
+ o = append(o, 0xad, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
+ o = msgp.AppendUint64(o, (*z).TotalAccounts)
+ }
+ if (zb0001Mask & 0x8) == 0 { // if not empty
+ // string "biggestChunk"
+ o = append(o, 0xac, 0x62, 0x69, 0x67, 0x67, 0x65, 0x73, 0x74, 0x43, 0x68, 0x75, 0x6e, 0x6b)
+ o = msgp.AppendUint64(o, (*z).BiggestChunkLen)
+ }
+ if (zb0001Mask & 0x10) == 0 { // if not empty
+ // string "chunksCount"
+ o = append(o, 0xab, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
+ o = msgp.AppendUint64(o, (*z).TotalChunks)
+ }
+ if (zb0001Mask & 0x20) == 0 { // if not empty
+ // string "trieBalancesHash"
+ o = append(o, 0xb0, 0x74, 0x72, 0x69, 0x65, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x48, 0x61, 0x73, 0x68)
+ o = (*z).TrieBalancesHash.MarshalMsg(o)
+ }
+ }
+ return
+}
+
+func (_ *catchpointFirstStageInfo) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*catchpointFirstStageInfo)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *catchpointFirstStageInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Totals.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Totals")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).TrieBalancesHash.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TrieBalancesHash")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).TotalAccounts, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TotalAccounts")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).TotalChunks, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TotalChunks")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).BiggestChunkLen, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "BiggestChunkLen")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = catchpointFirstStageInfo{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "accountTotals":
+ bts, err = (*z).Totals.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Totals")
+ return
+ }
+ case "trieBalancesHash":
+ bts, err = (*z).TrieBalancesHash.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TrieBalancesHash")
+ return
+ }
+ case "accountsCount":
+ (*z).TotalAccounts, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TotalAccounts")
+ return
+ }
+ case "chunksCount":
+ (*z).TotalChunks, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TotalChunks")
+ return
+ }
+ case "biggestChunk":
+ (*z).BiggestChunkLen, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "BiggestChunkLen")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *catchpointFirstStageInfo) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*catchpointFirstStageInfo)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *catchpointFirstStageInfo) Msgsize() (s int) {
+ s = 1 + 14 + (*z).Totals.Msgsize() + 17 + (*z).TrieBalancesHash.Msgsize() + 14 + msgp.Uint64Size + 12 + msgp.Uint64Size + 13 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *catchpointFirstStageInfo) MsgIsZero() bool {
+ return ((*z).Totals.MsgIsZero()) && ((*z).TrieBalancesHash.MsgIsZero()) && ((*z).TotalAccounts == 0) && ((*z).TotalChunks == 0) && ((*z).BiggestChunkLen == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z catchpointState) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
@@ -2800,3 +3298,459 @@ func (z *resourcesData) Msgsize() (s int) {
func (z *resourcesData) MsgIsZero() bool {
return ((*z).Total == 0) && ((*z).Decimals == 0) && ((*z).DefaultFrozen == false) && ((*z).UnitName == "") && ((*z).AssetName == "") && ((*z).URL == "") && ((*z).MetadataHash == ([32]byte{})) && ((*z).Manager.MsgIsZero()) && ((*z).Reserve.MsgIsZero()) && ((*z).Freeze.MsgIsZero()) && ((*z).Clawback.MsgIsZero()) && ((*z).Amount == 0) && ((*z).Frozen == false) && ((*z).SchemaNumUint == 0) && ((*z).SchemaNumByteSlice == 0) && ((*z).KeyValue.MsgIsZero()) && (len((*z).ApprovalProgram) == 0) && (len((*z).ClearStateProgram) == 0) && ((*z).GlobalState.MsgIsZero()) && ((*z).LocalStateSchemaNumUint == 0) && ((*z).LocalStateSchemaNumByteSlice == 0) && ((*z).GlobalStateSchemaNumUint == 0) && ((*z).GlobalStateSchemaNumByteSlice == 0) && ((*z).ExtraProgramPages == 0) && ((*z).ResourceFlags == 0) && ((*z).UpdateRound == 0)
}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *txTailRound) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0004Len := uint32(4)
+ var zb0004Mask uint8 /* 5 bits */
+ if (*z).Hdr.MsgIsZero() {
+ zb0004Len--
+ zb0004Mask |= 0x2
+ }
+ if len((*z).TxnIDs) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x4
+ }
+ if len((*z).Leases) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x8
+ }
+ if len((*z).LastValid) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x10
+ }
+ // variable map header, size zb0004Len
+ o = append(o, 0x80|uint8(zb0004Len))
+ if zb0004Len != 0 {
+ if (zb0004Mask & 0x2) == 0 { // if not empty
+ // string "h"
+ o = append(o, 0xa1, 0x68)
+ o = (*z).Hdr.MarshalMsg(o)
+ }
+ if (zb0004Mask & 0x4) == 0 { // if not empty
+ // string "i"
+ o = append(o, 0xa1, 0x69)
+ if (*z).TxnIDs == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).TxnIDs)))
+ }
+ for zb0001 := range (*z).TxnIDs {
+ o = (*z).TxnIDs[zb0001].MarshalMsg(o)
+ }
+ }
+ if (zb0004Mask & 0x8) == 0 { // if not empty
+ // string "l"
+ o = append(o, 0xa1, 0x6c)
+ if (*z).Leases == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Leases)))
+ }
+ for zb0003 := range (*z).Leases {
+ o = (*z).Leases[zb0003].MarshalMsg(o)
+ }
+ }
+ if (zb0004Mask & 0x10) == 0 { // if not empty
+ // string "v"
+ o = append(o, 0xa1, 0x76)
+ if (*z).LastValid == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).LastValid)))
+ }
+ for zb0002 := range (*z).LastValid {
+ o = (*z).LastValid[zb0002].MarshalMsg(o)
+ }
+ }
+ }
+ return
+}
+
+func (_ *txTailRound) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*txTailRound)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *txTailRound) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 > 0 {
+ zb0004--
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TxnIDs")
+ return
+ }
+ if zb0007 {
+ (*z).TxnIDs = nil
+ } else if (*z).TxnIDs != nil && cap((*z).TxnIDs) >= zb0006 {
+ (*z).TxnIDs = ((*z).TxnIDs)[:zb0006]
+ } else {
+ (*z).TxnIDs = make([]transactions.Txid, zb0006)
+ }
+ for zb0001 := range (*z).TxnIDs {
+ bts, err = (*z).TxnIDs[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TxnIDs", zb0001)
+ return
+ }
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "LastValid")
+ return
+ }
+ if zb0009 {
+ (*z).LastValid = nil
+ } else if (*z).LastValid != nil && cap((*z).LastValid) >= zb0008 {
+ (*z).LastValid = ((*z).LastValid)[:zb0008]
+ } else {
+ (*z).LastValid = make([]basics.Round, zb0008)
+ }
+ for zb0002 := range (*z).LastValid {
+ bts, err = (*z).LastValid[zb0002].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "LastValid", zb0002)
+ return
+ }
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
+ var zb0010 int
+ var zb0011 bool
+ zb0010, zb0011, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Leases")
+ return
+ }
+ if zb0011 {
+ (*z).Leases = nil
+ } else if (*z).Leases != nil && cap((*z).Leases) >= zb0010 {
+ (*z).Leases = ((*z).Leases)[:zb0010]
+ } else {
+ (*z).Leases = make([]txTailRoundLease, zb0010)
+ }
+ for zb0003 := range (*z).Leases {
+ bts, err = (*z).Leases[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Leases", zb0003)
+ return
+ }
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
+ bts, err = (*z).Hdr.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Hdr")
+ return
+ }
+ }
+ if zb0004 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0004)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0005 {
+ (*z) = txTailRound{}
+ }
+ for zb0004 > 0 {
+ zb0004--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "i":
+ var zb0012 int
+ var zb0013 bool
+ zb0012, zb0013, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TxnIDs")
+ return
+ }
+ if zb0013 {
+ (*z).TxnIDs = nil
+ } else if (*z).TxnIDs != nil && cap((*z).TxnIDs) >= zb0012 {
+ (*z).TxnIDs = ((*z).TxnIDs)[:zb0012]
+ } else {
+ (*z).TxnIDs = make([]transactions.Txid, zb0012)
+ }
+ for zb0001 := range (*z).TxnIDs {
+ bts, err = (*z).TxnIDs[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TxnIDs", zb0001)
+ return
+ }
+ }
+ case "v":
+ var zb0014 int
+ var zb0015 bool
+ zb0014, zb0015, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LastValid")
+ return
+ }
+ if zb0015 {
+ (*z).LastValid = nil
+ } else if (*z).LastValid != nil && cap((*z).LastValid) >= zb0014 {
+ (*z).LastValid = ((*z).LastValid)[:zb0014]
+ } else {
+ (*z).LastValid = make([]basics.Round, zb0014)
+ }
+ for zb0002 := range (*z).LastValid {
+ bts, err = (*z).LastValid[zb0002].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LastValid", zb0002)
+ return
+ }
+ }
+ case "l":
+ var zb0016 int
+ var zb0017 bool
+ zb0016, zb0017, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Leases")
+ return
+ }
+ if zb0017 {
+ (*z).Leases = nil
+ } else if (*z).Leases != nil && cap((*z).Leases) >= zb0016 {
+ (*z).Leases = ((*z).Leases)[:zb0016]
+ } else {
+ (*z).Leases = make([]txTailRoundLease, zb0016)
+ }
+ for zb0003 := range (*z).Leases {
+ bts, err = (*z).Leases[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Leases", zb0003)
+ return
+ }
+ }
+ case "h":
+ bts, err = (*z).Hdr.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Hdr")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *txTailRound) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*txTailRound)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *txTailRound) Msgsize() (s int) {
+ s = 1 + 2 + msgp.ArrayHeaderSize
+ for zb0001 := range (*z).TxnIDs {
+ s += (*z).TxnIDs[zb0001].Msgsize()
+ }
+ s += 2 + msgp.ArrayHeaderSize
+ for zb0002 := range (*z).LastValid {
+ s += (*z).LastValid[zb0002].Msgsize()
+ }
+ s += 2 + msgp.ArrayHeaderSize
+ for zb0003 := range (*z).Leases {
+ s += (*z).Leases[zb0003].Msgsize()
+ }
+ s += 2 + (*z).Hdr.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *txTailRound) MsgIsZero() bool {
+ return (len((*z).TxnIDs) == 0) && (len((*z).LastValid) == 0) && (len((*z).Leases) == 0) && ((*z).Hdr.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *txTailRoundLease) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0002Len := uint32(3)
+ var zb0002Mask uint8 /* 4 bits */
+ if (*z).TxnIdx == 0 {
+ zb0002Len--
+ zb0002Mask |= 0x1
+ }
+ if (*z).Lease == ([32]byte{}) {
+ zb0002Len--
+ zb0002Mask |= 0x4
+ }
+ if (*z).Sender.MsgIsZero() {
+ zb0002Len--
+ zb0002Mask |= 0x8
+ }
+ // variable map header, size zb0002Len
+ o = append(o, 0x80|uint8(zb0002Len))
+ if zb0002Len != 0 {
+ if (zb0002Mask & 0x1) == 0 { // if not empty
+ // string "TxnIdx"
+ o = append(o, 0xa6, 0x54, 0x78, 0x6e, 0x49, 0x64, 0x78)
+ o = msgp.AppendUint64(o, (*z).TxnIdx)
+ }
+ if (zb0002Mask & 0x4) == 0 { // if not empty
+ // string "l"
+ o = append(o, 0xa1, 0x6c)
+ o = msgp.AppendBytes(o, ((*z).Lease)[:])
+ }
+ if (zb0002Mask & 0x8) == 0 { // if not empty
+ // string "s"
+ o = append(o, 0xa1, 0x73)
+ o = (*z).Sender.MarshalMsg(o)
+ }
+ }
+ return
+}
+
+func (_ *txTailRoundLease) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*txTailRoundLease)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *txTailRoundLease) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0002 int
+ var zb0003 bool
+ zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 > 0 {
+ zb0002--
+ bts, err = (*z).Sender.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Sender")
+ return
+ }
+ }
+ if zb0002 > 0 {
+ zb0002--
+ bts, err = msgp.ReadExactBytes(bts, ((*z).Lease)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Lease")
+ return
+ }
+ }
+ if zb0002 > 0 {
+ zb0002--
+ (*z).TxnIdx, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TxnIdx")
+ return
+ }
+ }
+ if zb0002 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0002)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 {
+ (*z) = txTailRoundLease{}
+ }
+ for zb0002 > 0 {
+ zb0002--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "s":
+ bts, err = (*z).Sender.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Sender")
+ return
+ }
+ case "l":
+ bts, err = msgp.ReadExactBytes(bts, ((*z).Lease)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "Lease")
+ return
+ }
+ case "TxnIdx":
+ (*z).TxnIdx, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TxnIdx")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *txTailRoundLease) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*txTailRoundLease)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *txTailRoundLease) Msgsize() (s int) {
+ s = 1 + 2 + (*z).Sender.Msgsize() + 2 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 7 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *txTailRoundLease) MsgIsZero() bool {
+ return ((*z).Sender.MsgIsZero()) && ((*z).Lease == ([32]byte{})) && ((*z).TxnIdx == 0)
+}
diff --git a/ledger/msgp_gen_test.go b/ledger/msgp_gen_test.go
index 140b1e882..165ecec8d 100644
--- a/ledger/msgp_gen_test.go
+++ b/ledger/msgp_gen_test.go
@@ -194,6 +194,66 @@ func BenchmarkUnmarshalbaseOnlineAccountData(b *testing.B) {
}
}
+func TestMarshalUnmarshalbaseVotingData(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := baseVotingData{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingbaseVotingData(t *testing.T) {
+ protocol.RunEncodingTest(t, &baseVotingData{})
+}
+
+func BenchmarkMarshalMsgbaseVotingData(b *testing.B) {
+ v := baseVotingData{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgbaseVotingData(b *testing.B) {
+ v := baseVotingData{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalbaseVotingData(b *testing.B) {
+ v := baseVotingData{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalcatchpointFileBalancesChunkV5(t *testing.T) {
partitiontest.PartitionTest(t)
v := catchpointFileBalancesChunkV5{}
@@ -314,6 +374,66 @@ func BenchmarkUnmarshalcatchpointFileBalancesChunkV6(b *testing.B) {
}
}
+func TestMarshalUnmarshalcatchpointFirstStageInfo(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := catchpointFirstStageInfo{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingcatchpointFirstStageInfo(t *testing.T) {
+ protocol.RunEncodingTest(t, &catchpointFirstStageInfo{})
+}
+
+func BenchmarkMarshalMsgcatchpointFirstStageInfo(b *testing.B) {
+ v := catchpointFirstStageInfo{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgcatchpointFirstStageInfo(b *testing.B) {
+ v := catchpointFirstStageInfo{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalcatchpointFirstStageInfo(b *testing.B) {
+ v := catchpointFirstStageInfo{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalencodedBalanceRecordV5(t *testing.T) {
partitiontest.PartitionTest(t)
v := encodedBalanceRecordV5{}
@@ -493,3 +613,123 @@ func BenchmarkUnmarshalresourcesData(b *testing.B) {
}
}
}
+
+func TestMarshalUnmarshaltxTailRound(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := txTailRound{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingtxTailRound(t *testing.T) {
+ protocol.RunEncodingTest(t, &txTailRound{})
+}
+
+func BenchmarkMarshalMsgtxTailRound(b *testing.B) {
+ v := txTailRound{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgtxTailRound(b *testing.B) {
+ v := txTailRound{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshaltxTailRound(b *testing.B) {
+ v := txTailRound{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshaltxTailRoundLease(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := txTailRoundLease{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingtxTailRoundLease(t *testing.T) {
+ protocol.RunEncodingTest(t, &txTailRoundLease{})
+}
+
+func BenchmarkMarshalMsgtxTailRoundLease(b *testing.B) {
+ v := txTailRoundLease{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgtxTailRoundLease(b *testing.B) {
+ v := txTailRoundLease{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshaltxTailRoundLease(b *testing.B) {
+ v := txTailRoundLease{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/ledger/onlineaccountscache.go b/ledger/onlineaccountscache.go
new file mode 100644
index 000000000..2d4399409
--- /dev/null
+++ b/ledger/onlineaccountscache.go
@@ -0,0 +1,147 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "container/list"
+
+ "github.com/algorand/go-algorand/data/basics"
+)
+
+// Worst case memory usage = 2500 * 320 * 150B = 120MB
+const onlineAccountsCacheMaxSize = 2500
+
+type onlineAccountsCache struct {
+ // each List stores online account data with newest
+ // at the front, and oldest at the back.
+ accounts map[basics.Address]*list.List
+ maxCacheSize int
+}
+
+// init initializes the onlineAccountsCache for use.
+// thread locking semantics : write lock
+func (o *onlineAccountsCache) init(accts []persistedOnlineAccountData, maxCacheSize int) {
+ o.accounts = make(map[basics.Address]*list.List)
+ o.maxCacheSize = maxCacheSize
+
+ for _, acct := range accts {
+ // if cache full, stop writing
+ cachedAcct := cachedOnlineAccount{
+ baseOnlineAccountData: acct.accountData,
+ updRound: acct.updRound,
+ }
+ if !o.writeFront(acct.addr, cachedAcct) {
+ break
+ }
+ }
+}
+
+func (o *onlineAccountsCache) full() bool {
+ return len(o.accounts) >= o.maxCacheSize
+}
+
+// read the cachedOnlineAccount object that the cache has for the given address.
+// thread locking semantics : read lock
+func (o *onlineAccountsCache) read(addr basics.Address, rnd basics.Round) (cachedOnlineAccount, bool) {
+ if list := o.accounts[addr]; list != nil {
+ node := list.Back()
+ prevValue := node.Value.(*cachedOnlineAccount)
+ if prevValue.updRound > rnd {
+ return cachedOnlineAccount{}, false
+ }
+ for node.Prev() != nil {
+ node = node.Prev()
+ // only need one entry that is targetRound or older
+ currentValue := node.Value.(*cachedOnlineAccount)
+ if currentValue.updRound > rnd {
+ return *prevValue, true
+ }
+ prevValue = currentValue
+ }
+ return *prevValue, true
+ }
+ return cachedOnlineAccount{}, false
+}
+
+// write a single cachedOnlineAccount to the cache
+// thread locking semantics : write lock
+func (o *onlineAccountsCache) writeFront(addr basics.Address, acctData cachedOnlineAccount) bool {
+ var l *list.List
+ var ok bool
+ if l, ok = o.accounts[addr]; !ok {
+ if o.full() {
+ return false
+ }
+ l = list.New()
+ }
+ // do not insert if acctData would not be the newest entry in the cache
+ if l.Front() != nil && acctData.updRound <= l.Front().Value.(*cachedOnlineAccount).updRound {
+ return false
+ }
+ l.PushFront(&acctData)
+ o.accounts[addr] = l
+ return true
+}
+
+// write a single cachedOnlineAccount to the cache only if there are some history entries
+// thread locking semantics : write lock
+func (o *onlineAccountsCache) writeFrontIfExist(addr basics.Address, acctData cachedOnlineAccount) {
+ var l *list.List
+ var ok bool
+ if l, ok = o.accounts[addr]; !ok {
+ return
+ }
+ if l.Len() == 0 {
+ return
+ }
+ // do not insert if acctData would not be the newest entry in the cache
+ if l.Front() != nil && acctData.updRound <= l.Front().Value.(*cachedOnlineAccount).updRound {
+ return
+ }
+ l.PushFront(&acctData)
+ o.accounts[addr] = l
+}
+
+// prune trims the onlineAccountsCache by only keeping entries that would give account state
+// of rounds targetRound and later, repeating the deletion logic from the history DB
+// thread locking semantics : write lock
+func (o *onlineAccountsCache) prune(targetRound basics.Round) {
+ for addr, list := range o.accounts {
+ node := list.Back()
+ for node.Prev() != nil {
+ node = node.Prev()
+ // keep only one entry that is targetRound or older
+ // discard all entries older than targetRound other than the current entry
+ if node.Value.(*cachedOnlineAccount).updRound < targetRound {
+ list.Remove(node.Next())
+ } else {
+ break
+ }
+ }
+ // only one item left in cache
+ if node.Prev() == nil && node.Next() == nil {
+ if node.Value.(*cachedOnlineAccount).IsVotingEmpty() {
+ delete(o.accounts, addr)
+ }
+ }
+ }
+}
+
+// delete cache for a particular address
+func (o *onlineAccountsCache) clear(addr basics.Address) {
+ delete(o.accounts, addr)
+}
diff --git a/ledger/onlineaccountscache_test.go b/ledger/onlineaccountscache_test.go
new file mode 100644
index 000000000..42a94bb42
--- /dev/null
+++ b/ledger/onlineaccountscache_test.go
@@ -0,0 +1,240 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestOnlineAccountsCacheBasic(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ var oac onlineAccountsCache
+ const maxCacheSize = 10
+ oac.init(nil, maxCacheSize)
+
+ addr := basics.Address(crypto.Hash([]byte{byte(0)}))
+
+ roundsNum := 50
+ for i := 0; i < roundsNum; i++ {
+ acct := cachedOnlineAccount{
+ updRound: basics.Round(i),
+ baseOnlineAccountData: baseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}, baseVotingData: baseVotingData{VoteLastValid: 1000}},
+ }
+ written := oac.writeFront(addr, acct)
+ require.True(t, written)
+ }
+
+ // verify that all these onlineaccounts are truly there.
+ for i := 0; i < roundsNum; i++ {
+ acct, has := oac.read(addr, basics.Round(i))
+ require.True(t, has)
+ require.Equal(t, basics.Round(i), acct.updRound)
+ require.Equal(t, uint64(i), acct.MicroAlgos.Raw)
+ }
+
+ for i := proto.MaxBalLookback; i < uint64(roundsNum)+proto.MaxBalLookback; i++ {
+ acct := cachedOnlineAccount{
+ updRound: basics.Round(i),
+ baseOnlineAccountData: baseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: i}, baseVotingData: baseVotingData{VoteLastValid: 1000}},
+ }
+ written := oac.writeFront(addr, acct)
+ require.True(t, written)
+ }
+
+ oac.prune(basics.Round(proto.MaxBalLookback - 1))
+
+ // verify that all these accounts are truly there.
+ acct, has := oac.read(addr, basics.Round(proto.MaxBalLookback-1))
+ require.True(t, has)
+ require.Equal(t, basics.Round(roundsNum-1), acct.updRound)
+ require.Equal(t, uint64(roundsNum-1), acct.MicroAlgos.Raw)
+
+ for i := proto.MaxBalLookback; i < uint64(roundsNum)+proto.MaxBalLookback; i++ {
+ acct, has := oac.read(addr, basics.Round(i))
+ require.True(t, has)
+ require.Equal(t, basics.Round(i), acct.updRound)
+ require.Equal(t, uint64(i), acct.MicroAlgos.Raw)
+ }
+
+ _, has = oac.read(addr, basics.Round(0))
+ require.False(t, has)
+
+ // attempt to insert a value with the updRound less than latest, expect it to have ignored
+ acct = cachedOnlineAccount{
+ updRound: basics.Round(uint64(roundsNum) + proto.MaxBalLookback - 1),
+ baseOnlineAccountData: baseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: 100}, baseVotingData: baseVotingData{VoteLastValid: 1000}},
+ }
+ written := oac.writeFront(addr, acct)
+ require.False(t, written)
+}
+
+func TestOnlineAccountsCachePruneOffline(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ var oac onlineAccountsCache
+ const maxCacheSize = 10
+ oac.init(nil, maxCacheSize)
+
+ addr := basics.Address(crypto.Hash([]byte{byte(0)}))
+
+ roundsNum := 50
+ for i := 0; i < roundsNum; i++ {
+ acct := cachedOnlineAccount{
+ updRound: basics.Round(i),
+ baseOnlineAccountData: baseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}, baseVotingData: baseVotingData{VoteLastValid: 1000}},
+ }
+ oac.writeFront(addr, acct)
+ }
+ acct := cachedOnlineAccount{
+ updRound: basics.Round(roundsNum),
+ baseOnlineAccountData: baseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(roundsNum)}},
+ }
+ oac.writeFront(addr, acct)
+
+ _, has := oac.read(addr, basics.Round(proto.MaxBalLookback))
+ require.True(t, has)
+
+ oac.prune(basics.Round(proto.MaxBalLookback))
+
+ _, has = oac.read(addr, basics.Round(proto.MaxBalLookback))
+ require.False(t, has)
+}
+
+func TestOnlineAccountsCacheMaxEntries(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var oac onlineAccountsCache
+ const maxCacheSize = 10
+ oac.init(nil, maxCacheSize)
+ var lastAddr basics.Address
+ for i := 0; i < maxCacheSize; i++ {
+ lastAddr = ledgertesting.RandomAddress()
+ acct := cachedOnlineAccount{
+ updRound: basics.Round(i),
+ baseOnlineAccountData: baseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(i)}, baseVotingData: baseVotingData{VoteLastValid: 1000}},
+ }
+ written := oac.writeFront(lastAddr, acct)
+ require.True(t, written)
+ }
+
+ acct := cachedOnlineAccount{
+ updRound: basics.Round(100),
+ baseOnlineAccountData: baseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(100)}, baseVotingData: baseVotingData{VoteLastValid: 1000}},
+ }
+ written := oac.writeFront(ledgertesting.RandomAddress(), acct)
+ require.False(t, written)
+
+ require.Equal(t, maxCacheSize, len(oac.accounts))
+ require.True(t, oac.full())
+
+ // set one to be expired
+ acct = cachedOnlineAccount{
+ updRound: basics.Round(maxCacheSize),
+ baseOnlineAccountData: baseOnlineAccountData{MicroAlgos: basics.MicroAlgos{Raw: uint64(100)}, baseVotingData: baseVotingData{}},
+ }
+ written = oac.writeFront(lastAddr, acct)
+ require.True(t, written)
+
+ // prune too old => no effect
+ oac.prune(maxCacheSize)
+ require.Equal(t, maxCacheSize, len(oac.accounts))
+ require.True(t, oac.full())
+
+ // remove one online entry that also trigger removal the offline remaining entry as well
+ oac.prune(maxCacheSize + 1)
+ require.Equal(t, maxCacheSize-1, len(oac.accounts))
+ require.False(t, oac.full())
+
+ // ensure not written
+ oac.writeFrontIfExist(ledgertesting.RandomAddress(), acct)
+ require.Equal(t, maxCacheSize-1, len(oac.accounts))
+ require.False(t, oac.full())
+
+ // write a new address, check writeFrontIfExist after
+ addr := ledgertesting.RandomAddress()
+ written = oac.writeFront(addr, acct)
+ require.True(t, written)
+ require.Equal(t, 1, oac.accounts[addr].Len())
+ acct.updRound++
+ oac.writeFrontIfExist(addr, acct)
+ require.Equal(t, 2, oac.accounts[addr].Len())
+}
+
+var benchmarkOnlineAccountsCacheReadResult cachedOnlineAccount
+
+func benchmarkOnlineAccountsCacheRead(b *testing.B, historyLength int) {
+ // Create multiple accounts to simulate real usage and avoid excessive memory caching.
+ const numAccounts = 1000
+
+ makeAddress := func(i int) (addr basics.Address) {
+ addr[0] = byte(i)
+ return
+ }
+
+ var cache onlineAccountsCache
+ cache.init(nil, numAccounts)
+
+ // Iterate over rounds in the outer loop and accounts in the inner loop.
+ // This has large (negative) impact on lookup performance since an account's
+ // linked list nodes will not reside in memory consecutively.
+ for i := 1; i <= historyLength; i++ {
+ for j := 0; j < numAccounts; j++ {
+ cache.writeFront(makeAddress(j), cachedOnlineAccount{updRound: basics.Round(i)})
+ }
+ }
+
+ // Prevent the benchmark from using too few iterations. That would make the
+ // preparation stage above non-negligible.
+ minN := 100
+ if b.N < minN {
+ b.N = minN
+ }
+
+ var r cachedOnlineAccount
+ for i := 0; i < b.N; i++ {
+ for j := 0; j < numAccounts; j++ {
+ r, _ = cache.read(makeAddress(j), basics.Round(historyLength))
+ }
+ }
+
+ // Prevent compiler from optimizing the code.
+ benchmarkOnlineAccountsCacheReadResult = r
+}
+
+// A typical history length.
+func BenchmarkOnlineAccountsCacheRead320(b *testing.B) {
+ benchmarkOnlineAccountsCacheRead(b, 320)
+}
+
+// A worst case history length when state proofs are delayed.
+func BenchmarkOnlineAccountsCacheRead2560(b *testing.B) {
+ benchmarkOnlineAccountsCacheRead(b, 2560)
+}
diff --git a/ledger/persistedonlineaccts_list.go b/ledger/persistedonlineaccts_list.go
new file mode 100644
index 000000000..a5c37ca8f
--- /dev/null
+++ b/ledger/persistedonlineaccts_list.go
@@ -0,0 +1,144 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+// persistedOnlineAccountDataList represents a doubly linked list.
+// must initiate with newPersistedAccountList.
+type persistedOnlineAccountDataList struct {
+ root persistedOnlineAccountDataListNode // sentinel list element, only &root, root.prev, and root.next are used
+ freeList *persistedOnlineAccountDataListNode // preallocated nodes location
+}
+
+type persistedOnlineAccountDataListNode struct {
+ // Next and previous pointers in the doubly-linked list of elements.
+ // To simplify the implementation, internally a list l is implemented
+ // as a ring, such that &l.root is both the next element of the last
+ // list element (l.Back()) and the previous element of the first list
+ // element (l.Front()).
+ next, prev *persistedOnlineAccountDataListNode
+
+ Value *persistedOnlineAccountData
+}
+
+func newPersistedOnlineAccountList() *persistedOnlineAccountDataList {
+ l := new(persistedOnlineAccountDataList)
+ l.root.next = &l.root
+ l.root.prev = &l.root
+ // used as a helper but does not store value
+ l.freeList = new(persistedOnlineAccountDataListNode)
+
+ return l
+}
+
+func (l *persistedOnlineAccountDataList) insertNodeToFreeList(otherNode *persistedOnlineAccountDataListNode) {
+ otherNode.next = l.freeList.next
+ otherNode.prev = nil
+ otherNode.Value = nil
+
+ l.freeList.next = otherNode
+}
+
+func (l *persistedOnlineAccountDataList) getNewNode() *persistedOnlineAccountDataListNode {
+ if l.freeList.next == nil {
+ return new(persistedOnlineAccountDataListNode)
+ }
+ newNode := l.freeList.next
+ l.freeList.next = newNode.next
+
+ return newNode
+}
+
+func (l *persistedOnlineAccountDataList) allocateFreeNodes(numAllocs int) *persistedOnlineAccountDataList {
+ if l.freeList == nil {
+ return l
+ }
+ for i := 0; i < numAllocs; i++ {
+ l.insertNodeToFreeList(new(persistedOnlineAccountDataListNode))
+ }
+
+ return l
+}
+
+// Back returns the last element of list l or nil if the list is empty.
+func (l *persistedOnlineAccountDataList) back() *persistedOnlineAccountDataListNode {
+ isEmpty := func(list *persistedOnlineAccountDataList) bool {
+ // assumes we are inserting correctly to the list - using pushFront.
+ return list.root.next == &list.root
+ }
+
+ if isEmpty(l) {
+ return nil
+ }
+ return l.root.prev
+}
+
+// remove removes e from l if e is an element of list l.
+// It returns the element value e.Value.
+// The element must not be nil.
+func (l *persistedOnlineAccountDataList) remove(e *persistedOnlineAccountDataListNode) {
+ e.prev.next = e.next
+ e.next.prev = e.prev
+ e.next = nil // avoid memory leaks
+ e.prev = nil // avoid memory leaks
+
+ l.insertNodeToFreeList(e)
+}
+
+// pushFront inserts a new element e with value v at the front of list l and returns e.
+func (l *persistedOnlineAccountDataList) pushFront(v *persistedOnlineAccountData) *persistedOnlineAccountDataListNode {
+ newNode := l.getNewNode()
+ newNode.Value = v
+ return l.insertValue(newNode, &l.root)
+}
+
+// insertValue inserts e after at, increments l.len, and returns e.
+func (l *persistedOnlineAccountDataList) insertValue(newNode *persistedOnlineAccountDataListNode, at *persistedOnlineAccountDataListNode) *persistedOnlineAccountDataListNode {
+ n := at.next
+ at.next = newNode
+ newNode.prev = at
+ newNode.next = n
+ n.prev = newNode
+
+ return newNode
+}
+
+// moveToFront moves element e to the front of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *persistedOnlineAccountDataList) moveToFront(e *persistedOnlineAccountDataListNode) {
+ if l.root.next == e {
+ return
+ }
+ l.move(e, &l.root)
+}
+
+// move moves e to next to at and returns e.
+func (l *persistedOnlineAccountDataList) move(e, at *persistedOnlineAccountDataListNode) *persistedOnlineAccountDataListNode {
+ if e == at {
+ return e
+ }
+ e.prev.next = e.next
+ e.next.prev = e.prev
+
+ n := at.next
+ at.next = e
+ e.prev = at
+ e.next = n
+ n.prev = e
+
+ return e
+}
diff --git a/ledger/persistedonlineaccts_list_test.go b/ledger/persistedonlineaccts_list_test.go
new file mode 100644
index 000000000..7bc0ad373
--- /dev/null
+++ b/ledger/persistedonlineaccts_list_test.go
@@ -0,0 +1,176 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func (l *persistedOnlineAccountDataList) getRoot() dataListNode {
+ return &l.root
+}
+
+func (l *persistedOnlineAccountDataListNode) getNext() dataListNode {
+ // get rid of returning nil wrapped into an interface to let i = x.getNext(); i != nil work.
+ if l.next == nil {
+ return nil
+ }
+ return l.next
+}
+
+func (l *persistedOnlineAccountDataListNode) getPrev() dataListNode {
+ if l.prev == nil {
+ return nil
+ }
+ return l.prev
+}
+
+func TestRemoveFromListOAD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedOnlineAccountList()
+ e1 := l.pushFront(&persistedOnlineAccountData{addr: basics.Address{1}})
+ e2 := l.pushFront(&persistedOnlineAccountData{addr: basics.Address{2}})
+ e3 := l.pushFront(&persistedOnlineAccountData{addr: basics.Address{3}})
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e3, e2, e1})
+
+ l.remove(e2)
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e3, e1})
+ l.remove(e3)
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e1})
+}
+
+func TestAddingNewNodeWithAllocatedFreeListOAD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedOnlineAccountList().allocateFreeNodes(10)
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{})
+ if countListSize(l.freeList) != 10 {
+ t.Errorf("free list did not allocate nodes")
+ return
+ }
+ // test elements
+ e1 := l.pushFront(&persistedOnlineAccountData{addr: basics.Address{1}})
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e1})
+
+ if countListSize(l.freeList) != 9 {
+ t.Errorf("free list did not provide a node on new list entry")
+ return
+ }
+}
+
+// inspect that the list seems like the array
+func checkListPointersOAD(t *testing.T, l *persistedOnlineAccountDataList, es []*persistedOnlineAccountDataListNode) {
+ es2 := make([]dataListNode, len(es))
+ for i, el := range es {
+ es2[i] = el
+ }
+
+ checkListPointers(t, l, es2)
+}
+
+func TestMultielementListPositioningOAD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedOnlineAccountList()
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{})
+ // test elements
+ e2 := l.pushFront(&persistedOnlineAccountData{addr: basics.Address{2}})
+ e1 := l.pushFront(&persistedOnlineAccountData{addr: basics.Address{1}})
+ e3 := l.pushFront(&persistedOnlineAccountData{addr: basics.Address{3}})
+ e4 := l.pushFront(&persistedOnlineAccountData{addr: basics.Address{4}})
+ e5 := l.pushFront(&persistedOnlineAccountData{addr: basics.Address{5}})
+
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e5, e4, e3, e1, e2})
+
+ l.move(e4, e1)
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e5, e3, e1, e4, e2})
+
+ l.remove(e5)
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e3, e1, e4, e2})
+
+ l.move(e1, e4) // swap in middle
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e3, e4, e1, e2})
+
+ l.moveToFront(e4)
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e4, e3, e1, e2})
+
+ l.remove(e2)
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e4, e3, e1})
+
+ l.moveToFront(e3) // move from middle
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e3, e4, e1})
+
+ l.moveToFront(e1) // move from end
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e1, e3, e4})
+
+ l.moveToFront(e1) // no movement
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e1, e3, e4})
+
+ e2 = l.pushFront(&persistedOnlineAccountData{addr: basics.Address{2}})
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e2, e1, e3, e4})
+
+ l.remove(e3) // removing from middle
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e2, e1, e4})
+
+ l.remove(e4) // removing from end
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e2, e1})
+
+ l.move(e2, e1) // swapping between two elements
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e1, e2})
+
+ l.remove(e1) // removing front
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e2})
+
+ l.move(e2, l.back()) // swapping element with itself.
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e2})
+
+ l.remove(e2) // remove last one
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{})
+}
+
+func TestSingleElementListPositioningOD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedOnlineAccountList()
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{})
+ e := l.pushFront(&persistedOnlineAccountData{addr: basics.Address{1}})
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e})
+ l.moveToFront(e)
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e})
+ l.remove(e)
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{})
+}
+
+func TestRemovedNodeShouldBeMovedToFreeListOAD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedOnlineAccountList()
+ e1 := l.pushFront(&persistedOnlineAccountData{addr: basics.Address{1}})
+ e2 := l.pushFront(&persistedOnlineAccountData{addr: basics.Address{2}})
+
+ checkListPointersOAD(t, l, []*persistedOnlineAccountDataListNode{e2, e1})
+
+ e := l.back()
+ l.remove(e)
+
+ for i := l.freeList.next; i != nil; i = i.next {
+ if i == e {
+ // stopping the tst with good results:
+ return
+ }
+ }
+ t.Error("expected the removed node to appear at the freelist")
+}
diff --git a/ledger/testing/initState.go b/ledger/testing/initState.go
index 883517a5e..559d03b8a 100644
--- a/ledger/testing/initState.go
+++ b/ledger/testing/initState.go
@@ -60,12 +60,15 @@ func GenerateInitState(tb testing.TB, proto protocol.ConsensusVersion, baseAlgoP
gensecrets[i] = x
}
- initKeys = make(map[basics.Address]*crypto.SignatureSecrets)
- initAccounts := make(map[basics.Address]basics.AccountData)
+ initKeys = make(map[basics.Address]*crypto.SignatureSecrets, len(genaddrs)+2) // + pool and sink
+ initAccounts := make(map[basics.Address]basics.AccountData, len(genaddrs)+2)
for i := range genaddrs {
initKeys[genaddrs[i]] = gensecrets[i]
// Give each account quite a bit more balance than MinFee or MinBalance
- initAccounts[genaddrs[i]] = basics.MakeAccountData(basics.Online, basics.MicroAlgos{Raw: uint64((i + baseAlgoPerAccount) * 100000)})
+ ad := basics.MakeAccountData(basics.Online, basics.MicroAlgos{Raw: uint64((i + baseAlgoPerAccount) * 100000)})
+ ad.VoteFirstValid = 1
+ ad.VoteLastValid = 100_000
+ initAccounts[genaddrs[i]] = ad
}
initKeys[poolAddr] = poolSecret
initAccounts[poolAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 1234567})
diff --git a/ledger/testing/randomAccounts.go b/ledger/testing/randomAccounts.go
index 860031519..0f20579a8 100644
--- a/ledger/testing/randomAccounts.go
+++ b/ledger/testing/randomAccounts.go
@@ -18,9 +18,11 @@ package testing
import (
"fmt"
+ "math"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
@@ -47,7 +49,7 @@ func RandomNote() []byte {
}
// RandomAccountData generates a random AccountData
-func RandomAccountData(rewardsLevel uint64) basics.AccountData {
+func RandomAccountData(rewardsBase uint64) basics.AccountData {
var data basics.AccountData
// Avoid overflowing totals
@@ -56,15 +58,27 @@ func RandomAccountData(rewardsLevel uint64) basics.AccountData {
switch crypto.RandUint64() % 3 {
case 0:
data.Status = basics.Online
+ data.VoteLastValid = 1000
case 1:
data.Status = basics.Offline
+ data.VoteLastValid = 0
default:
data.Status = basics.NotParticipating
}
- data.RewardsBase = rewardsLevel
data.VoteFirstValid = 0
+ data.RewardsBase = rewardsBase
+ return data
+}
+
+// RandomOnlineAccountData is similar to RandomAccountData but always creates online account
+func RandomOnlineAccountData(rewardsBase uint64) basics.AccountData {
+ var data basics.AccountData
+ data.MicroAlgos.Raw = crypto.RandUint64() % (1 << 32)
+ data.Status = basics.Online
data.VoteLastValid = 1000
+ data.VoteFirstValid = 0
+ data.RewardsBase = rewardsBase
return data
}
@@ -190,12 +204,21 @@ func RandomAppLocalState() basics.AppLocalState {
func RandomFullAccountData(rewardsLevel uint64, lastCreatableID *basics.CreatableIndex, assets map[basics.AssetIndex]struct{}, apps map[basics.AppIndex]struct{}) basics.AccountData {
data := RandomAccountData(rewardsLevel)
- crypto.RandBytes(data.VoteID[:])
- crypto.RandBytes(data.SelectionID[:])
- crypto.RandBytes(data.StateProofID[:])
- data.VoteFirstValid = basics.Round(crypto.RandUint64())
- data.VoteLastValid = basics.Round(crypto.RandUint64())
- data.VoteKeyDilution = crypto.RandUint64()
+ if data.Status == basics.Online {
+ crypto.RandBytes(data.VoteID[:])
+ crypto.RandBytes(data.SelectionID[:])
+ crypto.RandBytes(data.StateProofID[:])
+ data.VoteFirstValid = basics.Round(crypto.RandUint64())
+ data.VoteLastValid = basics.Round(crypto.RandUint64() % uint64(math.MaxInt64)) // int64 is the max sqlite can store
+ data.VoteKeyDilution = crypto.RandUint64()
+ } else {
+ data.VoteID = crypto.OneTimeSignatureVerifier{}
+ data.SelectionID = crypto.VRFVerifier{}
+ data.StateProofID = merklesignature.Commitment{}
+ data.VoteFirstValid = 0
+ data.VoteLastValid = 0
+ data.VoteKeyDilution = 0
+ }
if (crypto.RandUint64() % 2) == 1 {
// if account has created assets, have these defined.
createdAssetsCount := crypto.RandUint64()%20 + 1
diff --git a/ledger/tracker.go b/ledger/tracker.go
index ca3bf894d..ca042a279 100644
--- a/ledger/tracker.go
+++ b/ledger/tracker.go
@@ -33,6 +33,7 @@ import (
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/logging/telemetryspec"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
"github.com/algorand/go-deadlock"
)
@@ -142,13 +143,16 @@ type ledgerForTracker interface {
BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
GenesisHash() crypto.Digest
GenesisProto() config.ConsensusParams
+ GenesisProtoVersion() protocol.ConsensusVersion
GenesisAccounts() map[basics.Address]basics.AccountData
}
type trackerRegistry struct {
trackers []ledgerTracker
- // the accts has some exceptional usages in the tracker registry.
- accts *accountUpdates
+ // these trackers have some exceptional usages in the tracker registry.
+ accts *accountUpdates
+ acctsOnline *onlineAccounts
+ tail *txTail
// ctx is the context for the committing go-routine.
ctx context.Context
@@ -183,6 +187,8 @@ type trackerRegistry struct {
// lastFlushTime is the time we last flushed updates to
// the accounts DB (bumping dbRound).
lastFlushTime time.Time
+
+ cfg config.Local
}
// deferredCommitRange is used during the calls to produceCommittingTask, and used as a data structure
@@ -192,20 +198,33 @@ type deferredCommitRange struct {
offset uint64
oldBase basics.Round
lookback basics.Round
+ // lowestRound defines how many rounds of history the voters trackers want to preserve.
+ // This value overruns the MaxBalLookback if greater. See lowestRound() for details.
+ lowestRound basics.Round
+
+ // catchpointLookback determines the offset from round number to take a snapshot for.
+ // i.e. for round X the DB snapshot is taken at X-catchpointLookback
+ catchpointLookback uint64
// pendingDeltas is the number of accounts that were modified within this commit context.
// note that in this number we might have the same account being modified several times.
pendingDeltas int
- isCatchpointRound bool
+ // True iff we are doing the first stage of catchpoint generation, possibly creating
+ // a catchpoint data file, in this commit cycle iteration.
+ catchpointFirstStage bool
- // catchpointWriting is a pointer to a variable with the same name in the catchpointTracker.
- // it's used in order to reset the catchpointWriting flag from the acctupdates's
- // prepareCommit/commitRound ( which is called before the corresponding catchpoint tracker method )
- catchpointWriting *int32
+ // catchpointDataWriting is a pointer to a variable with the same name in the
+ // catchpointTracker. It's used in order to reset the catchpointDataWriting flag from
+ // the acctupdates's prepareCommit/commitRound (which is called before the
+ // corresponding catchpoint tracker method.
+ catchpointDataWriting *int32
// enableGeneratingCatchpointFiles controls whether the node produces catchpoint files or not.
enableGeneratingCatchpointFiles bool
+
+ // True iff the commit range includes a catchpoint round.
+ catchpointSecondStage bool
}
// deferredCommitContext is used in order to syncornize the persistence of a given deferredCommitRange.
@@ -218,8 +237,10 @@ type deferredCommitContext struct {
genesisProto config.ConsensusParams
- deltas []ledgercore.AccountDeltas
- roundTotals ledgercore.AccountTotals
+ roundTotals ledgercore.AccountTotals
+ onlineRoundParams []ledgercore.OnlineRoundParamsData
+ onlineAccountsForgetBefore basics.Round
+
compactAccountDeltas compactAccountDeltas
compactResourcesDeltas compactResourcesDeltas
compactCreatableDeltas map[basics.CreatableIndex]ledgercore.ModifiedCreatable
@@ -227,10 +248,23 @@ type deferredCommitContext struct {
updatedPersistedAccounts []persistedAccountData
updatedPersistedResources map[basics.Address][]persistedResourcesData
- committedRoundDigest crypto.Digest
- trieBalancesHash crypto.Digest
+ compactOnlineAccountDeltas compactOnlineAccountDeltas
+ updatedPersistedOnlineAccounts []persistedOnlineAccountData
+
updatingBalancesDuration time.Duration
- catchpointLabel string
+
+ // Block hashes for the committed rounds range.
+ committedRoundDigests []crypto.Digest
+
+ // on catchpoint rounds, the transaction tail would fill up this field with the hash of the recent 1001 rounds
+ // of the txtail data. The catchpointTracker would be able to use that for calculating the catchpoint label.
+ txTailHash crypto.Digest
+
+ // serialized rounds deltas to be committed
+ txTailDeltas [][]byte
+
+ // txtail rounds deltas history size
+ txTailRetainSize uint64
stats telemetryspec.AccountsUpdateMetrics
updateStats bool
@@ -256,16 +290,23 @@ func (tr *trackerRegistry) initialize(l ledgerForTracker, trackers []ledgerTrack
tr.commitSyncerClosed = make(chan struct{})
tr.synchronousMode = db.SynchronousMode(cfg.LedgerSynchronousMode)
tr.accountsRebuildSynchronousMode = db.SynchronousMode(cfg.AccountsRebuildSynchronousMode)
+ tr.cfg = cfg
go tr.commitSyncer(tr.deferredCommits)
tr.trackers = append([]ledgerTracker{}, trackers...)
+ // accountUpdates and onlineAccounts are needed for replaying (called in later in loadFromDisk)
for _, tracker := range tr.trackers {
- if accts, ok := tracker.(*accountUpdates); ok {
- tr.accts = accts
- break
+ switch t := tracker.(type) {
+ case *accountUpdates:
+ tr.accts = t
+ case *onlineAccounts:
+ tr.acctsOnline = t
+ case *txTail:
+ tr.tail = t
}
}
+
return
}
@@ -283,16 +324,9 @@ func (tr *trackerRegistry) loadFromDisk(l ledgerForTracker) error {
}
}
- err := tr.initializeTrackerCaches(l)
+ err := tr.replay(l)
if err != nil {
- return fmt.Errorf("initializeTrackerCaches failed : %w", err)
- }
-
- // the votes have a special dependency on the account updates, so we need to initialize these separetly.
- tr.accts.voters = &votersTracker{}
- err = tr.accts.voters.loadFromDisk(l, tr.accts)
- if err != nil {
- err = fmt.Errorf("voters tracker failed to loadFromDisk : %w", err)
+ err = fmt.Errorf("initializeTrackerCaches failed : %w", err)
}
return err
}
@@ -321,16 +355,7 @@ func (tr *trackerRegistry) committedUpTo(rnd basics.Round) basics.Round {
return minBlock
}
-func (tr *trackerRegistry) scheduleCommit(blockqRound, maxLookback basics.Round) {
- dcc := &deferredCommitContext{
- deferredCommitRange: deferredCommitRange{
- lookback: maxLookback,
- },
- }
- cdr := &dcc.deferredCommitRange
-
- tr.mu.RLock()
- dbRound := tr.dbRound
+func (tr *trackerRegistry) produceCommittingTask(blockqRound basics.Round, dbRound basics.Round, cdr *deferredCommitRange) *deferredCommitRange {
for _, lt := range tr.trackers {
base := cdr.oldBase
offset := cdr.offset
@@ -345,6 +370,19 @@ func (tr *trackerRegistry) scheduleCommit(blockqRound, maxLookback basics.Round)
tr.log.Warnf("tracker %T modified oldBase %d that expected to be %d, dbRound %d, latestRound %d", lt, cdr.oldBase, base, dbRound, blockqRound)
}
}
+ return cdr
+}
+
+func (tr *trackerRegistry) scheduleCommit(blockqRound, maxLookback basics.Round) {
+ dcc := &deferredCommitContext{
+ deferredCommitRange: deferredCommitRange{
+ lookback: maxLookback,
+ },
+ }
+
+ tr.mu.RLock()
+ dbRound := tr.dbRound
+ cdr := tr.produceCommittingTask(blockqRound, dbRound, &dcc.deferredCommitRange)
if cdr != nil {
dcc.deferredCommitRange = *cdr
} else {
@@ -354,7 +392,7 @@ func (tr *trackerRegistry) scheduleCommit(blockqRound, maxLookback basics.Round)
// ( unless we're creating a catchpoint, in which case we want to flush it right away
// so that all the instances of the catchpoint would contain exactly the same data )
flushTime := time.Now()
- if dcc != nil && !flushTime.After(tr.lastFlushTime.Add(balancesFlushInterval)) && !dcc.isCatchpointRound && dcc.pendingDeltas < pendingDeltasFlushThreshold {
+ if dcc != nil && !flushTime.After(tr.lastFlushTime.Add(balancesFlushInterval)) && !dcc.catchpointFirstStage && !dcc.catchpointSecondStage && dcc.pendingDeltas < pendingDeltasFlushThreshold {
dcc = nil
}
tr.mu.RUnlock()
@@ -399,7 +437,10 @@ func (tr *trackerRegistry) commitSyncer(deferredCommits chan *deferredCommitCont
if !ok {
return
}
- tr.commitRound(commit)
+ err := tr.commitRound(commit)
+ if err != nil {
+ tr.log.Warnf("Could not commit round: %w", err)
+ }
case <-tr.ctx.Done():
// drain the pending commits queue:
drained := false
@@ -417,7 +458,7 @@ func (tr *trackerRegistry) commitSyncer(deferredCommits chan *deferredCommitCont
}
// commitRound commits the given deferredCommitContext via the trackers.
-func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) {
+func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error {
defer tr.accountsWriting.Done()
tr.mu.RLock()
@@ -431,7 +472,7 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) {
lt.handleUnorderedCommit(dcc)
}
tr.mu.RUnlock()
- return
+ return nil
}
// adjust the offset according to what happened meanwhile..
@@ -442,7 +483,7 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) {
// flush, without the commitRound have a chance of committing these rounds.
if offset == 0 {
tr.mu.RUnlock()
- return
+ return nil
}
dbRound = tr.dbRound
@@ -458,7 +499,7 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) {
if err != nil {
tr.log.Errorf(err.Error())
tr.mu.RUnlock()
- return
+ return err
}
}
tr.mu.RUnlock()
@@ -473,18 +514,13 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) {
}
}
- err = updateAccountsRound(tx, dbRound+basics.Round(offset))
- if err != nil {
- return err
- }
-
- return nil
+ return updateAccountsRound(tx, dbRound+basics.Round(offset))
})
ledgerCommitroundMicros.AddMicrosecondsSince(start, nil)
if err != nil {
tr.log.Warnf("unable to advance tracker db snapshot (%d-%d): %v", dbRound, dbRound+basics.Round(offset), err)
- return
+ return err
}
tr.mu.Lock()
@@ -499,30 +535,32 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) {
lt.postCommitUnlocked(tr.ctx, dcc)
}
+ return nil
}
-// initializeTrackerCaches fills up the accountUpdates cache with the most recent ~320 blocks ( on normal execution ).
+// replay fills up the accountUpdates cache with the most recent ~320 blocks ( on normal execution ).
// the method also support balances recovery in cases where the difference between the lastBalancesRound and the lastestBlockRound
// is far greater than 320; in these cases, it would flush to disk periodically in order to avoid high memory consumption.
-func (tr *trackerRegistry) initializeTrackerCaches(l ledgerForTracker) (err error) {
+func (tr *trackerRegistry) replay(l ledgerForTracker) (err error) {
lastestBlockRound := l.Latest()
lastBalancesRound := tr.dbRound
var blk bookkeeping.Block
var delta ledgercore.StateDelta
- if tr.accts == nil {
+ if tr.accts == nil || tr.acctsOnline == nil || tr.tail == nil {
return errMissingAccountUpdateTracker
}
accLedgerEval := accountUpdatesLedgerEvaluator{
au: tr.accts,
+ ao: tr.acctsOnline,
}
if lastBalancesRound < lastestBlockRound {
accLedgerEval.prevHeader, err = l.BlockHdr(lastBalancesRound)
if err != nil {
- return fmt.Errorf("unable to load block header %d : %w", lastBalancesRound, err)
+ return fmt.Errorf("trackerRegistry.replay: unable to load block header %d : %w", lastBalancesRound, err)
}
}
@@ -533,7 +571,7 @@ func (tr *trackerRegistry) initializeTrackerCaches(l ledgerForTracker) (err erro
select {
case <-writeAccountCacheMessageCompleted:
if err == nil {
- tr.log.Infof("initializeTrackerCaches completed initializing account data caches")
+ tr.log.Infof("trackerRegistry.replay completed initializing account data caches")
}
default:
}
@@ -553,7 +591,7 @@ func (tr *trackerRegistry) initializeTrackerCaches(l ledgerForTracker) (err erro
go func() {
select {
case <-time.After(initializingAccountCachesMessageTimeout):
- tr.log.Infof("initializeTrackerCaches is initializing account data caches")
+ tr.log.Infof("trackerRegistry.replay is initializing account data caches")
close(writeAccountCacheMessageCompleted)
case <-skipAccountCacheMessage:
}
@@ -596,11 +634,13 @@ func (tr *trackerRegistry) initializeTrackerCaches(l ledgerForTracker) (err erro
}
}()
+ maxAcctLookback := tr.cfg.MaxAcctLookback
+
for blk := range blocksStream {
delta, err = l.trackerEvalVerified(blk, &accLedgerEval)
if err != nil {
close(blockEvalFailed)
- err = fmt.Errorf("trackerEvalVerified failed : %w", err)
+ err = fmt.Errorf("trackerRegistry.replay: trackerEvalVerified failed : %w", err)
return
}
tr.newBlock(blk, delta)
@@ -609,7 +649,7 @@ func (tr *trackerRegistry) initializeTrackerCaches(l ledgerForTracker) (err erro
// 1. if we have loaded up more than initializeCachesRoundFlushInterval rounds since the last time we flushed the data to disk
// 2. if we completed the loading and we loaded up more than 320 rounds.
flushIntervalExceed := blk.Round()-lastFlushedRound > initializeCachesRoundFlushInterval
- loadCompleted := (lastestBlockRound == blk.Round() && lastBalancesRound+basics.Round(blk.ConsensusProtocol().MaxBalLookback) < lastestBlockRound)
+ loadCompleted := (lastestBlockRound == blk.Round() && lastBalancesRound+basics.Round(maxAcctLookback) < lastestBlockRound)
if flushIntervalExceed || loadCompleted {
// adjust the last flush time, so that we would not hold off the flushing due to "working too fast"
tr.lastFlushTime = time.Now().Add(-balancesFlushInterval)
@@ -618,7 +658,7 @@ func (tr *trackerRegistry) initializeTrackerCaches(l ledgerForTracker) (err erro
// switch to rebuild synchronous mode to improve performance
err0 := tr.dbs.Wdb.SetSynchronousMode(context.Background(), tr.accountsRebuildSynchronousMode, tr.accountsRebuildSynchronousMode >= db.SynchronousModeFull)
if err0 != nil {
- tr.log.Warnf("initializeTrackerCaches was unable to switch to rbuild synchronous mode : %v", err0)
+ tr.log.Warnf("trackerRegistry.replay was unable to switch to rbuild synchronous mode : %v", err0)
} else {
// flip the switch to rollback the synchronous mode once we're done.
rollbackSynchronousMode = true
@@ -628,25 +668,22 @@ func (tr *trackerRegistry) initializeTrackerCaches(l ledgerForTracker) (err erro
var roundsBehind basics.Round
// flush the account data
- tr.scheduleCommit(blk.Round(), basics.Round(config.Consensus[blk.BlockHeader.CurrentProtocol].MaxBalLookback))
+ tr.scheduleCommit(blk.Round(), basics.Round(maxAcctLookback))
// wait for the writing to complete.
tr.waitAccountsWriting()
- func() {
- tr.mu.RLock()
- defer tr.mu.RUnlock()
-
- // The au.dbRound after writing should be ~320 behind the block round.
- roundsBehind = blk.Round() - tr.dbRound
- }()
+ tr.mu.RLock()
+ // The au.dbRound after writing should be ~320 behind the block round (before shorter delta project)
+ roundsBehind = blk.Round() - tr.dbRound
+ tr.mu.RUnlock()
// are we too far behind ? ( taking into consideration the catchpoint writing, which can stall the writing for quite a bit )
if roundsBehind > initializeCachesRoundFlushInterval+basics.Round(catchpointInterval) {
// we're unable to persist changes. This is unexpected, but there is no point in keep trying batching additional changes since any further changes
// would just accumulate in memory.
close(blockEvalFailed)
- tr.log.Errorf("initializeTrackerCaches was unable to fill up the account caches accounts round = %d, block round = %d. See above error for more details.", blk.Round()-roundsBehind, blk.Round())
- err = fmt.Errorf("initializeTrackerCaches failed to initialize the account data caches")
+ tr.log.Errorf("trackerRegistry.replay was unable to fill up the account caches accounts round = %d, block round = %d. See above error for more details.", blk.Round()-roundsBehind, blk.Round())
+ err = fmt.Errorf("trackerRegistry.replay failed to initialize the account data caches")
return
}
@@ -663,7 +700,7 @@ func (tr *trackerRegistry) initializeTrackerCaches(l ledgerForTracker) (err erro
close(writeAccountCacheMessageCompleted)
default:
}
- tr.log.Infof("initializeTrackerCaches is still initializing account data caches, %d rounds loaded out of %d rounds", blk.Round()-lastBalancesRound, lastestBlockRound-lastBalancesRound)
+ tr.log.Infof("trackerRegistry.replay is still initializing account data caches, %d rounds loaded out of %d rounds", blk.Round()-lastBalancesRound, lastestBlockRound-lastBalancesRound)
lastProgressMessage = time.Now()
}
@@ -675,5 +712,4 @@ func (tr *trackerRegistry) initializeTrackerCaches(l ledgerForTracker) (err erro
err = blockRetrievalError
}
return
-
}
diff --git a/ledger/tracker_test.go b/ledger/tracker_test.go
index fe212ad9a..9e4c10415 100644
--- a/ledger/tracker_test.go
+++ b/ledger/tracker_test.go
@@ -22,6 +22,7 @@ import (
"database/sql"
"sync"
"testing"
+ "time"
"github.com/stretchr/testify/require"
@@ -65,13 +66,15 @@ func TestTrackerScheduleCommit(t *testing.T) {
au := &accountUpdates{}
ct := &catchpointTracker{}
+ ao := &onlineAccounts{}
au.initialize(conf)
ct.initialize(conf, ".")
+ ao.initialize(conf)
_, err := trackerDBInitialize(ml, false, ".")
a.NoError(err)
- ml.trackers.initialize(ml, []ledgerTracker{au, ct}, conf)
+ ml.trackers.initialize(ml, []ledgerTracker{au, ct, ao, &txTail{}}, conf)
defer ml.trackers.close()
err = ml.trackers.loadFromDisk(ml)
a.NoError(err)
@@ -81,13 +84,7 @@ func TestTrackerScheduleCommit(t *testing.T) {
<-ml.trackers.commitSyncerClosed
ml.trackers.commitSyncerClosed = nil
- // simulate situation when au returns smaller offset b/c of consecutive versions
- // and ct increses it
- // base = 1, offset = 100, lookback = 16
- // lastest = 1000
- // would give a large mostRecentCatchpointRound value => large newBase => larger offset
-
- expectedOffset := uint64(100)
+ expectedOffset := uint64(99)
blockqRound := basics.Round(1000)
lookback := basics.Round(16)
dbRound := basics.Round(1)
@@ -97,11 +94,15 @@ func TestTrackerScheduleCommit(t *testing.T) {
au.deltas = make([]ledgercore.AccountDeltas, int(blockqRound))
au.deltasAccum = make([]int, int(blockqRound))
au.versions = make([]protocol.ConsensusVersion, int(blockqRound))
+ ao.deltas = make([]ledgercore.AccountDeltas, int(blockqRound))
+ ao.onlineRoundParamsData = make([]ledgercore.OnlineRoundParamsData, int(blockqRound))
for i := 0; i <= int(expectedOffset); i++ {
au.versions[i] = protocol.ConsensusCurrentVersion
+ ao.onlineRoundParamsData[i] = ledgercore.OnlineRoundParamsData{CurrentProtocol: protocol.ConsensusCurrentVersion}
}
for i := int(expectedOffset) + 1; i < len(au.versions); i++ {
au.versions[i] = protocol.ConsensusFuture
+ ao.onlineRoundParamsData[i] = ledgercore.OnlineRoundParamsData{CurrentProtocol: protocol.ConsensusFuture}
}
au.accountsMu.Unlock()
@@ -117,6 +118,10 @@ func TestTrackerScheduleCommit(t *testing.T) {
a.NotNil(cdr)
a.Equal(expectedOffset, cdr.offset)
+ cdr = ao.produceCommittingTask(blockqRound, dbRound, cdr)
+ a.NotNil(cdr)
+ a.Equal(expectedOffset, cdr.offset)
+
cdr = ct.produceCommittingTask(blockqRound, dbRound, cdr)
a.NotNil(cdr)
// before the fix
@@ -126,6 +131,7 @@ func TestTrackerScheduleCommit(t *testing.T) {
// schedule the commit. au is expected to return offset 100 and
ml.trackers.mu.Lock()
ml.trackers.dbRound = dbRound
+ ml.trackers.lastFlushTime = time.Time{}
ml.trackers.mu.Unlock()
ml.trackers.scheduleCommit(blockqRound, lookback)
diff --git a/ledger/trackerdb.go b/ledger/trackerdb.go
index 6757e280e..3e8773225 100644
--- a/ledger/trackerdb.go
+++ b/ledger/trackerdb.go
@@ -27,17 +27,20 @@ import (
"time"
"github.com/algorand/go-algorand/config"
+
"github.com/algorand/go-algorand/crypto/merkletrie"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
)
type trackerDBParams struct {
initAccounts map[basics.Address]basics.AccountData
- initProto config.ConsensusParams
+ initProto protocol.ConsensusVersion
catchpointEnabled bool
dbPathPrefix string
+ blockDb db.Pair
}
type trackerDBSchemaInitializer struct {
@@ -63,6 +66,7 @@ type trackerDBInitParams struct {
// procedures to bring it up to the database schema supported by the binary.
func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefix string) (mgr trackerDBInitParams, err error) {
dbs := l.trackerDB()
+ bdbs := l.blockDB()
log := l.trackerLog()
lastestBlockRound := l.Latest()
@@ -73,9 +77,15 @@ func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefi
}
err = dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- tp := trackerDBParams{l.GenesisAccounts(), l.GenesisProto(), catchpointEnabled, dbPathPrefix}
+ tp := trackerDBParams{
+ initAccounts: l.GenesisAccounts(),
+ initProto: l.GenesisProtoVersion(),
+ catchpointEnabled: catchpointEnabled,
+ dbPathPrefix: dbPathPrefix,
+ blockDb: bdbs,
+ }
var err0 error
- mgr, err0 = trackerDBInitializeImpl(ctx, tx, tp, log)
+ mgr, err0 = runMigrations(ctx, tx, tp, log, accountDBVersion)
if err0 != nil {
return err0
}
@@ -86,11 +96,11 @@ func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefi
// Check for blocks DB and tracker DB un-sync
if lastBalancesRound > lastestBlockRound {
log.Warnf("trackerDBInitialize: resetting accounts DB (on round %v, but blocks DB's latest is %v)", lastBalancesRound, lastestBlockRound)
- err0 = accountsReset(tx)
+ err0 = accountsReset(ctx, tx)
if err0 != nil {
return err0
}
- mgr, err0 = trackerDBInitializeImpl(ctx, tx, tp, log)
+ mgr, err0 = runMigrations(ctx, tx, tp, log, accountDBVersion)
if err0 != nil {
return err0
}
@@ -101,10 +111,10 @@ func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefi
return
}
-// trackerDBInitializeImpl initializes the accounts DB if needed and return current account round.
+// runMigrations initializes the accounts DB if needed and return current account round.
// as part of the initialization, it tests the current database schema version, and perform upgrade
// procedures to bring it up to the database schema supported by the binary.
-func trackerDBInitializeImpl(ctx context.Context, tx *sql.Tx, params trackerDBParams, log logging.Logger) (mgr trackerDBInitParams, err error) {
+func runMigrations(ctx context.Context, tx *sql.Tx, params trackerDBParams, log logging.Logger, targetVersion int32) (mgr trackerDBInitParams, err error) {
// check current database version.
dbVersion, err := db.GetUserVersion(ctx, tx)
if err != nil {
@@ -119,15 +129,15 @@ func trackerDBInitializeImpl(ctx context.Context, tx *sql.Tx, params trackerDBPa
// if database version is greater than supported by current binary, write a warning. This would keep the existing
// fallback behavior where we could use an older binary iff the schema happen to be backward compatible.
- if tu.version() > accountDBVersion {
- tu.log.Warnf("trackerDBInitialize database schema version is %d, but algod supports only %d", tu.version(), accountDBVersion)
+ if tu.version() > targetVersion {
+ tu.log.Warnf("trackerDBInitialize database schema version is %d, but migration target version is %d", tu.version(), targetVersion)
}
- if tu.version() < accountDBVersion {
- tu.log.Infof("trackerDBInitialize upgrading database schema from version %d to version %d", tu.version(), accountDBVersion)
+ if tu.version() < targetVersion {
+ tu.log.Infof("trackerDBInitialize upgrading database schema from version %d to version %d", tu.version(), targetVersion)
// newDatabase is determined during the tables creations. If we're filling the database with accounts,
// then we set this variable to true, allowing some of the upgrades to be skipped.
- for tu.version() < accountDBVersion {
+ for tu.version() < targetVersion {
tu.log.Infof("trackerDBInitialize performing upgrade from version %d", tu.version())
// perform the initialization/upgrade
switch tu.version() {
@@ -167,6 +177,12 @@ func trackerDBInitializeImpl(ctx context.Context, tx *sql.Tx, params trackerDBPa
tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 5 : %v", err)
return
}
+ case 6:
+ err = tu.upgradeDatabaseSchema6(ctx, tx)
+ if err != nil {
+ tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 6 : %v", err)
+ return
+ }
default:
return trackerDBInitParams{}, fmt.Errorf("trackerDBInitialize unable to upgrade database from schema version %d", tu.schemaVersion)
}
@@ -213,7 +229,7 @@ func (tu trackerDBSchemaInitializer) version() int32 {
//
func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema0(ctx context.Context, tx *sql.Tx) (err error) {
tu.log.Infof("upgradeDatabaseSchema0 initializing schema")
- tu.newDatabase, err = accountsInit(tx, tu.initAccounts, tu.initProto)
+ tu.newDatabase, err = accountsInit(tx, tu.initAccounts, config.Consensus[tu.initProto])
if err != nil {
return fmt.Errorf("upgradeDatabaseSchema0 unable to initialize schema : %v", err)
}
@@ -254,31 +270,22 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema1(ctx context.Context
tu.log.Infof("upgradeDatabaseSchema1 resetting account hashes")
// reset the merkle trie
- err = resetAccountHashes(tx)
+ err = resetAccountHashes(ctx, tx)
if err != nil {
return fmt.Errorf("upgradeDatabaseSchema1 unable to reset account hashes : %v", err)
}
tu.log.Infof("upgradeDatabaseSchema1 preparing queries")
- // initialize a new accountsq with the incoming transaction.
- accountsq, err := accountsInitDbQueries(tx, tx)
- if err != nil {
- return fmt.Errorf("upgradeDatabaseSchema1 unable to prepare queries : %v", err)
- }
-
- // close the prepared statements when we're done with them.
- defer accountsq.close()
-
tu.log.Infof("upgradeDatabaseSchema1 resetting prior catchpoints")
// delete the last catchpoint label if we have any.
- _, err = accountsq.writeCatchpointStateString(ctx, catchpointStateLastCatchpoint, "")
+ err = writeCatchpointStateString(ctx, tx, catchpointStateLastCatchpoint, "")
if err != nil {
return fmt.Errorf("upgradeDatabaseSchema1 unable to clear prior catchpoint : %v", err)
}
tu.log.Infof("upgradeDatabaseSchema1 deleting stored catchpoints")
// delete catchpoints.
- err = deleteStoredCatchpoints(ctx, accountsq, tu.trackerDBParams.dbPathPrefix)
+ err = deleteStoredCatchpoints(ctx, tx, tu.trackerDBParams.dbPathPrefix)
if err != nil {
return fmt.Errorf("upgradeDatabaseSchema1 unable to delete stored catchpoints : %v", err)
}
@@ -308,7 +315,7 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema2(ctx context.Context
// upgradeDatabaseSchema3 upgrades the database schema from version 3 to version 4,
// adding the normalizedonlinebalance column to the accountbase table.
func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema3(ctx context.Context, tx *sql.Tx) (err error) {
- err = accountsAddNormalizedBalance(tx, tu.initProto)
+ err = accountsAddNormalizedBalance(tx, config.Consensus[tu.initProto])
if err != nil {
return err
}
@@ -403,7 +410,7 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema5(ctx context.Context
}
// reset the merkle trie
- err = resetAccountHashes(tx)
+ err = resetAccountHashes(ctx, tx)
if err != nil {
return fmt.Errorf("upgradeDatabaseSchema5 unable to reset account hashes : %v", err)
}
@@ -412,6 +419,90 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema5(ctx context.Context
return tu.setVersion(ctx, tx, 6)
}
+func (tu *trackerDBSchemaInitializer) deleteUnfinishedCatchpoint(ctx context.Context, tx *sql.Tx) error {
+ // Delete an unfinished catchpoint if there is one.
+ round, err := readCatchpointStateUint64(ctx, tx, catchpointStateWritingCatchpoint)
+ if err != nil {
+ return err
+ }
+ if round == 0 {
+ return nil
+ }
+
+ relCatchpointFilePath := filepath.Join(
+ CatchpointDirName,
+ makeCatchpointFilePath(basics.Round(round)))
+ err = removeSingleCatchpointFileFromDisk(tu.dbPathPrefix, relCatchpointFilePath)
+ if err != nil {
+ return err
+ }
+
+ return writeCatchpointStateUint64(ctx, tx, catchpointStateWritingCatchpoint, 0)
+}
+
+// upgradeDatabaseSchema6 upgrades the database schema from version 6 to version 7,
+// adding a new onlineaccounts table
+// TODO: onlineaccounts: upgrade as needed after switching to the final table version
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema6(ctx context.Context, tx *sql.Tx) (err error) {
+ err = accountsCreateOnlineAccountsTable(ctx, tx)
+ if err != nil {
+ return err
+ }
+
+ err = accountsCreateTxTailTable(ctx, tx)
+ if err != nil {
+ return err
+ }
+
+ err = accountsCreateOnlineRoundParamsTable(ctx, tx)
+ if err != nil {
+ return err
+ }
+
+ var lastProgressInfoMsg time.Time
+ const progressLoggingInterval = 5 * time.Second
+
+ migrationProcessLog := func(processed, total uint64) {
+ if time.Since(lastProgressInfoMsg) < progressLoggingInterval {
+ return
+ }
+ lastProgressInfoMsg = time.Now()
+ tu.log.Infof("upgradeDatabaseSchema6 upgraded %d out of %d accounts [ %3.1f%% ]", processed, total, float64(processed)*100.0/float64(total))
+ }
+ err = performOnlineAccountsTableMigration(ctx, tx, migrationProcessLog, tu.log)
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema6 unable to complete online account data migration : %w", err)
+ }
+
+ if !tu.newDatabase {
+ err = performTxTailTableMigration(ctx, tx, tu.blockDb.Rdb)
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema6 unable to complete transaction tail data migration : %w", err)
+ }
+ }
+
+ err = performOnlineRoundParamsTailMigration(ctx, tx, tu.blockDb.Rdb, tu.newDatabase, tu.initProto)
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema6 unable to complete online round params data migration : %w", err)
+ }
+
+ err = tu.deleteUnfinishedCatchpoint(ctx, tx)
+ if err != nil {
+ return err
+ }
+ err = accountsCreateCatchpointFirstStageInfoTable(ctx, tx)
+ if err != nil {
+ return err
+ }
+ err = accountsCreateUnfinishedCatchpointsTable(ctx, tx)
+ if err != nil {
+ return err
+ }
+
+ // update version
+ return tu.setVersion(ctx, tx, 7)
+}
+
// isDirEmpty returns if a given directory is empty or not.
func isDirEmpty(path string) (bool, error) {
dir, err := os.Open(path)
diff --git a/ledger/txtail.go b/ledger/txtail.go
index 84ea7ee22..4fa2ab0a4 100644
--- a/ledger/txtail.go
+++ b/ledger/txtail.go
@@ -21,79 +21,128 @@ import (
"database/sql"
"fmt"
+ "github.com/algorand/go-deadlock"
+
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
)
const initialLastValidArrayLen = 256
-type roundTxMembers struct {
+// enableTxTailHashes enables txtail data hashing for catchpoints.
+// enable by removing it as needed (phase 2 of the catchpoints re-work)
+const enableTxTailHashes = false
+
+type roundLeases struct {
txleases map[ledgercore.Txlease]basics.Round // map of transaction lease to when it expires
proto config.ConsensusParams
}
type txTail struct {
- recent map[basics.Round]roundTxMembers
+ recent map[basics.Round]roundLeases
+
+ // roundTailSerializedDeltas contains the rounds that need to be flushed to disk.
+ // It contain the serialized(encoded) form of the txTailRound. This field would remain
+ // maintained in this data structure up until being cleared out by postCommit
+ roundTailSerializedDeltas [][]byte
+
+ // roundTailHashes contains the recent (MaxTxnLife + DeeperBlockHeaderHistory + len(deltas)) hashes.
+ // The first entry matches that current tracker database round - (MaxTxnLife + DeeperBlockHeaderHistory) + 1
+ // the second to tracker database round - (MaxTxnLife + DeeperBlockHeaderHistory - 1) + 1, and so forth.
+ // See blockHeaderData description below for the indexing details.
+ //
+ // The layout for MaxTxnLife = 3 and 3 elements in in-memory deltas:
+ // ──────────────────┐
+ // maxTxnLife(3) + 1 ├────────────
+ // │ deltas
+ // │ 0 │ 1 │ 2 │ 3 │ 4 │ 5 │ 6 │ indices
+ // └───┴───┴───┴───┴───┴───┴───┘
+ // 3 4 5 6 7 8 9 rounds
+ // /|\
+ // │
+ // dbRound
+ // roundTailHashes is planned for catchpoints in order to include it into catchpoint file,
+ // and currently disabled by enableTxTailHashes switch.
+ roundTailHashes []crypto.Digest
+
+ // blockHeaderData contains the recent (MaxTxnLife + DeeperBlockHeaderHistory + len(deltas)) block header data.
+ // The oldest entry is lowestBlockHeaderRound = database round - (MaxTxnLife + DeeperBlockHeaderHistory) + 1
+ blockHeaderData map[basics.Round]bookkeeping.BlockHeader
+ // lowestBlockHeaderRound is the lowest round in blockHeaderData, used as a starting point for old entries removal
+ lowestBlockHeaderRound basics.Round
+
+ // tailMu is the synchronization mutex for accessing roundTailHashes, roundTailSerializedDeltas and blockHeaderData.
+ tailMu deadlock.RWMutex
lastValid map[basics.Round]map[transactions.Txid]struct{} // map tx.LastValid -> tx confirmed set
// duplicate detection queries with LastValid before
// lowWaterMark are not guaranteed to succeed
lowWaterMark basics.Round // the last round known to be committed to disk
+
+ // log copied from ledger
+ log logging.Logger
}
-func (t *txTail) loadFromDisk(l ledgerForTracker, _ basics.Round) error {
- latest := l.Latest()
- hdr, err := l.BlockHdr(latest)
- if err != nil {
- return fmt.Errorf("txTail: could not get latest block header: %v", err)
+func (t *txTail) loadFromDisk(l ledgerForTracker, dbRound basics.Round) error {
+ rdb := l.trackerDB().Rdb
+ t.log = l.trackerLog()
+
+ var roundData []*txTailRound
+ var roundTailHashes []crypto.Digest
+ var baseRound basics.Round
+ if dbRound > 0 {
+ err := rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ roundData, roundTailHashes, baseRound, err = loadTxTail(ctx, tx, dbRound)
+ return
+ })
+ if err != nil {
+ return err
+ }
}
- proto := config.Consensus[hdr.CurrentProtocol]
- // If the latest round is R, then any transactions from blocks strictly older than
- // R + 1 - proto.MaxTxnLife
- // could not be valid in the next round (R+1), and so are irrelevant.
- // Thus we load the txids from blocks R+1-maxTxnLife to R, inclusive
- old := (latest + 1).SubSaturate(basics.Round(proto.MaxTxnLife))
-
- t.lowWaterMark = latest
+ t.lowWaterMark = l.Latest()
t.lastValid = make(map[basics.Round]map[transactions.Txid]struct{})
+ t.recent = make(map[basics.Round]roundLeases)
- t.recent = make(map[basics.Round]roundTxMembers)
-
- // the roundsLastValids is a temporary map used during the execution of
+ // the lastValid is a temporary map used during the execution of
// loadFromDisk, allowing us to construct the lastValid maps in their
// optimal size. This would ensure that upon startup, we don't preallocate
// more memory than we truly need.
- roundsLastValids := make(map[basics.Round][]transactions.Txid)
+ lastValid := make(map[basics.Round][]transactions.Txid)
- for ; old <= latest; old++ {
- blk, err := l.Block(old)
- if err != nil {
- return err
- }
+ // the roundTailHashes and blockHeaderData need a single element to start with
+ // in order to allow lookups on zero offsets when they are empty (new database)
+ roundTailHashes = append([]crypto.Digest{{}}, roundTailHashes...)
+ blockHeaderData := make(map[basics.Round]bookkeeping.BlockHeader, len(roundData)+1)
- payset, err := blk.DecodePaysetFlat()
- if err != nil {
- return err
- }
+ t.lowestBlockHeaderRound = baseRound
+ for old := baseRound; old <= dbRound && dbRound > baseRound; old++ {
+ txTailRound := roundData[0]
+ consensusParams := config.Consensus[txTailRound.Hdr.CurrentProtocol]
- consensusParams := config.Consensus[blk.CurrentProtocol]
- t.recent[old] = roundTxMembers{
- txleases: make(map[ledgercore.Txlease]basics.Round, len(payset)),
+ t.recent[old] = roundLeases{
+ txleases: make(map[ledgercore.Txlease]basics.Round, len(txTailRound.Leases)),
proto: consensusParams,
}
- for _, txad := range payset {
- tx := txad.SignedTxn
- if consensusParams.SupportTransactionLeases && (tx.Txn.Lease != [32]byte{}) {
- t.recent[old].txleases[ledgercore.Txlease{Sender: tx.Txn.Sender, Lease: tx.Txn.Lease}] = tx.Txn.LastValid
+ if consensusParams.SupportTransactionLeases {
+ for _, rlease := range txTailRound.Leases {
+ if rlease.Lease != [32]byte{} {
+ key := ledgercore.Txlease{Sender: rlease.Sender, Lease: rlease.Lease}
+ t.recent[old].txleases[key] = txTailRound.LastValid[rlease.TxnIdx]
+ }
}
- if tx.Txn.LastValid > t.lowWaterMark {
- list := roundsLastValids[tx.Txn.LastValid]
+ }
+
+ for i := 0; i < len(txTailRound.LastValid); i++ {
+ if txTailRound.LastValid[i] > t.lowWaterMark {
+ list := lastValid[txTailRound.LastValid[i]]
// if the list reached capacity, resize.
if len(list) == cap(list) {
var newList []transactions.Txid
@@ -105,20 +154,30 @@ func (t *txTail) loadFromDisk(l ledgerForTracker, _ basics.Round) error {
copy(newList[:], list[:])
list = newList
}
- list = append(list, tx.ID())
- roundsLastValids[tx.Txn.LastValid] = list
+ list = append(list, txTailRound.TxnIDs[i])
+ lastValid[txTailRound.LastValid[i]] = list
}
}
+
+ blockHeaderData[old] = txTailRound.Hdr
+ roundData = roundData[1:]
}
// add all the entries in roundsLastValids to their corresponding map entry in t.lastValid
- for lastValid, list := range roundsLastValids {
+ for lastValid, list := range lastValid {
lastValueMap := make(map[transactions.Txid]struct{}, len(list))
for _, id := range list {
lastValueMap[id] = struct{}{}
}
t.lastValid[lastValid] = lastValueMap
}
+
+ if enableTxTailHashes {
+ t.roundTailHashes = roundTailHashes
+ }
+ t.blockHeaderData = blockHeaderData
+ t.roundTailSerializedDeltas = make([][]byte, 0)
+
return nil
}
@@ -133,18 +192,42 @@ func (t *txTail) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
return
}
- t.recent[rnd] = roundTxMembers{
+ var tail txTailRound
+ tail.TxnIDs = make([]transactions.Txid, len(delta.Txids))
+ tail.LastValid = make([]basics.Round, len(delta.Txids))
+ tail.Hdr = blk.BlockHeader
+
+ for txid, txnInc := range delta.Txids {
+ t.putLV(txnInc.LastValid, txid)
+ tail.TxnIDs[txnInc.Intra] = txid
+ tail.LastValid[txnInc.Intra] = txnInc.LastValid
+ if blk.Payset[txnInc.Intra].Txn.Lease != [32]byte{} {
+ tail.Leases = append(tail.Leases, txTailRoundLease{
+ Sender: blk.Payset[txnInc.Intra].Txn.Sender,
+ Lease: blk.Payset[txnInc.Intra].Txn.Lease,
+ TxnIdx: txnInc.Intra,
+ })
+ }
+ }
+ encodedTail, tailHash := tail.encode()
+
+ t.tailMu.Lock()
+ defer t.tailMu.Unlock()
+ t.recent[rnd] = roundLeases{
txleases: delta.Txleases,
proto: config.Consensus[blk.CurrentProtocol],
}
-
- for txid, lv := range delta.Txids {
- t.putLV(lv, txid)
+ t.roundTailSerializedDeltas = append(t.roundTailSerializedDeltas, encodedTail)
+ if enableTxTailHashes {
+ t.roundTailHashes = append(t.roundTailHashes, tailHash)
}
+ t.blockHeaderData[rnd] = blk.BlockHeader
}
func (t *txTail) committedUpTo(rnd basics.Round) (retRound, lookback basics.Round) {
- maxlife := basics.Round(t.recent[rnd].proto.MaxTxnLife)
+ proto := t.recent[rnd].proto
+ maxlife := basics.Round(proto.MaxTxnLife)
+
for r := range t.recent {
if r+maxlife < rnd {
delete(t.recent, r)
@@ -154,18 +237,68 @@ func (t *txTail) committedUpTo(rnd basics.Round) (retRound, lookback basics.Roun
delete(t.lastValid, t.lowWaterMark)
}
- return (rnd + 1).SubSaturate(maxlife), basics.Round(0)
+ deeperHistory := basics.Round(proto.DeeperBlockHeaderHistory)
+ return (rnd + 1).SubSaturate(maxlife + deeperHistory), basics.Round(0)
}
-func (t *txTail) prepareCommit(*deferredCommitContext) error {
- return nil
+func (t *txTail) prepareCommit(dcc *deferredCommitContext) (err error) {
+ dcc.txTailDeltas = make([][]byte, 0, dcc.offset)
+ t.tailMu.RLock()
+ for i := uint64(0); i < dcc.offset; i++ {
+ dcc.txTailDeltas = append(dcc.txTailDeltas, t.roundTailSerializedDeltas[i])
+ }
+ lowest := t.lowestBlockHeaderRound
+ proto, ok := config.Consensus[t.blockHeaderData[dcc.newBase].CurrentProtocol]
+ t.tailMu.RUnlock()
+ if !ok {
+ return fmt.Errorf("round %d not found in blockHeaderData: lowest=%d, base=%d", dcc.newBase, lowest, dcc.oldBase)
+ }
+ // get the MaxTxnLife from the consensus params of the latest round in this commit range
+ // preserve data for MaxTxnLife + DeeperBlockHeaderHistory
+ dcc.txTailRetainSize = proto.MaxTxnLife + proto.DeeperBlockHeaderHistory
+
+ if !dcc.catchpointFirstStage {
+ return nil
+ }
+
+ if enableTxTailHashes {
+ // update the dcc with the hash we'll need.
+ dcc.txTailHash, err = t.recentTailHash(dcc.offset, dcc.txTailRetainSize)
+ }
+ return
}
-func (t *txTail) commitRound(context.Context, *sql.Tx, *deferredCommitContext) error {
+func (t *txTail) commitRound(ctx context.Context, tx *sql.Tx, dcc *deferredCommitContext) error {
+ // determine the round to remove data
+ // the formula is similar to the committedUpTo: rnd + 1 - retain size
+ forgetBeforeRound := (dcc.newBase + 1).SubSaturate(basics.Round(dcc.txTailRetainSize))
+ baseRound := dcc.oldBase + 1
+ if err := txtailNewRound(ctx, tx, baseRound, dcc.txTailDeltas, forgetBeforeRound); err != nil {
+ return fmt.Errorf("txTail: unable to persist new round %d : %w", baseRound, err)
+ }
return nil
}
func (t *txTail) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+ t.tailMu.Lock()
+ defer t.tailMu.Unlock()
+
+ t.roundTailSerializedDeltas = t.roundTailSerializedDeltas[dcc.offset:]
+
+ // get the MaxTxnLife from the consensus params of the latest round in this commit range
+ // preserve data for MaxTxnLife + DeeperBlockHeaderHistory rounds
+ newLowestRound := (dcc.newBase + 1).SubSaturate(basics.Round(dcc.txTailRetainSize))
+ for t.lowestBlockHeaderRound < newLowestRound {
+ delete(t.blockHeaderData, t.lowestBlockHeaderRound)
+ t.lowestBlockHeaderRound++
+ }
+ if enableTxTailHashes {
+ newDeltaLength := len(t.roundTailSerializedDeltas)
+ firstTailIdx := len(t.roundTailHashes) - newDeltaLength - int(dcc.txTailRetainSize)
+ if firstTailIdx > 0 {
+ t.roundTailHashes = t.roundTailHashes[firstTailIdx:]
+ }
+ }
}
func (t *txTail) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
@@ -178,13 +311,13 @@ func (t *txTail) produceCommittingTask(committedRound basics.Round, dbRound basi
return dcr
}
-// txtailMissingRound is returned by checkDup when requested for a round number below the low watermark
-type txtailMissingRound struct {
+// errTxTailMissingRound is returned by checkDup when requested for a round number below the low watermark
+type errTxTailMissingRound struct {
round basics.Round
}
// Error satisfies builtin interface `error`
-func (t txtailMissingRound) Error() string {
+func (t errTxTailMissingRound) Error() string {
return fmt.Sprintf("txTail: tried to check for dup in missing round %d", t.round)
}
@@ -192,7 +325,7 @@ func (t txtailMissingRound) Error() string {
// TransactionInLedgerError / LeaseInLedgerError respectively.
func (t *txTail) checkDup(proto config.ConsensusParams, current basics.Round, firstValid basics.Round, lastValid basics.Round, txid transactions.Txid, txl ledgercore.Txlease) error {
if lastValid < t.lowWaterMark {
- return &txtailMissingRound{round: lastValid}
+ return &errTxTailMissingRound{round: lastValid}
}
if proto.SupportTransactionLeases && (txl.Lease != [32]byte{}) {
@@ -223,3 +356,30 @@ func (t *txTail) putLV(lastValid basics.Round, id transactions.Txid) {
}
t.lastValid[lastValid][id] = struct{}{}
}
+
+func (t *txTail) recentTailHash(offset uint64, retainSize uint64) (crypto.Digest, error) {
+ // prepare a buffer to hash.
+ buffer := make([]byte, (retainSize)*crypto.DigestSize)
+ bufIdx := 0
+ t.tailMu.RLock()
+ lastOffset := offset + retainSize // size of interval [offset, lastOffset) is retainSize
+ if lastOffset > uint64(len(t.roundTailHashes)) {
+ lastOffset = uint64(len(t.roundTailHashes))
+ }
+ for i := offset; i < lastOffset; i++ {
+ copy(buffer[bufIdx:], t.roundTailHashes[i][:])
+ bufIdx += crypto.DigestSize
+ }
+ t.tailMu.RUnlock()
+ return crypto.Hash(buffer), nil
+}
+
+func (t *txTail) blockHeader(rnd basics.Round) (bookkeeping.BlockHeader, bool) {
+ t.tailMu.RLock()
+ defer t.tailMu.RUnlock()
+ hdr, ok := t.blockHeaderData[rnd]
+ if !ok {
+ t.log.Warnf("txtail failed to fetch blockHeader from rnd: %d", rnd)
+ }
+ return hdr, ok
+}
diff --git a/ledger/txtail_test.go b/ledger/txtail_test.go
index baa632dee..409dac092 100644
--- a/ledger/txtail_test.go
+++ b/ledger/txtail_test.go
@@ -17,7 +17,9 @@
package ledger
import (
+ "context"
"errors"
+ "fmt"
"testing"
"github.com/stretchr/testify/require"
@@ -56,10 +58,12 @@ func TestTxTailCheckdup(t *testing.T) {
CurrentProtocol: protocol.ConsensusCurrentVersion,
},
},
+ Payset: make(transactions.Payset, 1),
}
- txids := make(map[transactions.Txid]basics.Round, 1)
- txids[transactions.Txid(crypto.Hash([]byte{byte(rnd % 256), byte(rnd / 256), byte(1)}))] = rnd + txvalidity
+ txids := make(map[transactions.Txid]ledgercore.IncludedTransactions, 1)
+ blk.Payset[0].Txn.Note = []byte{byte(rnd % 256), byte(rnd / 256), byte(1)}
+ txids[blk.Payset[0].Txn.ID()] = ledgercore.IncludedTransactions{LastValid: rnd + txvalidity, Intra: 0}
txleases := make(map[ledgercore.Txlease]basics.Round, 1)
txleases[ledgercore.Txlease{Sender: basics.Address(crypto.Hash([]byte{byte(rnd % 256), byte(rnd / 256), byte(2)})), Lease: crypto.Hash([]byte{byte(rnd % 256), byte(rnd / 256), byte(3)})}] = rnd + leasevalidity
@@ -72,12 +76,16 @@ func TestTxTailCheckdup(t *testing.T) {
// test txid duplication testing.
for rnd := basics.Round(1); rnd < lastRound; rnd++ {
- txid := transactions.Txid(crypto.Hash([]byte{byte(rnd % 256), byte(rnd / 256), byte(1)}))
- err := tail.checkDup(proto, basics.Round(0), basics.Round(0), rnd+txvalidity, txid, ledgercore.Txlease{})
+ Txn := transactions.Transaction{
+ Header: transactions.Header{
+ Note: []byte{byte(rnd % 256), byte(rnd / 256), byte(1)},
+ },
+ }
+ err := tail.checkDup(proto, basics.Round(0), basics.Round(0), rnd+txvalidity, Txn.ID(), ledgercore.Txlease{})
require.Errorf(t, err, "round %d", rnd)
if rnd < lastRound-lookback-txvalidity-1 {
- var missingRoundErr *txtailMissingRound
- require.Truef(t, errors.As(err, &missingRoundErr), "error a txtailMissingRound(%d) : %v ", rnd, err)
+ var missingRoundErr *errTxTailMissingRound
+ require.Truef(t, errors.As(err, &missingRoundErr), "error a errTxTailMissingRound(%d) : %v ", rnd, err)
} else {
var txInLedgerErr *ledgercore.TransactionInLedgerError
require.Truef(t, errors.As(err, &txInLedgerErr), "error a TransactionInLedgerError(%d) : %v ", rnd, err)
@@ -90,8 +98,8 @@ func TestTxTailCheckdup(t *testing.T) {
err := tail.checkDup(proto, rnd, basics.Round(0), rnd, transactions.Txid{}, lease)
require.Errorf(t, err, "round %d", rnd)
if rnd < lastRound-lookback-1 {
- var missingRoundErr *txtailMissingRound
- require.Truef(t, errors.As(err, &missingRoundErr), "error a txtailMissingRound(%d) : %v ", rnd, err)
+ var missingRoundErr *errTxTailMissingRound
+ require.Truef(t, errors.As(err, &missingRoundErr), "error a errTxTailMissingRound(%d) : %v ", rnd, err)
} else {
var leaseInLedgerErr *ledgercore.LeaseInLedgerError
require.Truef(t, errors.As(err, &leaseInLedgerErr), "error a LeaseInLedgerError(%d) : %v ", rnd, err)
@@ -101,19 +109,20 @@ func TestTxTailCheckdup(t *testing.T) {
type txTailTestLedger struct {
Ledger
+ protoVersion protocol.ConsensusVersion
}
const testTxTailValidityRange = 200
const testTxTailTxnPerRound = 150
func (t *txTailTestLedger) Latest() basics.Round {
- return basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].MaxTxnLife + 10)
+ return basics.Round(config.Consensus[t.protoVersion].MaxTxnLife + 10)
}
func (t *txTailTestLedger) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) {
return bookkeeping.BlockHeader{
UpgradeState: bookkeeping.UpgradeState{
- CurrentProtocol: protocol.ConsensusCurrentVersion,
+ CurrentProtocol: t.protoVersion,
},
}, nil
}
@@ -122,7 +131,7 @@ func (t *txTailTestLedger) Block(r basics.Round) (bookkeeping.Block, error) {
blk := bookkeeping.Block{
BlockHeader: bookkeeping.BlockHeader{
UpgradeState: bookkeeping.UpgradeState{
- CurrentProtocol: protocol.ConsensusCurrentVersion,
+ CurrentProtocol: t.protoVersion,
},
Round: r,
},
@@ -134,6 +143,40 @@ func (t *txTailTestLedger) Block(r basics.Round) (bookkeeping.Block, error) {
return blk, nil
}
+
+func (t *txTailTestLedger) initialize(ts *testing.T, protoVersion protocol.ConsensusVersion) error {
+ // create a corresponding blockdb.
+ inMemory := true
+ t.blockDBs, _ = dbOpenTest(ts, inMemory)
+ t.trackerDBs, _ = dbOpenTest(ts, inMemory)
+ t.protoVersion = protoVersion
+
+ tx, err := t.trackerDBs.Wdb.Handle.Begin()
+ require.NoError(ts, err)
+
+ accts := ledgertesting.RandomAccounts(20, true)
+ proto := config.Consensus[protoVersion]
+ newDB := accountsInitTest(ts, tx, accts, protoVersion)
+ require.True(ts, newDB)
+ _, err = accountsInit(tx, accts, proto)
+ require.NoError(ts, err)
+
+ roundData := make([][]byte, 0, proto.MaxTxnLife)
+ startRound := t.Latest() - basics.Round(proto.MaxTxnLife) + 1
+ for i := startRound; i <= t.Latest(); i++ {
+ blk, err := t.Block(i)
+ require.NoError(ts, err)
+ tail, err := txTailRoundFromBlock(blk)
+ require.NoError(ts, err)
+ encoded, _ := tail.encode()
+ roundData = append(roundData, encoded)
+ }
+ err = txtailNewRound(context.Background(), tx, startRound, roundData, 0)
+ require.NoError(ts, err)
+ tx.Commit()
+ return nil
+}
+
func makeTxTailTestTransaction(r basics.Round, txnIdx int) (txn transactions.SignedTxnInBlock) {
txn.Txn.FirstValid = r
txn.Txn.LastValid = r + testTxTailValidityRange
@@ -152,8 +195,9 @@ func TestTxTailLoadFromDisk(t *testing.T) {
partitiontest.PartitionTest(t)
var ledger txTailTestLedger
txtail := txTail{}
+ require.NoError(t, ledger.initialize(t, protocol.ConsensusCurrentVersion))
- err := txtail.loadFromDisk(&ledger, 0)
+ err := txtail.loadFromDisk(&ledger, ledger.Latest())
require.NoError(t, err)
require.Equal(t, int(config.Consensus[protocol.ConsensusCurrentVersion].MaxTxnLife), len(txtail.recent))
require.Equal(t, testTxTailValidityRange, len(txtail.lastValid))
@@ -174,7 +218,7 @@ func TestTxTailLoadFromDisk(t *testing.T) {
if r >= ledger.Latest()-testTxTailValidityRange {
require.Equal(t, ledgercore.MakeLeaseInLedgerError(txn.Txn.ID(), txl), dupResult)
} else {
- require.Equal(t, &txtailMissingRound{round: txn.Txn.LastValid}, dupResult)
+ require.Equal(t, &errTxTailMissingRound{round: txn.Txn.LastValid}, dupResult)
}
} else {
// transaction has no lease
@@ -189,8 +233,137 @@ func TestTxTailLoadFromDisk(t *testing.T) {
require.Nil(t, dupResult)
}
} else {
- require.Equal(t, &txtailMissingRound{round: txn.Txn.LastValid}, dupResult)
+ require.Equal(t, &errTxTailMissingRound{round: txn.Txn.LastValid}, dupResult)
+ }
+ }
+ }
+}
+
+func TestTxTailDeltaTracking(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ for _, protoVersion := range []protocol.ConsensusVersion{protocol.ConsensusV32, protocol.ConsensusFuture} {
+ t.Run(string(protoVersion), func(t *testing.T) {
+
+ var ledger txTailTestLedger
+ txtail := txTail{}
+ require.NoError(t, ledger.initialize(t, protoVersion))
+
+ err := txtail.loadFromDisk(&ledger, ledger.Latest())
+ require.NoError(t, err)
+ fmt.Printf("%d, %s\n", len(txtail.recent), protoVersion)
+ require.Equal(t, int(config.Consensus[protoVersion].MaxTxnLife), len(txtail.recent))
+ require.Equal(t, testTxTailValidityRange, len(txtail.lastValid))
+ require.Equal(t, ledger.Latest(), txtail.lowWaterMark)
+
+ var lease [32]byte
+ for i := int(ledger.Latest()) + 1; i < int(config.Consensus[protoVersion].MaxTxnLife)*3; i++ {
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(i),
+ TimeStamp: int64(i << 10),
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: protoVersion,
+ },
+ },
+ Payset: make(transactions.Payset, 1),
+ }
+ sender := &basics.Address{}
+ sender[0] = byte(i)
+ sender[1] = byte(i >> 8)
+ sender[2] = byte(i >> 16)
+ blk.Payset[0].Txn.Sender = *sender
+ blk.Payset[0].Txn.Lease = lease
+ deltas := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, 0, 0)
+ deltas.Txids[blk.Payset[0].Txn.ID()] = ledgercore.IncludedTransactions{
+ LastValid: basics.Round(i + 50),
+ Intra: 0,
+ }
+ deltas.Txleases[ledgercore.Txlease{Sender: blk.Payset[0].Txn.Sender, Lease: blk.Payset[0].Txn.Lease}] = basics.Round(i + 50)
+
+ txtail.newBlock(blk, deltas)
+ txtail.committedUpTo(basics.Round(i))
+ dcc := &deferredCommitContext{
+ deferredCommitRange: deferredCommitRange{
+ oldBase: basics.Round(i - 1),
+ offset: 1,
+ catchpointFirstStage: true,
+ },
+ newBase: basics.Round(i),
+ }
+ err = txtail.prepareCommit(dcc)
+ require.NoError(t, err)
+
+ tx, err := ledger.trackerDBs.Wdb.Handle.Begin()
+ require.NoError(t, err)
+
+ err = txtail.commitRound(context.Background(), tx, dcc)
+ require.NoError(t, err)
+ tx.Commit()
+ proto := config.Consensus[protoVersion]
+ retainSize := proto.MaxTxnLife + proto.DeeperBlockHeaderHistory
+ if uint64(i) > proto.MaxTxnLife*2 {
+ // validate internal storage length.
+ require.Equal(t, 1, len(txtail.roundTailSerializedDeltas))
+ require.Equal(t, int(retainSize+1), len(txtail.blockHeaderData)) // retainSize + 1 in-memory delta
+ if enableTxTailHashes {
+ require.Equal(t, int(retainSize+1), len(txtail.roundTailHashes))
+ }
+ }
+ txtail.postCommit(context.Background(), dcc)
+ if uint64(i) > proto.MaxTxnLife*2 {
+ // validate internal storage length.
+ require.Zero(t, len(txtail.roundTailSerializedDeltas))
+ require.Equal(t, int(retainSize), len(txtail.blockHeaderData))
+ if enableTxTailHashes {
+ require.Equal(t, int(retainSize), len(txtail.roundTailHashes))
+ }
+ }
+ }
+ })
+ }
+}
+
+// BenchmarkTxTailBlockHeaderCache adds 2M random blocks by calling
+// newBlock and postCommit on txTail tracker, and reports memory allocations
+func BenchmarkTxTailBlockHeaderCache(b *testing.B) {
+ const numBlocks = 2_000_000
+ b.ReportAllocs()
+
+ accts := ledgertesting.RandomAccounts(10, false)
+ ledger := makeMockLedgerForTracker(b, true, 1, protocol.ConsensusCurrentVersion, []map[basics.Address]basics.AccountData{accts})
+ tail := txTail{}
+ require.NoError(b, tail.loadFromDisk(ledger, 0))
+
+ dbRound := basics.Round(0)
+ const lookback = 8
+ for i := 1; i < numBlocks+1; i++ {
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(i),
+ TimeStamp: int64(i << 10),
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: protocol.ConsensusCurrentVersion,
+ },
+ },
+ }
+ tail.newBlock(blk, ledgercore.StateDelta{})
+
+ if i%10 == 0 || i == numBlocks {
+ offset := uint64(i - int(dbRound) - lookback)
+ dcc := &deferredCommitContext{
+ deferredCommitRange: deferredCommitRange{
+ offset: offset,
+ oldBase: dbRound,
+ lookback: lookback,
+ },
+ newBase: dbRound + basics.Round(offset),
}
+ err := tail.prepareCommit(dcc)
+ require.NoError(b, err)
+ tail.postCommit(context.Background(), dcc)
+ dbRound = dcc.newBase
+ require.Less(b, len(tail.blockHeaderData), 1001+10)
}
}
}
diff --git a/ledger/voters.go b/ledger/voters.go
index f3b36b603..d0a76a6cd 100644
--- a/ledger/voters.go
+++ b/ledger/voters.go
@@ -18,6 +18,7 @@ package ledger
import (
"fmt"
+ "github.com/algorand/go-algorand/stateproof"
"sync"
"github.com/algorand/go-algorand/config"
@@ -27,84 +28,95 @@ import (
"github.com/algorand/go-algorand/protocol"
)
-// The votersTracker maintains the Merkle tree for the most recent
-// commitments to online accounts for compact certificates.
+// The votersTracker maintains the vector commitment for the most recent
+// commitments to online accounts for state proofs.
//
-// We maintain multiple Merkle trees: we might commit to a new Merkle tree in
-// block X, but we need the Merkle tree from block X-params.CompactCertBlocks
-// to build the compact certificate for block X.
+// We maintain multiple vector commitment: we might commit to a new VC in
+// block X, but we need the VC from block X-params.StateProofBlocks
+// to build the state proof for block X.
//
// votersTracker is kind-of like a tracker, but hangs off the acctupdates
// rather than a direct ledger tracker. We don't have an explicit interface
// for such an "accounts tracker" yet, however.
type votersTracker struct {
- // round contains the top online accounts in a given round.
+ // votersForRoundCache contains the top online accounts in a given Round.
//
- // To avoid increasing block latency, we include a Merkle commitment
+ // To avoid increasing block latency, we include a vector commitment
// to the top online accounts as of block X in the block header of
- // block X+CompactCertVotersLookback. This gives each node some time
- // to construct this Merkle tree, before its root is needed in a block.
+ // block X+StateProofVotersLookback. This gives each node some time
+ // to construct this vector commitment, before its root is needed in a block.
//
- // This round map is indexed by the block X, using the terminology from
- // the above example, to be used in X+CompactCertVotersLookback.
+ // This votersForRoundCache map is indexed by the block X, using the terminology from
+ // the above example, to be used in X+StateProofVotersLookback.
//
- // We maintain round entries for two reasons:
+ // We maintain votersForRoundCache entries for two reasons:
//
// The first is to maintain the tree for an upcoming block -- that is,
// if X+Loookback<Latest. The block evaluator can ask for the root of
// the tree to propose and validate a block.
//
- // The second is to construct compact certificates. Compact certificates
- // are formed for blocks that are a multiple of CompactCertRounds, using
- // the Merkle commitment to online accounts from the previous such block.
- // Thus, we maintain X in the round map until we form a compact certificate
- // for round X+CompactCertVotersLookback+CompactCertRounds.
- round map[basics.Round]*ledgercore.VotersForRound
-
- l ledgerForTracker
- au *accountUpdates
+ // The second is to construct state proof. State proofs
+ // are formed for blocks that are a multiple of StateProofInterval, using
+ // the vector commitment to online accounts from the previous such block.
+ // Thus, we maintain X in the votersForRoundCache map until we form a stateproof
+ // for round X+StateProofVotersLookback+StateProofInterval.
+ //
+ // In case state proof chain stalls this map would be bounded to StateProofMaxRecoveryIntervals + 3
+ // + 1 - since votersForRoundCache needs to contain an entry for a future state proof
+ // + 1 - since votersForRoundCache needs to contain an entry to verify the earliest state proof
+ // in the recovery interval. i.e. it needs to have an entry for R-StateProofMaxRecoveryIntervals-StateProofInterval
+ // to verify R-StateProofMaxRecoveryIntervals
+ // + 1 would only appear if the sampled round R is: interval - lookback < R < interval.
+ // in this case, the tracker would not yet remove the old one but will create a new one for future state proof.
+ votersForRoundCache map[basics.Round]*ledgercore.VotersForRound
+
+ l ledgerForTracker
+ onlineAccountsFetcher ledgercore.OnlineAccountsFetcher
// loadWaitGroup syncronizing the completion of the loadTree call so that we can
// shutdown the tracker without leaving any running go-routines.
loadWaitGroup sync.WaitGroup
}
-// votersRoundForCertRound computes the round number whose voting participants
-// will be used to sign the compact cert for certRnd.
-func votersRoundForCertRound(certRnd basics.Round, proto config.ConsensusParams) basics.Round {
- // To form a compact certificate for round certRnd,
- // we need a commitment to the voters CompactCertRounds
+// votersRoundForStateProofRound computes the round number whose voting participants
+// will be used to sign the state proof for stateProofRnd.
+func votersRoundForStateProofRound(stateProofRnd basics.Round, proto *config.ConsensusParams) basics.Round {
+ // To form a state proof on period that ends on stateProofRnd,
+ // we need a commitment to the voters StateProofInterval rounds
// before that, and the voters information from
- // CompactCertVotersLookback before that.
- return certRnd.SubSaturate(basics.Round(proto.CompactCertRounds)).SubSaturate(basics.Round(proto.CompactCertVotersLookback))
+ // StateProofVotersLookback before that.
+ return stateProofRnd.SubSaturate(basics.Round(proto.StateProofInterval)).SubSaturate(basics.Round(proto.StateProofVotersLookback))
}
-func (vt *votersTracker) loadFromDisk(l ledgerForTracker, au *accountUpdates) error {
+func (vt *votersTracker) loadFromDisk(l ledgerForTracker, fetcher ledgercore.OnlineAccountsFetcher, latestDbRound basics.Round) error {
vt.l = l
- vt.au = au
- vt.round = make(map[basics.Round]*ledgercore.VotersForRound)
+ vt.votersForRoundCache = make(map[basics.Round]*ledgercore.VotersForRound)
+ vt.onlineAccountsFetcher = fetcher
- latest := l.Latest()
- hdr, err := l.BlockHdr(latest)
+ latestRoundInLedger := l.Latest()
+ hdr, err := l.BlockHdr(latestRoundInLedger)
if err != nil {
return err
}
proto := config.Consensus[hdr.CurrentProtocol]
- if proto.CompactCertRounds == 0 || hdr.CompactCert[protocol.CompactCertBasic].CompactCertNextRound == 0 {
+ if proto.StateProofInterval == 0 || hdr.StateProofTracking[protocol.StateProofBasic].StateProofNextRound == 0 {
// Disabled, nothing to load.
return nil
}
- startR := votersRoundForCertRound(hdr.CompactCert[protocol.CompactCertBasic].CompactCertNextRound, proto)
+ startR := stateproof.GetOldestExpectedStateProof(&hdr)
+ startR = votersRoundForStateProofRound(startR, &proto)
// Sanity check: we should never underflow or even reach 0.
if startR == 0 {
return fmt.Errorf("votersTracker: underflow: %d - %d - %d = %d",
- hdr.CompactCert[protocol.CompactCertBasic].CompactCertNextRound, proto.CompactCertRounds, proto.CompactCertVotersLookback, startR)
+ hdr.StateProofTracking[protocol.StateProofBasic].StateProofNextRound, proto.StateProofInterval, proto.StateProofVotersLookback, startR)
}
- for r := startR; r <= latest; r += basics.Round(proto.CompactCertRounds) {
+ // we recreate the trees for old rounds. we stop at latestDbRound (where latestDbRound <= latestRoundInLedger) since
+ // future blocks would be given as part of the replay
+ for r := startR; r <= latestDbRound; r += basics.Round(proto.StateProofInterval) {
hdr, err = l.BlockHdr(r)
if err != nil {
return err
@@ -119,35 +131,33 @@ func (vt *votersTracker) loadFromDisk(l ledgerForTracker, au *accountUpdates) er
func (vt *votersTracker) loadTree(hdr bookkeeping.BlockHeader) {
r := hdr.Round
- _, ok := vt.round[r]
+ _, ok := vt.votersForRoundCache[r]
if ok {
// Already loaded.
return
}
proto := config.Consensus[hdr.CurrentProtocol]
- if proto.CompactCertRounds == 0 {
- // No compact certs.
+ if proto.StateProofInterval == 0 {
+ // No StateProofs.
return
}
tr := ledgercore.MakeVotersForRound()
tr.Proto = proto
- vt.round[r] = tr
+ vt.votersForRoundCache[r] = tr
vt.loadWaitGroup.Add(1)
go func() {
defer vt.loadWaitGroup.Done()
- onlineAccounts := ledgercore.TopOnlineAccounts(vt.au.onlineTop)
- err := tr.LoadTree(onlineAccounts, hdr)
+ err := tr.LoadTree(vt.onlineAccountsFetcher, hdr)
if err != nil {
vt.l.trackerLog().Warnf("votersTracker.loadTree(%d): %v", hdr.Round, err)
tr.BroadcastError(err)
}
}()
- return
}
// close waits until all the internal spawned go-routines are done before returning, allowing clean
@@ -158,42 +168,63 @@ func (vt *votersTracker) close() {
func (vt *votersTracker) newBlock(hdr bookkeeping.BlockHeader) {
proto := config.Consensus[hdr.CurrentProtocol]
- if proto.CompactCertRounds == 0 {
- // No compact certs.
+ if proto.StateProofInterval == 0 {
+ // No StateProofs
return
}
- // Check if any blocks can be forgotten because the compact cert is available.
- for r, tr := range vt.round {
- commitRound := r + basics.Round(tr.Proto.CompactCertVotersLookback)
- certRound := commitRound + basics.Round(tr.Proto.CompactCertRounds)
- if certRound < hdr.CompactCert[protocol.CompactCertBasic].CompactCertNextRound {
- delete(vt.round, r)
- }
- }
+ vt.removeOldVoters(hdr)
// This might be a block where we snapshot the online participants,
- // to eventually construct a merkle tree for commitment in a later
+ // to eventually construct a vector commitment in a later
// block.
r := uint64(hdr.Round)
- if (r+proto.CompactCertVotersLookback)%proto.CompactCertRounds == 0 {
- _, ok := vt.round[basics.Round(r)]
- if ok {
- vt.l.trackerLog().Errorf("votersTracker.newBlock: round %d already present", r)
- } else {
- vt.loadTree(hdr)
+ if (r+proto.StateProofVotersLookback)%proto.StateProofInterval != 0 {
+ return
+ }
+
+ _, ok := vt.votersForRoundCache[basics.Round(r)]
+ if ok {
+ vt.l.trackerLog().Errorf("votersTracker.newBlock: round %d already present", r)
+ } else {
+ vt.loadTree(hdr)
+ }
+
+}
+
+// removeOldVoters removes voters data form the tracker and allows the database to commit previous rounds.
+// voters would be removed if one of the two condition is met
+// 1 - Voters are for a round which was already been confirmed by stateproof
+// 2 - Voters are for a round which is older than the allowed recovery interval.
+// notice that if state proof chain is delayed, votersForRoundCache will not be larger than
+// StateProofMaxRecoveryIntervals + 1
+// ( In order to be able to build and verify X stateproofs back we need X + 1 voters data )
+//
+// It is possible to optimize this function and not to travers votersForRoundCache on every round.
+// Since the map is small (Usually 0 - 2 elements and up to StateProofMaxRecoveryIntervals) we decided to keep the code simple
+// and check for deletion in every round.
+func (vt *votersTracker) removeOldVoters(hdr bookkeeping.BlockHeader) {
+ lowestStateProofRound := stateproof.GetOldestExpectedStateProof(&hdr)
+
+ for r, tr := range vt.votersForRoundCache {
+ commitRound := r + basics.Round(tr.Proto.StateProofVotersLookback)
+ stateProofRound := commitRound + basics.Round(tr.Proto.StateProofInterval)
+
+ // we remove voters that are no longer needed (i.e StateProofNextRound is larger ) or older than the recover period
+ if stateProofRound < lowestStateProofRound {
+ delete(vt.votersForRoundCache, r)
}
}
}
-// lowestRound() returns the lowest round state (blocks and accounts) needed by
+// lowestRound() returns the lowest votersForRoundCache state (blocks and accounts) needed by
// the votersTracker in case of a restart. The accountUpdates tracker will
// not delete account state before this round, so that after a restart, it's
// possible to reconstruct the votersTracker. If votersTracker does
// not need any blocks, it returns base.
func (vt *votersTracker) lowestRound(base basics.Round) basics.Round {
minRound := base
- for r := range vt.round {
+ for r := range vt.votersForRoundCache {
if r < minRound {
minRound = r
}
@@ -203,13 +234,13 @@ func (vt *votersTracker) lowestRound(base basics.Round) basics.Round {
// getVoters() returns the top online participants from round r.
func (vt *votersTracker) getVoters(r basics.Round) (*ledgercore.VotersForRound, error) {
- tr, ok := vt.round[r]
+ tr, ok := vt.votersForRoundCache[r]
if !ok {
- // Not tracked: compact certs not enabled.
+ // Not tracked: stateproofs not enabled.
return nil, nil
}
- // Wait for the Merkle tree to be constructed.
+ // Wait for the vc to be constructed.
err := tr.Wait()
if err != nil {
return nil, err
diff --git a/ledger/voters_test.go b/ledger/voters_test.go
new file mode 100644
index 000000000..b13b11d97
--- /dev/null
+++ b/ledger/voters_test.go
@@ -0,0 +1,231 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func addBlockToAccountsUpdate(blk bookkeeping.Block, ao *onlineAccounts) {
+ updates := ledgercore.MakeAccountDeltas(1)
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
+ ao.newBlock(blk, delta)
+}
+
+func TestVoterTrackerDeleteVotersAfterStateproofConfirmed(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ intervalForTest := config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval
+ numOfIntervals := config.Consensus[protocol.ConsensusCurrentVersion].StateProofMaxRecoveryIntervals - 1
+ lookbackForTest := config.Consensus[protocol.ConsensusCurrentVersion].StateProofVotersLookback
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+
+ pooldata := basics.AccountData{}
+ pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ pooldata.Status = basics.NotParticipating
+ accts[0][testPoolAddr] = pooldata
+
+ sinkdata := basics.AccountData{}
+ sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ sinkdata.Status = basics.NotParticipating
+ accts[0][testSinkAddr] = sinkdata
+
+ ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ au, ao := newAcctUpdates(t, ml, conf)
+ defer au.close()
+ defer ao.close()
+
+ i := uint64(1)
+ // adding blocks to the voterstracker (in order to pass the numOfIntervals*stateproofInterval we add 1)
+ for ; i < (numOfIntervals*intervalForTest)+1; i++ {
+ block := randomBlock(basics.Round(i))
+ block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ addBlockToAccountsUpdate(block.block, ao)
+ }
+
+ a.Equal(numOfIntervals, uint64(len(ao.voters.votersForRoundCache)))
+ a.Equal(basics.Round(intervalForTest-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
+
+ block := randomBlock(basics.Round(i))
+ i++
+ block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
+
+ // committing stateproof that confirm the (numOfIntervals - 1)th interval
+ var stateTracking bookkeeping.StateProofTrackingData
+ stateTracking.StateProofNextRound = basics.Round((numOfIntervals - 1) * intervalForTest)
+ block.block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
+ block.block.BlockHeader.StateProofTracking[protocol.StateProofBasic] = stateTracking
+ addBlockToAccountsUpdate(block.block, ao)
+
+ // the tracker should have 3 entries
+ // - voters to confirm the numOfIntervals - 1 th interval
+ // - voters to confirm the numOfIntervals th interval
+ // - voters to confirm the numOfIntervals + 1 th interval
+ a.Equal(uint64(3), uint64(len(ao.voters.votersForRoundCache)))
+ a.Equal(basics.Round((numOfIntervals-2)*intervalForTest-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
+
+ block = randomBlock(basics.Round(i))
+ block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ stateTracking.StateProofNextRound = basics.Round(numOfIntervals * intervalForTest)
+ block.block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
+ block.block.BlockHeader.StateProofTracking[protocol.StateProofBasic] = stateTracking
+ addBlockToAccountsUpdate(block.block, ao)
+
+ a.Equal(uint64(2), uint64(len(ao.voters.votersForRoundCache)))
+ a.Equal(basics.Round((numOfIntervals-1)*intervalForTest-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
+}
+
+func TestLimitVoterTracker(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ intervalForTest := config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval
+ recoveryIntervalForTests := config.Consensus[protocol.ConsensusCurrentVersion].StateProofMaxRecoveryIntervals
+ lookbackForTest := config.Consensus[protocol.ConsensusCurrentVersion].StateProofVotersLookback
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+
+ pooldata := basics.AccountData{}
+ pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ pooldata.Status = basics.NotParticipating
+ accts[0][testPoolAddr] = pooldata
+
+ sinkdata := basics.AccountData{}
+ sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ sinkdata.Status = basics.NotParticipating
+ accts[0][testSinkAddr] = sinkdata
+
+ ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ au, ao := newAcctUpdates(t, ml, conf)
+ defer au.close()
+ defer ao.close()
+
+ i := uint64(1)
+
+ // since the first state proof is expected to happen on stateproofInterval*2 we would start give-up on state proofs
+ // after intervalForTest*(recoveryIntervalForTests+3)
+
+ // should not give up on any state proof
+ for ; i < intervalForTest*(recoveryIntervalForTests+2); i++ {
+ block := randomBlock(basics.Round(i))
+ block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ addBlockToAccountsUpdate(block.block, ao)
+ }
+
+ // the votersForRoundCache should contains recoveryIntervalForTests+2 elements:
+ // recoveryIntervalForTests - since this is the recovery interval
+ // + 1 - since votersForRoundCache would contain the votersForRound for the next state proof to come
+ // + 1 - in order to confirm recoveryIntervalForTests number of state proofs we need recoveryIntervalForTests + 1 headers (for the commitment)
+ a.Equal(recoveryIntervalForTests+2, uint64(len(ao.voters.votersForRoundCache)))
+ a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
+
+ // after adding the round intervalForTest*(recoveryIntervalForTests+3)+1 we expect the voter tracker to remove voters
+ for ; i < intervalForTest*(recoveryIntervalForTests+3)+1; i++ {
+ block := randomBlock(basics.Round(i))
+ block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ addBlockToAccountsUpdate(block.block, ao)
+ }
+
+ a.Equal(recoveryIntervalForTests+2, uint64(len(ao.voters.votersForRoundCache)))
+ a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*2-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
+
+ // after adding the round intervalForTest*(recoveryIntervalForTests+3)+1 we expect the voter tracker to remove voters
+ for ; i < intervalForTest*(recoveryIntervalForTests+4)+1; i++ {
+ block := randomBlock(basics.Round(i))
+ block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ addBlockToAccountsUpdate(block.block, ao)
+ }
+ a.Equal(recoveryIntervalForTests+2, uint64(len(ao.voters.votersForRoundCache)))
+ a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*3-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
+
+ // if the last round of the intervalForTest has not been added to the ledger the votersTracker would
+ // retain one more element
+ for ; i < intervalForTest*(recoveryIntervalForTests+5); i++ {
+ block := randomBlock(basics.Round(i))
+ block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ addBlockToAccountsUpdate(block.block, ao)
+ }
+ a.Equal(recoveryIntervalForTests+3, uint64(len(ao.voters.votersForRoundCache)))
+ a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*3-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
+
+ for ; i < intervalForTest*(recoveryIntervalForTests+5)+1; i++ {
+ block := randomBlock(basics.Round(i))
+ block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ addBlockToAccountsUpdate(block.block, ao)
+ }
+ a.Equal(recoveryIntervalForTests+2, uint64(len(ao.voters.votersForRoundCache)))
+ a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*4-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
+}
+
+func TestTopNAccountsThatHaveNoMssKeys(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ intervalForTest := config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval
+ lookbackForTest := config.Consensus[protocol.ConsensusCurrentVersion].StateProofVotersLookback
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+
+ pooldata := basics.AccountData{}
+ pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ pooldata.Status = basics.NotParticipating
+ accts[0][testPoolAddr] = pooldata
+
+ sinkdata := basics.AccountData{}
+ sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ sinkdata.Status = basics.NotParticipating
+ accts[0][testSinkAddr] = sinkdata
+
+ ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ au, ao := newAcctUpdates(t, ml, conf)
+ defer au.close()
+ defer ao.close()
+
+ i := uint64(1)
+ for ; i < (intervalForTest)+1; i++ {
+ block := randomBlock(basics.Round(i))
+ block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ addBlockToAccountsUpdate(block.block, ao)
+ }
+
+ top, err := ao.voters.getVoters(basics.Round(intervalForTest - lookbackForTest))
+ a.NoError(err)
+ for j := 0; j < len(top.Participants); j++ {
+ a.Equal(merklesignature.NoKeysCommitment, top.Participants[j].PK.Commitment)
+ }
+}
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index ea9954528..b19d379a3 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -513,17 +513,18 @@ func (c *Client) signAndBroadcastTransactionWithWallet(walletHandle, pw []byte,
// M | 0 | first + validRounds - 1
// M | M | error
//
-func (c *Client) ComputeValidityRounds(firstValid, lastValid, validRounds uint64) (uint64, uint64, error) {
+func (c *Client) ComputeValidityRounds(firstValid, lastValid, validRounds uint64) (first, last, latest uint64, err error) {
params, err := c.SuggestedParams()
if err != nil {
- return 0, 0, err
+ return 0, 0, 0, err
}
cparams, ok := c.consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
if !ok {
- return 0, 0, fmt.Errorf("cannot construct transaction: unknown consensus protocol %s", params.ConsensusVersion)
+ return 0, 0, 0, fmt.Errorf("cannot construct transaction: unknown consensus protocol %s", params.ConsensusVersion)
}
- return computeValidityRounds(firstValid, lastValid, validRounds, params.LastRound, cparams.MaxTxnLife)
+ first, last, err = computeValidityRounds(firstValid, lastValid, validRounds, params.LastRound, cparams.MaxTxnLife)
+ return first, last, params.LastRound, err
}
func computeValidityRounds(firstValid, lastValid, validRounds, lastRound, maxTxnLife uint64) (uint64, uint64, error) {
@@ -1210,11 +1211,20 @@ func (c *Client) Dryrun(data []byte) (resp generatedV2.DryrunResponse, err error
return
}
-// TxnProof returns a Merkle proof for a transaction in a block.
-func (c *Client) TxnProof(txid string, round uint64, hashType crypto.HashType) (resp generatedV2.ProofResponse, err error) {
+// TransactionProof returns a Merkle proof for a transaction in a block.
+func (c *Client) TransactionProof(txid string, round uint64, hashType crypto.HashType) (resp generatedV2.TransactionProofResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
- return algod.Proof(txid, round, hashType)
+ return algod.TransactionProof(txid, round, hashType)
+ }
+ return
+}
+
+// LightBlockHeaderProof returns a Merkle proof for a block.
+func (c *Client) LightBlockHeaderProof(round uint64) (resp generatedV2.LightBlockHeaderProofResponse, err error) {
+ algod, err := c.ensureAlgodClient()
+ if err == nil {
+ return algod.LightBlockHeaderProof(round)
}
return
}
diff --git a/libgoal/transactions.go b/libgoal/transactions.go
index a03a9d551..c28fd0216 100644
--- a/libgoal/transactions.go
+++ b/libgoal/transactions.go
@@ -220,11 +220,11 @@ func generateRegistrationTransaction(part generated.ParticipationKey, fee basics
return transactions.Transaction{}, fmt.Errorf("state proof key pointer is nil")
}
- if len(*part.Key.StateProofKey) != len(merklesignature.Verifier{}) {
- return transactions.Transaction{}, fmt.Errorf("state proof key is the wrong size, should be %d but it is %d", len(merklesignature.Verifier{}), len(*part.Key.StateProofKey))
+ if len(*part.Key.StateProofKey) != len(merklesignature.Commitment{}) {
+ return transactions.Transaction{}, fmt.Errorf("state proof key is the wrong size, should be %d but it is %d", len(merklesignature.Commitment{}), len(*part.Key.StateProofKey))
}
- var stateProofPk merklesignature.Verifier
+ var stateProofPk merklesignature.Commitment
copy(stateProofPk[:], (*part.Key.StateProofKey)[:])
t := transactions.Transaction{
diff --git a/logging/collector.go b/logging/collector.go
index a443b15ac..7e3bedf2d 100644
--- a/logging/collector.go
+++ b/logging/collector.go
@@ -131,7 +131,7 @@ func addFile(tw *tar.Writer, filePath string) error {
return err
}
// copy the file data to the tarball
- if _, err := io.Copy(tw, file); err != nil {
+ if _, err := io.CopyN(tw, file, stat.Size()); err != nil {
return err
}
}
diff --git a/logging/log.go b/logging/log.go
index d0384d0a8..1849774ed 100644
--- a/logging/log.go
+++ b/logging/log.go
@@ -353,6 +353,11 @@ func Base() Logger {
// NewLogger returns a new Logger logging to out.
func NewLogger() Logger {
l := logrus.New()
+ return NewWrappedLogger(l)
+}
+
+// NewWrappedLogger returns a new Logger that wraps an external logrus logger.
+func NewWrappedLogger(l *logrus.Logger) Logger {
out := logger{
logrus.NewEntry(l),
&loggerState{},
diff --git a/logging/telemetryConfig_test.go b/logging/telemetryConfig_test.go
index e0522a304..dd7cce322 100644
--- a/logging/telemetryConfig_test.go
+++ b/logging/telemetryConfig_test.go
@@ -18,7 +18,6 @@ package logging
import (
"encoding/json"
- "io/ioutil"
"os"
"path/filepath"
"testing"
@@ -65,7 +64,7 @@ func Test_CreateSaveLoadTelemetryConfig(t *testing.T) {
testDir := os.Getenv("TESTDIR")
if testDir == "" {
- testDir, _ = ioutil.TempDir("", "tmp")
+ testDir = t.TempDir()
}
a := require.New(t)
@@ -143,7 +142,7 @@ func TestSaveTelemetryConfigBlankUsernamePassword(t *testing.T) {
testDir := os.Getenv("TESTDIR")
if testDir == "" {
- testDir, _ = ioutil.TempDir("", "tmp")
+ testDir = t.TempDir()
}
a := require.New(t)
diff --git a/logging/telemetryhook_test.go b/logging/telemetryhook_test.go
index 4699281a5..09d916ced 100644
--- a/logging/telemetryhook_test.go
+++ b/logging/telemetryhook_test.go
@@ -17,7 +17,6 @@
package logging
import (
- "io/ioutil"
"os"
"path/filepath"
"testing"
@@ -46,12 +45,10 @@ func TestLoadDefaultConfig(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
- configDir, err := ioutil.TempDir("", "testdir")
- defer os.RemoveAll(configDir)
- currentRoot := config.SetGlobalConfigFileRoot(configDir)
+ currentRoot := config.SetGlobalConfigFileRoot(t.TempDir())
defer config.SetGlobalConfigFileRoot(currentRoot)
- _, err = EnsureTelemetryConfig(nil, "")
+ _, err := EnsureTelemetryConfig(nil, "")
a.Nil(err)
@@ -71,17 +68,15 @@ func TestLoggingConfigDataDirFirst(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
- globalConfigRoot, err := ioutil.TempDir("", "globalConfigRoot")
- defer os.RemoveAll(globalConfigRoot)
+ globalConfigRoot := t.TempDir()
oldConfigRoot := config.SetGlobalConfigFileRoot(globalConfigRoot)
defer config.SetGlobalConfigFileRoot(oldConfigRoot)
globalLoggingPath := filepath.Join(globalConfigRoot, TelemetryConfigFilename)
- dataDir, err := ioutil.TempDir("", "dataDir")
- defer os.RemoveAll(dataDir)
+ dataDir := t.TempDir()
dataDirLoggingPath := filepath.Join(dataDir, TelemetryConfigFilename)
- _, err = os.Stat(globalLoggingPath)
+ _, err := os.Stat(globalLoggingPath)
a.True(os.IsNotExist(err))
_, err = os.Stat(dataDirLoggingPath)
a.True(os.IsNotExist(err))
@@ -117,13 +112,12 @@ func TestLoggingConfigGlobalSecond(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
- globalConfigRoot, err := ioutil.TempDir("", "globalConfigRoot")
- defer os.RemoveAll(globalConfigRoot)
+ globalConfigRoot := t.TempDir()
oldConfigRoot := config.SetGlobalConfigFileRoot(globalConfigRoot)
defer config.SetGlobalConfigFileRoot(oldConfigRoot)
globalLoggingPath := filepath.Join(globalConfigRoot, TelemetryConfigFilename)
- _, err = os.Stat(globalLoggingPath)
+ _, err := os.Stat(globalLoggingPath)
a.True(os.IsNotExist(err))
cfgPath := "/missing-directory"
@@ -150,14 +144,12 @@ func TestSaveLoadConfig(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
- globalConfigRoot, err := ioutil.TempDir("", "globalConfigRoot")
- defer os.RemoveAll(globalConfigRoot)
+ globalConfigRoot := t.TempDir()
oldConfigRoot := config.SetGlobalConfigFileRoot(globalConfigRoot)
defer config.SetGlobalConfigFileRoot(oldConfigRoot)
- configDir, err := ioutil.TempDir("", "testdir")
- os.RemoveAll(configDir)
- err = os.Mkdir(configDir, 0777)
+ configDir := t.TempDir()
+ err := os.Mkdir(configDir, 0777)
cfg, err := EnsureTelemetryConfig(&configDir, "")
cfg.Name = "testname"
@@ -178,8 +170,6 @@ func TestSaveLoadConfig(t *testing.T) {
a.NoError(err)
a.Equal("testname", cfgLoad.Name)
a.Equal(cfgLoad, cfg)
-
- os.RemoveAll(configDir)
}
func TestAsyncTelemetryHook_CloseDrop(t *testing.T) {
diff --git a/logging/telemetryspec/metric.go b/logging/telemetryspec/metric.go
index 209c14d8e..5ce2a311d 100644
--- a/logging/telemetryspec/metric.go
+++ b/logging/telemetryspec/metric.go
@@ -17,6 +17,8 @@
package telemetryspec
import (
+ "bytes"
+ "fmt"
"strconv"
"strings"
"time"
@@ -57,6 +59,17 @@ type AssembleBlockStats struct {
ProcessingTime transactionProcessingTimeDistibution
BlockGenerationDuration uint64
TransactionsLoopStartTime int64
+ StateProofNextRound uint64 // next round for which state proof if expected
+ StateProofStats StateProofStats
+}
+
+// StateProofStats is the set of stats captured when a StateProof is present in the assembled block
+type StateProofStats struct {
+ ProvenWeight uint64
+ SignedWeight uint64
+ NumReveals int
+ NumPosToReveal int
+ TxnSize int
}
// AssembleBlockTimeout represents AssemblePayset exiting due to timeout
@@ -82,6 +95,37 @@ type AssembleBlockMetrics struct {
func (m AssembleBlockMetrics) Identifier() Metric {
return assembleBlockMetricsIdentifier
}
+func (m AssembleBlockStats) String() string {
+ b := &bytes.Buffer{}
+ b.WriteString(fmt.Sprintf("StartCount:%d, ", m.StartCount))
+ b.WriteString(fmt.Sprintf("IncludedCount:%d, ", m.IncludedCount))
+ b.WriteString(fmt.Sprintf("InvalidCount:%d, ", m.InvalidCount))
+ b.WriteString(fmt.Sprintf("MinFee:%d, ", m.MinFee))
+ b.WriteString(fmt.Sprintf("MaxFee:%d, ", m.MaxFee))
+ b.WriteString(fmt.Sprintf("AverageFee:%d, ", m.AverageFee))
+ b.WriteString(fmt.Sprintf("MinLength:%d, ", m.MinLength))
+ b.WriteString(fmt.Sprintf("MaxLength:%d, ", m.MaxLength))
+ b.WriteString(fmt.Sprintf("MinPriority:%d, ", m.MinPriority))
+ b.WriteString(fmt.Sprintf("MaxPriority:%d, ", m.MaxPriority))
+ b.WriteString(fmt.Sprintf("CommittedCount:%d, ", m.CommittedCount))
+ b.WriteString(fmt.Sprintf("StopReason:%s, ", m.StopReason))
+ b.WriteString(fmt.Sprintf("TotalLength:%d, ", m.TotalLength))
+ b.WriteString(fmt.Sprintf("EarlyCommittedCount:%d, ", m.EarlyCommittedCount))
+ b.WriteString(fmt.Sprintf("Nanoseconds:%d, ", m.Nanoseconds))
+ b.WriteString(fmt.Sprintf("ProcessingTime:%v, ", m.ProcessingTime))
+ b.WriteString(fmt.Sprintf("BlockGenerationDuration:%d, ", m.BlockGenerationDuration))
+ b.WriteString(fmt.Sprintf("TransactionsLoopStartTime:%d, ", m.TransactionsLoopStartTime))
+ b.WriteString(fmt.Sprintf("StateProofNextRound:%d, ", m.StateProofNextRound))
+ emptySPStats := StateProofStats{}
+ if m.StateProofStats != emptySPStats {
+ b.WriteString(fmt.Sprintf("ProvenWeight:%d, ", m.StateProofStats.ProvenWeight))
+ b.WriteString(fmt.Sprintf("SignedWeight:%d, ", m.StateProofStats.SignedWeight))
+ b.WriteString(fmt.Sprintf("NumReveals:%d, ", m.StateProofStats.NumReveals))
+ b.WriteString(fmt.Sprintf("NumPosToReveal:%d, ", m.StateProofStats.NumPosToReveal))
+ b.WriteString(fmt.Sprintf("TxnSize:%d", m.StateProofStats.TxnSize))
+ }
+ return b.String()
+}
//-------------------------------------------------------
// ProcessBlock
diff --git a/logging/telemetryspec/metric_test.go b/logging/telemetryspec/metric_test.go
index 4b470c1c5..c6d6489da 100644
--- a/logging/telemetryspec/metric_test.go
+++ b/logging/telemetryspec/metric_test.go
@@ -18,11 +18,13 @@ package telemetryspec
import (
"encoding/json"
+ "reflect"
"testing"
"time"
- "github.com/algorand/go-algorand/test/partitiontest"
"github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
)
func TestTransactionProcessingTimeDistibutionFormatting(t *testing.T) {
@@ -47,3 +49,40 @@ func TestTransactionProcessingTimeDistibutionFormatting(t *testing.T) {
require.NoError(t, err)
require.Equal(t, []byte("{\"ProcessingTime\":[2,3,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}"), bytes)
}
+
+func TestAssembleBlockStatsString(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var abs AssembleBlockStats
+ localType := reflect.TypeOf(abs)
+
+ // Empty StateProofStats will not be reported. Set a filed to check it printed
+ abs.StateProofStats.ProvenWeight = 1
+ absString := abs.String()
+ for f := 0; f < localType.NumField(); f++ {
+ field := localType.Field(f)
+ if field.Type.Kind() == reflect.Struct && field.Type.NumField() > 1 {
+ for nf := 0; nf < field.Type.NumField(); nf++ {
+ nestedField := field.Type.Field(nf)
+ require.Contains(t, absString, nestedField.Name)
+ }
+ continue
+ }
+ require.Contains(t, absString, field.Name)
+ }
+
+ // Make sure the StateProofStats is not reported if they are empty
+ abs.StateProofStats.ProvenWeight = 0
+ absString = abs.String()
+ for f := 0; f < localType.NumField(); f++ {
+ field := localType.Field(f)
+ if field.Name == "StateProofStats" {
+ for nf := 0; nf < field.Type.NumField(); nf++ {
+ nestedField := field.Type.Field(nf)
+ require.NotContains(t, absString, nestedField.Name)
+ }
+ continue
+ }
+ require.Contains(t, absString, field.Name)
+ }
+}
diff --git a/netdeploy/network.go b/netdeploy/network.go
index 8f972625c..f54eef4cc 100644
--- a/netdeploy/network.go
+++ b/netdeploy/network.go
@@ -81,13 +81,20 @@ func CreateNetworkFromTemplate(name, rootDir, templateFile, binDir string, impor
return n, err
}
+ if n.cfg.Name == "" {
+ n.cfg.Name = template.Genesis.NetworkName
+ }
+ if n.cfg.Name == "" {
+ return n, fmt.Errorf("unnamed network. Use the \"network\" flag or \"Genesis.NetworkName\" in the network template")
+ }
+
// Create the network root directory so we can generate genesis.json and prepare node data directories
err = os.MkdirAll(rootDir, os.ModePerm)
if err != nil {
return n, err
}
template.Consensus = consensus
- err = template.generateGenesisAndWallets(rootDir, name, binDir)
+ err = template.generateGenesisAndWallets(rootDir, n.cfg.Name, binDir)
if err != nil {
return n, err
}
diff --git a/netdeploy/networkTemplates_test.go b/netdeploy/networkTemplates_test.go
index b3f99f973..c3e6445bd 100644
--- a/netdeploy/networkTemplates_test.go
+++ b/netdeploy/networkTemplates_test.go
@@ -17,7 +17,6 @@
package netdeploy
import (
- "io/ioutil"
"os"
"path/filepath"
"testing"
@@ -60,12 +59,11 @@ func TestGenerateGenesis(t *testing.T) {
templateDir, _ := filepath.Abs("../test/testdata/nettemplates")
template, _ := loadTemplate(filepath.Join(templateDir, "David20.json"))
- targetFolder, err := ioutil.TempDir("", "netroot")
- defer os.RemoveAll(targetFolder)
+ targetFolder := t.TempDir()
networkName := "testGenGen"
binDir := os.ExpandEnv("${GOPATH}/bin")
- err = template.generateGenesisAndWallets(targetFolder, networkName, binDir)
+ err := template.generateGenesisAndWallets(targetFolder, networkName, binDir)
a.NoError(err)
_, err = os.Stat(filepath.Join(targetFolder, config.GenesisJSONFile))
fileExists := err == nil
diff --git a/netdeploy/network_test.go b/netdeploy/network_test.go
index a4d663894..383d7f952 100644
--- a/netdeploy/network_test.go
+++ b/netdeploy/network_test.go
@@ -17,7 +17,6 @@
package netdeploy
import (
- "io/ioutil"
"os"
"path/filepath"
"testing"
@@ -39,8 +38,7 @@ func TestSaveNetworkCfg(t *testing.T) {
TemplateFile: "testTemplate",
}
- tmpFolder, _ := ioutil.TempDir("", "tmp")
- defer os.RemoveAll(tmpFolder)
+ tmpFolder := t.TempDir()
cfgFile := filepath.Join(tmpFolder, configFileName)
err := saveNetworkCfg(cfg, cfgFile)
a.Nil(err)
@@ -53,8 +51,7 @@ func TestSaveConsensus(t *testing.T) {
a := require.New(t)
- tmpFolder, _ := ioutil.TempDir("", "tmp")
- defer os.RemoveAll(tmpFolder)
+ tmpFolder := t.TempDir()
relayDir := filepath.Join(tmpFolder, "testRelayDir")
err := os.MkdirAll(relayDir, 0744)
a.NoError(err)
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index eefd3b032..a7c874cda 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -127,10 +127,10 @@ var incomingPeers = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_in
var outgoingPeers = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_outgoing_peers", Description: "Number of active outgoing peers."})
// peerDisconnectionAckDuration defines the time we would wait for the peer disconnection to compelete.
-const peerDisconnectionAckDuration time.Duration = 5 * time.Second
+const peerDisconnectionAckDuration = 5 * time.Second
// peerShutdownDisconnectionAckDuration defines the time we would wait for the peer disconnection to compelete during shutdown.
-const peerShutdownDisconnectionAckDuration time.Duration = 50 * time.Millisecond
+const peerShutdownDisconnectionAckDuration = 50 * time.Millisecond
// Peer opaque interface for referring to a neighbor in the network
type Peer interface{}
@@ -767,7 +767,7 @@ func (wn *WebsocketNetwork) setup() {
wn.messagesOfInterestRefresh = make(chan struct{}, 2)
wn.messagesOfInterestGeneration = 1 // something nonzero so that any new wsPeer needs updating
if wn.relayMessages {
- wn.RegisterMessageInterest(protocol.CompactCertSigTag)
+ wn.RegisterMessageInterest(protocol.StateProofSigTag)
}
}
diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go
index 424586f01..6f14e669f 100644
--- a/network/wsNetwork_test.go
+++ b/network/wsNetwork_test.go
@@ -1794,7 +1794,6 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
incomingMsgSync := deadlock.Mutex{}
msgCounters := make(map[protocol.Tag]int)
expectedCounts := make(map[protocol.Tag]int)
- expectedCounts[ft1] = 5
expectedCounts[ft2] = 5
var failed uint32
messageArriveWg := sync.WaitGroup{}
@@ -1839,21 +1838,20 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
waitReady(t, netB, readyTimeout.C)
// have netB asking netA to send it only AgreementVoteTag and ProposalPayloadTag
- require.NoError(t, netB.RegisterMessageInterest(ft1))
require.NoError(t, netB.RegisterMessageInterest(ft2))
// send another message which we can track, so that we'll know that the first message was delivered.
netB.Broadcast(context.Background(), protocol.VoteBundleTag, []byte{0, 1, 2, 3, 4}, true, nil)
messageFilterArriveWg.Wait()
waitPeerInternalChanQuiet(t, netA)
- messageArriveWg.Add(5 * 2) // we're expecting exactly 10 messages.
+ messageArriveWg.Add(5) // we're expecting exactly 5 messages.
// send 5 messages of few types.
for i := 0; i < 5; i++ {
if atomic.LoadUint32(&failed) != 0 {
t.Errorf("failed")
break
}
- netA.Broadcast(context.Background(), ft1, []byte{0, 1, 2, 3, 4}, true, nil)
+ netA.Broadcast(context.Background(), ft1, []byte{0, 1, 2, 3, 4}, true, nil) // NOT in MOI
netA.Broadcast(context.Background(), ft3, []byte{0, 1, 2, 3, 4}, true, nil) // NOT in MOI
netA.Broadcast(context.Background(), ft2, []byte{0, 1, 2, 3, 4}, true, nil)
netA.Broadcast(context.Background(), ft4, []byte{0, 1, 2, 3, 4}, true, nil) // NOT in MOI
@@ -1865,7 +1863,7 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
messageArriveWg.Wait()
incomingMsgSync.Lock()
defer incomingMsgSync.Unlock()
- require.Equal(t, 2, len(msgCounters))
+ require.Equal(t, 1, len(msgCounters))
for tag, count := range msgCounters {
if atomic.LoadUint32(&failed) != 0 {
t.Errorf("failed")
diff --git a/network/wsPeer.go b/network/wsPeer.go
index 2b451478e..594accb3d 100644
--- a/network/wsPeer.go
+++ b/network/wsPeer.go
@@ -38,7 +38,7 @@ import (
"github.com/algorand/go-algorand/util/metrics"
)
-const maxMessageLength = 4 * 1024 * 1024 // Currently the biggest message is VB vote bundles. TODO: per message type size limit?
+const maxMessageLength = 6 * 1024 * 1024 // Currently the biggest message is VB vote bundles. TODO: per message type size limit?
const averageMessageLength = 2 * 1024 // Most of the messages are smaller than this size, which makes it into a good base allocation.
// This parameter controls how many messages from a single peer can be
diff --git a/node/netprio.go b/node/netprio.go
index ede13b661..9057fb328 100644
--- a/node/netprio.go
+++ b/node/netprio.go
@@ -131,7 +131,7 @@ func (node *AlgorandFullNode) VerifyPrioResponse(challenge string, response []by
}
ephID := basics.OneTimeIDForRound(rs.Round, data.KeyDilution(proto))
- if !data.VoteID.Verify(ephID, rs.Response, rs.Sig, proto.EnableBatchVerification) {
+ if !data.VoteID.Verify(ephID, rs.Response, rs.Sig) {
err = fmt.Errorf("signature verification failure")
return
}
diff --git a/node/node.go b/node/node.go
index 55668c1b0..7f0df8140 100644
--- a/node/node.go
+++ b/node/node.go
@@ -31,7 +31,6 @@ import (
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/agreement/gossip"
"github.com/algorand/go-algorand/catchup"
- "github.com/algorand/go-algorand/compactcert"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data"
@@ -50,6 +49,7 @@ import (
"github.com/algorand/go-algorand/node/indexer"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/rpcs"
+ "github.com/algorand/go-algorand/stateproof"
"github.com/algorand/go-algorand/util/db"
"github.com/algorand/go-algorand/util/execpool"
"github.com/algorand/go-algorand/util/metrics"
@@ -61,6 +61,13 @@ const (
participationRegistryFlushMaxWaitDuration = 30 * time.Second
)
+const (
+ bitMismatchingVotingKey = 1 << iota
+ bitMismatchingSelectionKey
+ bitAccountOffline
+ bitAccountIsClosed
+)
+
// StatusReport represents the current basic status of the node
type StatusReport struct {
LastRound basics.Round
@@ -137,7 +144,7 @@ type AlgorandFullNode struct {
tracer messagetracer.MessageTracer
- compactCert *compactcert.Worker
+ stateProofWorker *stateproof.Worker
}
// TxnWithStatus represents information about a single transaction,
@@ -301,13 +308,17 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
node.tracer = messagetracer.NewTracer(log).Init(cfg)
gossip.SetTrace(agreementParameters.Network, node.tracer)
- compactCertPathname := filepath.Join(genesisDir, config.CompactCertFilename)
- compactCertAccess, err := db.MakeAccessor(compactCertPathname, false, false)
+ // Delete the deprecated database file if it exists. This can be removed in future updates since this file should not exist by then.
+ oldCompactCertPath := filepath.Join(genesisDir, "compactcert.sqlite")
+ os.Remove(oldCompactCertPath)
+
+ stateProofPathname := filepath.Join(genesisDir, config.StateProofFileName)
+ stateProofAccess, err := db.MakeAccessor(stateProofPathname, false, false)
if err != nil {
- log.Errorf("Cannot load compact cert data: %v", err)
+ log.Errorf("Cannot load state proof data: %v", err)
return nil, err
}
- node.compactCert = compactcert.NewWorker(compactCertAccess, node.log, node.accountManager, node.ledger.Ledger, node.net, node)
+ node.stateProofWorker = stateproof.NewWorker(stateProofAccess, node.log, node.accountManager, node.ledger.Ledger, node.net, node)
return node, err
}
@@ -346,7 +357,7 @@ func (node *AlgorandFullNode) Start() {
node.blockService.Start()
node.ledgerService.Start()
node.txHandler.Start()
- node.compactCert.Start()
+ node.stateProofWorker.Start()
startNetwork()
// start indexer
if idx, err := node.Indexer(); err == nil {
@@ -397,10 +408,8 @@ func (node *AlgorandFullNode) Stop() {
defer func() {
node.mu.Unlock()
node.waitMonitoringRoutines()
- // we want to shut down the compactCert last, since the oldKeyDeletionThread might depend on it when making the
- // call to LatestSigsFromThisNode.
- node.compactCert.Shutdown()
- node.compactCert = nil
+ node.stateProofWorker.Shutdown()
+ node.stateProofWorker = nil
}()
node.net.ClearHandlers()
@@ -490,7 +499,7 @@ func (node *AlgorandFullNode) broadcastSignedTxGroup(txgroup []transactions.Sign
return err
}
- _, err = verify.TxnGroup(txgroup, b, node.ledger.VerifiedTransactionCache())
+ _, err = verify.TxnGroup(txgroup, b, node.ledger.VerifiedTransactionCache(), node.ledger)
if err != nil {
node.log.Warnf("malformed transaction: %v", err)
return err
@@ -869,7 +878,8 @@ func (node *AlgorandFullNode) InstallParticipationKey(partKeyBinary []byte) (acc
}
// Tell the AccountManager about the Participation (dupes don't matter) so we ignore the return value
- added := node.accountManager.AddParticipation(partkey)
+ // This is ephemeral since we are deleting the file after this function is done
+ added := node.accountManager.AddParticipation(partkey, true)
if !added {
return account.ParticipationID{}, fmt.Errorf("ParticipationRegistry: cannot register duplicate participation key")
}
@@ -939,7 +949,9 @@ func (node *AlgorandFullNode) loadParticipationKeys() error {
// Tell the AccountManager about the Participation (dupes don't matter)
// make sure that all stateproof data (with are not the keys per round)
// are being store to the registry in that point
- added := node.accountManager.AddParticipation(part)
+ // These files are not ephemeral and must be deleted eventually since
+ // this function is called to load files located in the node on startup
+ added := node.accountManager.AddParticipation(part, false)
if added {
node.log.Infof("Loaded participation keys from storage: %s %s", part.Address(), info.Name())
} else {
@@ -1023,8 +1035,6 @@ func (node *AlgorandFullNode) oldKeyDeletionThread(done <-chan struct{}) {
r := node.ledger.Latest()
- // We need the latest header to determine the next compact cert
- // round, if any.
latestHdr, err := node.ledger.BlockHdr(r)
if err != nil {
switch err.(type) {
@@ -1255,6 +1265,23 @@ func (node *AlgorandFullNode) AssembleBlock(round basics.Round) (agreement.Valid
return validatedBlock{vb: lvb}, nil
}
+// getOfflineClosedStatus will return an int with the appropriate bit(s) set if it is offline and/or online
+func getOfflineClosedStatus(acctData basics.OnlineAccountData) int {
+ rval := 0
+ isOffline := acctData.VoteFirstValid == 0 && acctData.VoteLastValid == 0
+
+ if isOffline {
+ rval = rval | bitAccountOffline
+ }
+
+ isClosed := isOffline && acctData.MicroAlgosWithRewards.Raw == 0
+ if isClosed {
+ rval = rval | bitAccountIsClosed
+ }
+
+ return rval
+}
+
// VotingKeys implements the key manager's VotingKeys method, and provides additional validation with the ledger.
// that allows us to load multiple overlapping keys for the same account, and filter these per-round basis.
func (node *AlgorandFullNode) VotingKeys(votingRound, keysRound basics.Round) []account.ParticipationRecordForRound {
@@ -1268,12 +1295,13 @@ func (node *AlgorandFullNode) VotingKeys(votingRound, keysRound basics.Round) []
accountsData := make(map[basics.Address]basics.OnlineAccountData, len(parts))
matchingAccountsKeys := make(map[basics.Address]bool)
mismatchingAccountsKeys := make(map[basics.Address]int)
- const bitMismatchingVotingKey = 1
- const bitMismatchingSelectionKey = 2
+
for _, p := range parts {
acctData, hasAccountData := accountsData[p.Account]
if !hasAccountData {
var err error
+ // LookupAgreement is used to look at the past ~320 rounds of account state
+ // It provides a fast lookup method for online account information
acctData, err = node.ledger.LookupAgreement(keysRound, p.Account)
if err != nil {
node.log.Warnf("node.VotingKeys: Account %v not participating: cannot locate account for round %d : %v", p.Account, keysRound, err)
@@ -1282,6 +1310,8 @@ func (node *AlgorandFullNode) VotingKeys(votingRound, keysRound basics.Round) []
accountsData[p.Account] = acctData
}
+ mismatchingAccountsKeys[p.Account] = mismatchingAccountsKeys[p.Account] | getOfflineClosedStatus(acctData)
+
if acctData.VoteID != p.Voting.OneTimeSignatureVerifier {
mismatchingAccountsKeys[p.Account] = mismatchingAccountsKeys[p.Account] | bitMismatchingVotingKey
continue
@@ -1304,14 +1334,22 @@ func (node *AlgorandFullNode) VotingKeys(votingRound, keysRound basics.Round) []
if matchingAccountsKeys[mismatchingAddr] {
continue
}
- if warningFlags&bitMismatchingVotingKey == bitMismatchingVotingKey {
- node.log.Warnf("node.VotingKeys: Account %v not participating on round %d: on chain voting key differ from participation voting key for round %d", mismatchingAddr, votingRound, keysRound)
- continue
- }
- if warningFlags&bitMismatchingSelectionKey == bitMismatchingSelectionKey {
- node.log.Warnf("node.VotingKeys: Account %v not participating on round %d: on chain selection key differ from participation selection key for round %d", mismatchingAddr, votingRound, keysRound)
+ if warningFlags&bitMismatchingVotingKey != 0 || warningFlags&bitMismatchingSelectionKey != 0 {
+ // If we are closed, upgrade this to info so we don't spam telemetry reporting
+ if warningFlags&bitAccountIsClosed != 0 {
+ node.log.Infof("node.VotingKeys: Address: %v - Account was closed but still has a participation key active.", mismatchingAddr)
+ } else if warningFlags&bitAccountOffline != 0 {
+ // If account is offline, then warn that no registration transaction has been issued or that previous registration transaction is expired.
+ node.log.Warnf("node.VotingKeys: Address: %v - Account is offline. No registration transaction has been issued or a previous registration transaction has expired", mismatchingAddr)
+ } else {
+ // If the account isn't closed/offline and has a valid participation key, then this key may have been generated
+ // on a different node.
+ node.log.Warnf("node.VotingKeys: Account %v not participating on round %d: on chain voting key differ from participation voting key for round %d. Consider regenerating the participation key for this node.", mismatchingAddr, votingRound, keysRound)
+ }
+
continue
}
+
}
return participations
}
diff --git a/node/node_test.go b/node/node_test.go
index 6dfdd1d90..f440810f6 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -18,7 +18,6 @@ package node
import (
"fmt"
- "io/ioutil"
"math/rand"
"os"
"path/filepath"
@@ -56,7 +55,7 @@ var defaultConfig = config.Local{
IncomingConnectionsLimit: -1,
}
-func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationPool execpool.BacklogPool, customConsensus config.ConsensusProtocols) ([]*AlgorandFullNode, []string, []string) {
+func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationPool execpool.BacklogPool, customConsensus config.ConsensusProtocols) ([]*AlgorandFullNode, []string) {
util.SetFdSoftLimit(1000)
f, _ := os.Create(t.Name() + ".log")
logging.Base().SetJSONFormatter()
@@ -93,15 +92,14 @@ func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationP
}
for i := range wallets {
- rootDirectory, err := ioutil.TempDir("", "testdir"+t.Name()+strconv.Itoa(i))
+ rootDirectory := t.TempDir()
rootDirs = append(rootDirs, rootDirectory)
- require.NoError(t, err)
defaultConfig.NetAddress = "127.0.0.1:0"
defaultConfig.SaveToDisk(rootDirectory)
// Save empty phonebook - we'll add peers after they've been assigned listening ports
- err = config.SavePhonebookToDisk(make([]string, 0), rootDirectory)
+ err := config.SavePhonebookToDisk(make([]string, 0), rootDirectory)
require.NoError(t, err)
genesisDir := filepath.Join(rootDirectory, g.ID())
@@ -174,7 +172,7 @@ func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationP
require.NoError(t, err)
}
- return nodes, wallets, rootDirs
+ return nodes, wallets
}
func TestSyncingFullNode(t *testing.T) {
@@ -185,10 +183,9 @@ func TestSyncingFullNode(t *testing.T) {
backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
defer backlogPool.Shutdown()
- nodes, wallets, rootDirs := setupFullNodes(t, protocol.ConsensusCurrentVersion, backlogPool, nil)
+ nodes, wallets := setupFullNodes(t, protocol.ConsensusCurrentVersion, backlogPool, nil)
for i := 0; i < len(nodes); i++ {
defer os.Remove(wallets[i])
- defer os.RemoveAll(rootDirs[i])
defer nodes[i].Stop()
}
@@ -244,10 +241,9 @@ func TestInitialSync(t *testing.T) {
backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
defer backlogPool.Shutdown()
- nodes, wallets, rootdirs := setupFullNodes(t, protocol.ConsensusCurrentVersion, backlogPool, nil)
+ nodes, wallets := setupFullNodes(t, protocol.ConsensusCurrentVersion, backlogPool, nil)
for i := 0; i < len(nodes); i++ {
defer os.Remove(wallets[i])
- defer os.RemoveAll(rootdirs[i])
defer nodes[i].Stop()
}
initialRound := nodes[0].ledger.NextRound()
@@ -313,10 +309,9 @@ func TestSimpleUpgrade(t *testing.T) {
testParams1.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
configurableConsensus[consensusTest1] = testParams1
- nodes, wallets, rootDirs := setupFullNodes(t, consensusTest0, backlogPool, configurableConsensus)
+ nodes, wallets := setupFullNodes(t, consensusTest0, backlogPool, configurableConsensus)
for i := 0; i < len(nodes); i++ {
defer os.Remove(wallets[i])
- defer os.RemoveAll(rootDirs[i])
defer nodes[i].Stop()
}
@@ -483,8 +478,7 @@ func (m mismatchingDirectroyPermissionsLog) Errorf(fmts string, args ...interfac
func TestMismatchingGenesisDirectoryPermissions(t *testing.T) {
partitiontest.PartitionTest(t)
- testDirectroy, err := ioutil.TempDir(os.TempDir(), t.Name())
- require.NoError(t, err)
+ testDirectroy := t.TempDir()
genesis := bookkeeping.Genesis{
SchemaID: "go-test-node-genesis",
@@ -511,8 +505,7 @@ func TestMismatchingGenesisDirectoryPermissions(t *testing.T) {
func TestAsyncRecord(t *testing.T) {
partitiontest.PartitionTest(t)
- testDirectroy, err := ioutil.TempDir(os.TempDir(), t.Name())
- require.NoError(t, err)
+ testDirectroy := t.TempDir()
genesis := bookkeeping.Genesis{
SchemaID: "go-test-node-record-async",
@@ -553,3 +546,32 @@ func TestAsyncRecord(t *testing.T) {
require.Equal(t, 10000, int(records[0].LastVote))
require.Equal(t, 20000, int(records[0].LastBlockProposal))
}
+
+// TestOfflineOnlineClosedBitStatus a test that validates that the correct bits are being set
+func TestOfflineOnlineClosedBitStatus(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ tests := []struct {
+ name string
+ acctData basics.OnlineAccountData
+ expectedInt int
+ }{
+ {"online 1", basics.OnlineAccountData{
+ VotingData: basics.VotingData{VoteFirstValid: 1, VoteLastValid: 100},
+ MicroAlgosWithRewards: basics.MicroAlgos{Raw: 0}}, 0},
+ {"online 2", basics.OnlineAccountData{
+ VotingData: basics.VotingData{VoteFirstValid: 1, VoteLastValid: 100},
+ MicroAlgosWithRewards: basics.MicroAlgos{Raw: 1}}, 0},
+ {"offline & not closed", basics.OnlineAccountData{
+ VotingData: basics.VotingData{VoteFirstValid: 0, VoteLastValid: 0},
+ MicroAlgosWithRewards: basics.MicroAlgos{Raw: 1}}, 0 | bitAccountOffline},
+ {"offline & closed", basics.OnlineAccountData{
+ VotingData: basics.VotingData{VoteFirstValid: 0, VoteLastValid: 0},
+ MicroAlgosWithRewards: basics.MicroAlgos{Raw: 0}}, 0 | bitAccountOffline | bitAccountIsClosed},
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ require.Equal(t, test.expectedInt, getOfflineClosedStatus(test.acctData))
+ })
+ }
+}
diff --git a/protocol/consensus.go b/protocol/consensus.go
index e37f2dbe6..11a5c5f07 100644
--- a/protocol/consensus.go
+++ b/protocol/consensus.go
@@ -128,7 +128,7 @@ const ConsensusV23 = ConsensusVersion(
"https://github.com/algorandfoundation/specs/tree/e5f565421d720c6f75cdd186f7098495caf9101f",
)
-// ConsensusV24 include the applications, rekeying and teal v2
+// ConsensusV24 include the applications, rekeying and AVM v2
const ConsensusV24 = ConsensusVersion(
"https://github.com/algorandfoundation/specs/tree/3a83c4c743f8b17adfd73944b4319c25722a6782",
)
@@ -176,6 +176,17 @@ const ConsensusV32 = ConsensusVersion(
"https://github.com/algorandfoundation/specs/tree/d5ac876d7ede07367dbaa26e149aa42589aac1f7",
)
+// ConsensusV33 enables large blocks, the deeper block history for TEAL
+// and catchpoint generation round after lowering in-memory deltas size (320 -> 4).
+const ConsensusV33 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/830a4e673148498cc7230a0d1ba1ed0a5471acc6",
+)
+
+// ConsensusV34 enables the TEAL v7 opcodes, stateproofs, shorter lambda.
+const ConsensusV34 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/2dd5435993f6f6d65691140f592ebca5ef19ffbd",
+)
+
// ConsensusFuture is a protocol that should not appear in any production
// network, but is used to test features before they are released.
const ConsensusFuture = ConsensusVersion(
@@ -188,7 +199,7 @@ const ConsensusFuture = ConsensusVersion(
// ConsensusCurrentVersion is the latest version and should be used
// when a specific version is not provided.
-const ConsensusCurrentVersion = ConsensusV32
+const ConsensusCurrentVersion = ConsensusV34
// Error is used to indicate that an unsupported protocol has been detected.
type Error ConsensusVersion
diff --git a/protocol/hash.go b/protocol/hash.go
index 272c538e1..2d7d48acd 100644
--- a/protocol/hash.go
+++ b/protocol/hash.go
@@ -38,11 +38,8 @@ const (
AuctionParams HashID = "aP"
AuctionSettlement HashID = "aS"
- CompactCertCoin HashID = "ccc"
- CompactCertPart HashID = "ccp"
- CompactCertSig HashID = "ccs"
-
AgreementSelector HashID = "AS"
+ BlockHeader256 HashID = "B256"
BlockHeader HashID = "BH"
BalanceRecord HashID = "BR"
Credential HashID = "CR"
@@ -63,9 +60,15 @@ const (
Seed HashID = "SD"
SpecialAddr HashID = "SpecialAddr"
SignedTxnInBlock HashID = "STIB"
- TestHashable HashID = "TE"
- TxGroup HashID = "TG"
- TxnMerkleLeaf HashID = "TL"
- Transaction HashID = "TX"
- Vote HashID = "VO"
+
+ StateProofCoin HashID = "spc"
+ StateProofMessage HashID = "spm"
+ StateProofPart HashID = "spp"
+ StateProofSig HashID = "sps"
+
+ TestHashable HashID = "TE"
+ TxGroup HashID = "TG"
+ TxnMerkleLeaf HashID = "TL"
+ Transaction HashID = "TX"
+ Vote HashID = "VO"
)
diff --git a/protocol/msgp_gen.go b/protocol/msgp_gen.go
index 59df20aa5..9d9a660bb 100644
--- a/protocol/msgp_gen.go
+++ b/protocol/msgp_gen.go
@@ -7,14 +7,6 @@ import (
)
// The following msgp objects are implemented in this file:
-// CompactCertType
-// |-----> MarshalMsg
-// |-----> CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> Msgsize
-// |-----> MsgIsZero
-//
// ConsensusVersion
// |-----> MarshalMsg
// |-----> CanMarshalMsg
@@ -47,6 +39,14 @@ import (
// |-----> Msgsize
// |-----> MsgIsZero
//
+// StateProofType
+// |-----> MarshalMsg
+// |-----> CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> Msgsize
+// |-----> MsgIsZero
+//
// Tag
// |-----> MarshalMsg
// |-----> CanMarshalMsg
@@ -65,52 +65,6 @@ import (
//
// MarshalMsg implements msgp.Marshaler
-func (z CompactCertType) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- o = msgp.AppendUint64(o, uint64(z))
- return
-}
-
-func (_ CompactCertType) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(CompactCertType)
- if !ok {
- _, ok = (z).(*CompactCertType)
- }
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *CompactCertType) UnmarshalMsg(bts []byte) (o []byte, err error) {
- {
- var zb0001 uint64
- zb0001, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- (*z) = CompactCertType(zb0001)
- }
- o = bts
- return
-}
-
-func (_ *CompactCertType) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*CompactCertType)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z CompactCertType) Msgsize() (s int) {
- s = msgp.Uint64Size
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z CompactCertType) MsgIsZero() bool {
- return z == 0
-}
-
-// MarshalMsg implements msgp.Marshaler
func (z ConsensusVersion) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
@@ -295,6 +249,52 @@ func (z NetworkID) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z StateProofType) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ o = msgp.AppendUint64(o, uint64(z))
+ return
+}
+
+func (_ StateProofType) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(StateProofType)
+ if !ok {
+ _, ok = (z).(*StateProofType)
+ }
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *StateProofType) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ {
+ var zb0001 uint64
+ zb0001, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ (*z) = StateProofType(zb0001)
+ }
+ o = bts
+ return
+}
+
+func (_ *StateProofType) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*StateProofType)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z StateProofType) Msgsize() (s int) {
+ s = msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z StateProofType) MsgIsZero() bool {
+ return z == 0
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z Tag) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
diff --git a/protocol/compactcerts.go b/protocol/stateproof.go
index 06e1e3b18..6031b97d4 100644
--- a/protocol/compactcerts.go
+++ b/protocol/stateproof.go
@@ -16,30 +16,29 @@
package protocol
-// A single Algorand chain can support multiple types of compact certs,
+// A single Algorand chain can support multiple types of stateproofs,
// reflecting different hash functions, signature schemes, and frequency
// parameters.
-// CompactCertType identifies a particular configuration of compact certs.
-type CompactCertType uint64
+// StateProofType identifies a particular configuration of state proofs.
+type StateProofType uint64
const (
- // CompactCertBasic is our initial compact cert setup, using Ed25519
- // ephemeral-key signatures and SHA512/256 hashes.
- CompactCertBasic CompactCertType = 0
+ // StateProofBasic is our initial state proof setup. using falcon keys and subset-sum hash
+ StateProofBasic StateProofType = 0
- // NumCompactCertTypes is the max number of types of compact certs
+ // NumStateProofTypes is the max number of types of state proofs
// that we support. This is used as an allocation bound for a map
- // containing different compact cert types in msgpack encoding.
- NumCompactCertTypes int = 1
+ // containing different stateproof types in msgpack encoding.
+ NumStateProofTypes int = 1
)
-// SortCompactCertType implements sorting by CompactCertType keys for
+// SortStateProofType implements sorting by StateProofType keys for
// canonical encoding of maps in msgpack format.
-//msgp:ignore SortCompactCertType
-//msgp:sort CompactCertType SortCompactCertType
-type SortCompactCertType []CompactCertType
+//msgp:ignore SortStateProofType
+//msgp:sort StateProofType SortStateProofType
+type SortStateProofType []StateProofType
-func (a SortCompactCertType) Len() int { return len(a) }
-func (a SortCompactCertType) Less(i, j int) bool { return a[i] < a[j] }
-func (a SortCompactCertType) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a SortStateProofType) Len() int { return len(a) }
+func (a SortStateProofType) Less(i, j int) bool { return a[i] < a[j] }
+func (a SortStateProofType) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
diff --git a/protocol/tags.go b/protocol/tags.go
index 8ae6cfe56..f3bff635b 100644
--- a/protocol/tags.go
+++ b/protocol/tags.go
@@ -27,13 +27,13 @@ type Tag string
const (
UnknownMsgTag Tag = "??"
AgreementVoteTag Tag = "AV"
- CompactCertSigTag Tag = "CS"
MsgOfInterestTag Tag = "MI"
MsgDigestSkipTag Tag = "MS"
NetPrioResponseTag Tag = "NP"
PingTag Tag = "pi"
PingReplyTag Tag = "pj"
ProposalPayloadTag Tag = "PP"
+ StateProofSigTag Tag = "SP"
TopicMsgRespTag Tag = "TS"
TxnTag Tag = "TX"
UniCatchupReqTag Tag = "UC" //Replaced by UniEnsBlockReqTag. Only for backward compatibility.
diff --git a/protocol/txntype.go b/protocol/txntype.go
index 1b50dab8e..434255896 100644
--- a/protocol/txntype.go
+++ b/protocol/txntype.go
@@ -41,8 +41,8 @@ const (
// ApplicationCallTx allows creating, deleting, and interacting with an application
ApplicationCallTx TxType = "appl"
- // CompactCertTx records a compact certificate
- CompactCertTx TxType = "cert"
+ // StateProofTx records a state proof
+ StateProofTx TxType = "stpf"
// UnknownTx signals an error
UnknownTx TxType = "unknown"
diff --git a/scripts/dump_genesis.sh b/scripts/dump_genesis.sh
index 85615de5b..3ee876554 100755
--- a/scripts/dump_genesis.sh
+++ b/scripts/dump_genesis.sh
@@ -1,5 +1,9 @@
#!/usr/bin/env bash
+# Fail if anything goes wrong
+set -e
+set -o pipefail
+
if [ "$1" = "" ]; then
echo "Usage: $0 genesis.json"
exit 1
@@ -11,9 +15,9 @@ trap "rm -r $D" 0
GENJSON="$1"
UNAME=$(uname)
if [[ "${UNAME}" == *"MINGW"* ]]; then
- GOPATH1=$HOME/go
+ GOPATH1=$HOME/go
else
- GOPATH1=$(go env GOPATH | cut -d: -f1)
+ GOPATH1=$(go env GOPATH | cut -d: -f1)
fi
$GOPATH1/bin/algod -d $D -g "$GENJSON" -x >/dev/null
LEDGERS=$D/*/ledger.*sqlite
@@ -39,6 +43,9 @@ for LEDGER in $LEDGERS; do
acctrounds)
SORT=id
;;
+ onlineroundparamstail)
+ SORT=rnd
+ ;;
participationperiods)
SORT=period
;;
@@ -57,15 +64,27 @@ for LEDGER in $LEDGERS; do
resources)
SORT=addrid
;;
+ onlineaccounts)
+ SORT=address
+ ;;
+ txtail)
+ SORT=rnd
+ ;;
+ catchpointfirststageinfo)
+ SORT=round
+ ;;
+ unfinishedcatchpoints)
+ SORT=round
+ ;;
*)
echo "Unknown table $T" >&2
exit 1
;;
esac
- echo ".schema $T" | sqlite3 $LEDGER
+ echo ".schema $T" | sqlite3 "$LEDGER"
( echo .headers on;
- echo .mode insert $T;
- echo "SELECT * FROM $T ORDER BY $SORT;" ) | sqlite3 $LEDGER
+ echo .mode insert "$T";
+ echo "SELECT * FROM $T ORDER BY $SORT;" ) | sqlite3 "$LEDGER"
done
done
diff --git a/scripts/travis/codegen_verification.sh b/scripts/travis/codegen_verification.sh
index 53c3fee2f..ed0f5825c 100755
--- a/scripts/travis/codegen_verification.sh
+++ b/scripts/travis/codegen_verification.sh
@@ -56,7 +56,7 @@ function runGoLint() {
}
echo "Running go vet..."
-go vet $(go list ./... | grep -v /test/e2e-go/)
+make vet
echo "Running gofmt..."
runGoFmt
@@ -78,6 +78,7 @@ GOPATH=$(go env GOPATH)
"$GOPATH"/bin/algofix -error */
echo "Updating TEAL Specs"
+touch data/transactions/logic/fields_string.go # ensure rebuild
make -C data/transactions/logic
echo "Regenerate REST server"
diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go
index 5fcde0373..1592ba6c9 100644
--- a/shared/pingpong/pingpong.go
+++ b/shared/pingpong/pingpong.go
@@ -77,9 +77,10 @@ func (ppa *pingPongAccount) addBalance(offset int64) {
// WorkerState object holds a running pingpong worker
type WorkerState struct {
- cfg PpConfig
- accounts map[string]*pingPongAccount
- cinfo CreatablesInfo
+ cfg PpConfig
+ accounts map[string]*pingPongAccount
+ accountsMu deadlock.RWMutex
+ cinfo CreatablesInfo
nftStartTime int64
localNftIndex uint64
@@ -152,13 +153,13 @@ func (pps *WorkerState) PrepareAccounts(ac libgoal.Client) (err error) {
fmt.Printf("Not enough accounts - creating %d more\n", int(cfg.NumPartAccounts+1)-len(pps.accounts))
generateAccounts(pps.accounts, cfg.NumPartAccounts)
}
- go pps.roundMonitor(ac)
err = pps.fundAccounts(pps.accounts, ac, cfg)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "fund accounts failed %v\n", err)
return
}
+ go pps.roundMonitor(ac)
}
pps.cfg = cfg
@@ -174,7 +175,6 @@ func (pps *WorkerState) prepareNewAccounts(client libgoal.Client) (newAccounts m
newAccounts[pps.cfg.SrcAccount] = srcAcct
}
pps.accounts = newAccounts
- go pps.roundMonitor(client)
err = pps.fundAccounts(newAccounts, client, pps.cfg)
if err != nil {
@@ -182,6 +182,7 @@ func (pps *WorkerState) prepareNewAccounts(client libgoal.Client) (newAccounts m
return
}
+ go pps.roundMonitor(client)
return
}
@@ -293,9 +294,12 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
}
fmt.Printf("adjusting account balance to %d\n", minFund)
+ srcAcct := accounts[cfg.SrcAccount]
+
nextSendTime := time.Now()
for {
accountsAdjusted := 0
+ adjStart := time.Now()
for addr, acct := range accounts {
if addr == pps.cfg.SrcAccount {
continue
@@ -317,7 +321,7 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
}
schedule(cfg.TxnPerSec, &nextSendTime)
- tx, err = pps.sendPaymentFromSourceAccount(client, addr, fee, toSend)
+ tx, err = pps.sendPaymentFromSourceAccount(client, addr, fee, toSend, srcAcct)
if err != nil {
if strings.Contains(err.Error(), "broadcast queue full") {
fmt.Printf("failed to send payment, broadcast queue full. sleeping & retrying.\n")
@@ -331,16 +335,21 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
if !cfg.Quiet {
fmt.Printf("account balance for key %s will be %d\n", addr, minFund)
}
-
+ acct.setBalance(minFund)
totalSent++
}
accounts[cfg.SrcAccount].setBalance(srcFunds)
+ waitStart := time.Now()
// wait until all the above transactions are sent, or that we have no more transactions
// in our pending transaction pool coming from the source account.
- err = waitPendingTransactions(map[string]*pingPongAccount{cfg.SrcAccount: nil}, client)
+ err = waitPendingTransactions([]string{cfg.SrcAccount}, client)
if err != nil {
return err
}
+ waitStop := time.Now()
+ if !cfg.Quiet {
+ fmt.Printf("%d sent (%s); waited %s\n", accountsAdjusted, waitStart.Sub(adjStart).String(), waitStop.Sub(waitStart).String())
+ }
if accountsAdjusted == 0 {
break
}
@@ -348,21 +357,20 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
return err
}
-func (pps *WorkerState) sendPaymentFromSourceAccount(client libgoal.Client, to string, fee, amount uint64) (transactions.Transaction, error) {
+func (pps *WorkerState) sendPaymentFromSourceAccount(client libgoal.Client, to string, fee, amount uint64, srcAcct *pingPongAccount) (transactions.Transaction, error) {
// generate a unique note to avoid duplicate transaction failures
note := pps.makeNextUniqueNoteField()
- from := pps.cfg.SrcAccount
var txn transactions.Transaction
var stxn transactions.SignedTxn
var err error
- txn, err = client.ConstructPayment(from, to, fee, amount, note, "", [32]byte{}, 0, 0)
+ txn, err = client.ConstructPayment(srcAcct.pk.String(), to, fee, amount, note, "", [32]byte{}, 0, 0)
if err != nil {
return transactions.Transaction{}, err
}
- stxn, err = signTxn(from, txn, pps.accounts, pps.cfg)
+ stxn, err = signTxn(srcAcct, txn, pps.cfg)
if err != nil {
return transactions.Transaction{}, err
@@ -380,8 +388,8 @@ func (pps *WorkerState) sendPaymentFromSourceAccount(client libgoal.Client, to s
// accounts map have been cleared out of the transaction pool. A prerequesite for this is that
// there is no other source who might be generating transactions that would come from these account
// addresses.
-func waitPendingTransactions(accounts map[string]*pingPongAccount, client libgoal.Client) error {
- for from := range accounts {
+func waitPendingTransactions(accounts []string, client libgoal.Client) error {
+ for _, from := range accounts {
repeat:
pendingTxns, err := client.GetPendingTransactionsByAddress(from, 0)
if err != nil {
@@ -403,26 +411,38 @@ func waitPendingTransactions(accounts map[string]*pingPongAccount, client libgoa
return nil
}
-func (pps *WorkerState) refreshAccounts(accounts map[string]*pingPongAccount, client libgoal.Client, cfg PpConfig) error {
+func (pps *WorkerState) refreshAccounts(client libgoal.Client, cfg PpConfig) error {
+ pps.accountsMu.Lock()
+ addrs := make([]string, 0, len(pps.accounts))
+ for addr := range pps.accounts {
+ addrs = append(addrs, addr)
+ }
+ pps.accountsMu.Unlock()
// wait until all the pending transactions have been sent; otherwise, getting the balance
// is pretty much meaningless.
fmt.Printf("waiting for all transactions to be accepted before refreshing accounts.\n")
- err := waitPendingTransactions(accounts, client)
+ err := waitPendingTransactions(addrs, client)
if err != nil {
return err
}
- for addr := range accounts {
+ balanceUpdates := make(map[string]uint64, len(addrs))
+ for _, addr := range addrs {
amount, err := client.GetBalance(addr)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "error refreshAccounts: %v\n", err)
return err
}
+ balanceUpdates[addr] = amount
+ }
- accounts[addr].setBalance(amount)
+ pps.accountsMu.Lock()
+ defer pps.accountsMu.Unlock()
+ for addr, amount := range balanceUpdates {
+ pps.accounts[addr].setBalance(amount)
}
- return pps.fundAccounts(accounts, client, cfg)
+ return pps.fundAccounts(pps.accounts, client, cfg)
}
// return a shuffled list of accounts with some minimum balance
@@ -522,12 +542,22 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
}
minimumAmount := cfg.MinAccountFunds + (cfg.MaxAmt+cfg.MaxFee)*2
+ pps.accountsMu.RLock()
fromList := listSufficientAccounts(pps.accounts, minimumAmount, cfg.SrcAccount)
+ pps.accountsMu.RUnlock()
// in group tests txns are sent back and forth, so both parties need funds
+ var toList []string
if cfg.GroupSize == 1 {
minimumAmount = 0
+ pps.accountsMu.RLock()
+ toList = listSufficientAccounts(pps.accounts, minimumAmount, cfg.SrcAccount)
+ pps.accountsMu.RUnlock()
+ } else {
+ // same selection with another shuffle
+ toList = make([]string, len(fromList))
+ copy(toList, fromList)
+ rand.Shuffle(len(toList), func(i, j int) { toList[i], toList[j] = toList[j], toList[i] })
}
- toList := listSufficientAccounts(pps.accounts, minimumAmount, cfg.SrcAccount)
sent, succeeded, err := pps.sendFromTo(fromList, toList, ac, &nextSendTime)
totalSent += sent
@@ -537,7 +567,7 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
}
if cfg.RefreshTime > 0 && time.Now().After(refreshTime) {
- err = pps.refreshAccounts(pps.accounts, ac, cfg)
+ err = pps.refreshAccounts(ac, cfg)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "error refreshing: %v\n", err)
}
@@ -601,10 +631,12 @@ func (pps *WorkerState) makeNftTraffic(client libgoal.Client) (sentCount uint64,
privateKey := crypto.GenerateSignatureSecrets(seed)
publicKey := basics.Address(privateKey.SignatureVerifier)
+ pps.accountsMu.Lock()
pps.accounts[publicKey.String()] = &pingPongAccount{
sk: privateKey,
pk: publicKey,
}
+ pps.accountsMu.Unlock()
addr = publicKey.String()
fmt.Printf("new NFT holder %s\n", addr)
@@ -617,7 +649,8 @@ func (pps *WorkerState) makeNftTraffic(client libgoal.Client) (sentCount uint64,
toSend := proto.MinBalance * uint64(pps.cfg.NftAsaPerAccount+1) * 2
pps.nftHolders[addr] = 0
var tx transactions.Transaction
- tx, err = pps.sendPaymentFromSourceAccount(client, addr, fee, toSend)
+ srcAcct := pps.acct(pps.cfg.SrcAccount)
+ tx, err = pps.sendPaymentFromSourceAccount(client, addr, fee, toSend, srcAcct)
if err != nil {
return
}
@@ -626,7 +659,9 @@ func (pps *WorkerState) makeNftTraffic(client libgoal.Client) (sentCount uint64,
// we ran one txn above already to fund the new addr,
// we'll run a second txn below
}
+ pps.accountsMu.Lock()
pps.accounts[pps.cfg.SrcAccount].addBalance(-int64(srcCost))
+ pps.accountsMu.Unlock()
// pick a random sender from nft holder sub accounts
pick := rand.Intn(len(pps.nftHolders))
pos := 0
@@ -660,7 +695,8 @@ func (pps *WorkerState) makeNftTraffic(client libgoal.Client) (sentCount uint64,
} else {
pps.nftHolders[sender] = senderNftCount + 1
}
- stxn, err := signTxn(sender, txn, pps.accounts, pps.cfg)
+ signer := pps.acct(sender)
+ stxn, err := signTxn(signer, txn, pps.cfg)
if err != nil {
return
}
@@ -673,11 +709,16 @@ func (pps *WorkerState) makeNftTraffic(client libgoal.Client) (sentCount uint64,
return
}
+func (pps *WorkerState) acct(from string) *pingPongAccount {
+ pps.accountsMu.RLock()
+ defer pps.accountsMu.RUnlock()
+ return pps.accounts[from]
+}
+
func (pps *WorkerState) sendFromTo(
fromList, toList []string,
client libgoal.Client, nextSendTime *time.Time,
) (sentCount, successCount uint64, err error) {
- accounts := pps.accounts
cinfo := pps.cinfo
cfg := pps.cfg
@@ -696,8 +737,7 @@ func (pps *WorkerState) sendFromTo(
*ap = p
assetsByCreator[c] = append(assetsByCreator[c], ap)
}
- for i := 0; i < len(fromList); i = (i + 1) % len(fromList) {
- from := fromList[i]
+ for i, from := range fromList {
// keep going until the balances of at least 20% of the accounts is too low.
if len(belowMinBalanceAccounts)*5 > len(fromList) {
@@ -740,6 +780,7 @@ func (pps *WorkerState) sendFromTo(
amt = 0
}
+ fromAcct := pps.acct(from)
if cfg.GroupSize == 1 {
// generate random assetID or appId if we send asset/app txns
aidx := randomizeCreatableID(cfg, cinfo)
@@ -754,8 +795,8 @@ func (pps *WorkerState) sendFromTo(
}
// would we have enough money after taking into account the current updated fees ?
- if accounts[from].getBalance() <= (txn.Fee.Raw + amt + minAccountRunningBalance) {
- _, _ = fmt.Fprintf(os.Stdout, "Skipping sending %d: %s -> %s; Current cost too high(%d <= %d + %d + %d).\n", amt, from, to, accounts[from].getBalance(), txn.Fee.Raw, amt, minAccountRunningBalance)
+ if fromAcct.getBalance() <= (txn.Fee.Raw + amt + minAccountRunningBalance) {
+ _, _ = fmt.Fprintf(os.Stdout, "Skipping sending %d: %s -> %s; Current cost too high(%d <= %d + %d + %d).\n", amt, from, to, fromAcct.getBalance(), txn.Fee.Raw, amt, minAccountRunningBalance)
belowMinBalanceAccounts[from] = true
continue
}
@@ -764,7 +805,8 @@ func (pps *WorkerState) sendFromTo(
toBalanceChange = int64(amt)
// Sign txn
- stxn, signErr := signTxn(from, txn, pps.accounts, cfg)
+ signer := pps.acct(from)
+ stxn, signErr := signTxn(signer, txn, cfg)
if signErr != nil {
err = signErr
_, _ = fmt.Fprintf(os.Stderr, "signTxn failed: %v\n", err)
@@ -826,11 +868,12 @@ func (pps *WorkerState) sendFromTo(
}
// would we have enough money after taking into account the current updated fees ?
- if int64(accounts[from].getBalance())+fromBalanceChange <= int64(cfg.MinAccountFunds) {
+ if int64(fromAcct.getBalance())+fromBalanceChange <= int64(cfg.MinAccountFunds) {
_, _ = fmt.Fprintf(os.Stdout, "Skipping sending %d : %s -> %s; Current cost too high.\n", amt, from, to)
continue
}
- if int64(accounts[to].getBalance())+toBalanceChange <= int64(cfg.MinAccountFunds) {
+ toAcct := pps.acct(to)
+ if int64(toAcct.getBalance())+toBalanceChange <= int64(cfg.MinAccountFunds) {
_, _ = fmt.Fprintf(os.Stdout, "Skipping sending back %d : %s -> %s; Current cost too high.\n", amt, to, from)
continue
}
@@ -851,7 +894,8 @@ func (pps *WorkerState) sendFromTo(
var signErr error
for j, txn := range txGroup {
txn.Group = gid
- stxGroup[j], signErr = signTxn(txSigners[j], txn, pps.accounts, cfg)
+ signer := pps.acct(txSigners[j])
+ stxGroup[j], signErr = signTxn(signer, txn, cfg)
if signErr != nil {
err = signErr
return
@@ -871,7 +915,7 @@ func (pps *WorkerState) sendFromTo(
}
successCount++
- accounts[from].addBalance(fromBalanceChange)
+ fromAcct.addBalance(fromBalanceChange)
// avoid updating the "to" account.
}
@@ -918,7 +962,15 @@ func (pps *WorkerState) roundMonitor(client libgoal.Client) {
pps.pendingTxns = pendingTxns
pps.muSuggestedParams.Unlock()
+ // take a quick snapshot of accounts to decrease mutex shadow
+ pps.accountsMu.Lock()
+ accountsSnapshot := make([]*pingPongAccount, 0, len(pps.accounts))
for _, acct := range pps.accounts {
+ accountsSnapshot = append(accountsSnapshot, acct)
+ }
+ pps.accountsMu.Unlock()
+
+ for _, acct := range accountsSnapshot {
acct.Lock()
needRefresh := acct.balance < minFund && acct.balanceRound < paramsResp.LastRound
acct.Unlock()
@@ -1144,17 +1196,17 @@ func (pps *WorkerState) constructPayment(from, to string, fee, amount uint64, no
return tx, nil
}
-func signTxn(signer string, txn transactions.Transaction, accounts map[string]*pingPongAccount, cfg PpConfig) (stxn transactions.SignedTxn, err error) {
+func signTxn(signer *pingPongAccount, txn transactions.Transaction, cfg PpConfig) (stxn transactions.SignedTxn, err error) {
var psig crypto.Signature
if cfg.Rekey {
- stxn, err = txn.Sign(accounts[signer].sk), nil
+ stxn, err = txn.Sign(signer.sk), nil
} else if len(cfg.Program) > 0 {
// If there's a program, sign it and use that in a lsig
progb := logic.Program(cfg.Program)
- psig = accounts[signer].sk.Sign(&progb)
+ psig = signer.sk.Sign(&progb)
// Fill in signed transaction
stxn.Txn = txn
@@ -1164,7 +1216,7 @@ func signTxn(signer string, txn transactions.Transaction, accounts map[string]*p
} else {
// Otherwise, just sign the transaction like normal
- stxn, err = txn.Sign(accounts[signer].sk), nil
+ stxn, err = txn.Sign(signer.sk), nil
}
return
}
diff --git a/compactcert/abstractions.go b/stateproof/abstractions.go
index 9918b0f2e..825b5090e 100644
--- a/compactcert/abstractions.go
+++ b/stateproof/abstractions.go
@@ -14,11 +14,10 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package compactcert
+package stateproof
import (
"context"
-
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
@@ -41,18 +40,24 @@ type Ledger interface {
Wait(basics.Round) chan struct{}
GenesisHash() crypto.Digest
BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
- CompactCertVoters(basics.Round) (*ledgercore.VotersForRound, error)
+ VotersForStateProof(basics.Round) (*ledgercore.VotersForRound, error)
}
// Network captures the aspects of the gossip network protocol that are
// used by this package.
type Network interface {
- Broadcast(context.Context, protocol.Tag, []byte, bool, network.Peer) error
+ Broadcast(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except network.Peer) error
RegisterHandlers([]network.TaggedMessageHandler)
}
// Accounts captures the aspects of the AccountManager that are used by
// this package.
type Accounts interface {
- StateProofKeys(basics.Round) []account.StateProofRecordForRound
+ StateProofKeys(basics.Round) []account.StateProofSecretsForRound
+ DeleteStateProofKey(id account.ParticipationID, round basics.Round) error
+}
+
+// BlockHeaderFetcher captures the aspects of the Ledger that is used to fetch block headers
+type BlockHeaderFetcher interface {
+ BlockHdr(round basics.Round) (bookkeeping.BlockHeader, error)
}
diff --git a/stateproof/builder.go b/stateproof/builder.go
new file mode 100644
index 000000000..fd800ebaf
--- /dev/null
+++ b/stateproof/builder.go
@@ -0,0 +1,463 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package stateproof
+
+import (
+ "context"
+ "database/sql"
+ "encoding/binary"
+ "fmt"
+ "sort"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto/stateproof"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/network"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/stateproof/verify"
+)
+
+// makeBuilderForRound not threadsafe, should be called in a lock environment
+func (spw *Worker) makeBuilderForRound(rnd basics.Round) (builder, error) {
+ l := spw.ledger
+ hdr, err := l.BlockHdr(rnd)
+ if err != nil {
+ return builder{}, err
+ }
+
+ hdrProto := config.Consensus[hdr.CurrentProtocol]
+ votersRnd := rnd.SubSaturate(basics.Round(hdrProto.StateProofInterval))
+ votersHdr, err := l.BlockHdr(votersRnd)
+ if err != nil {
+ return builder{}, err
+ }
+
+ lookback := votersRnd.SubSaturate(basics.Round(hdrProto.StateProofVotersLookback))
+ voters, err := l.VotersForStateProof(lookback)
+ if err != nil {
+ return builder{}, err
+ }
+
+ if voters == nil {
+ // Voters not tracked for that round. Might not be a valid
+ // state proof round; state proofs might not be enabled; etc.
+ return builder{}, fmt.Errorf("voters not tracked for lookback round %d", lookback)
+ }
+
+ msg, err := GenerateStateProofMessage(l, uint64(votersHdr.Round), hdr)
+ if err != nil {
+ return builder{}, err
+ }
+
+ provenWeight, err := verify.GetProvenWeight(&votersHdr, &hdr)
+ if err != nil {
+ return builder{}, err
+ }
+
+ var res builder
+ res.votersHdr = votersHdr
+ res.voters = voters
+ res.message = msg
+ res.Builder, err = stateproof.MakeBuilder(msg.Hash(),
+ uint64(hdr.Round),
+ provenWeight,
+ voters.Participants,
+ voters.Tree,
+ config.Consensus[votersHdr.CurrentProtocol].StateProofStrengthTarget)
+ if err != nil {
+ return builder{}, err
+ }
+
+ return res, nil
+}
+
+func (spw *Worker) initBuilders() {
+ spw.mu.Lock()
+ defer spw.mu.Unlock()
+
+ var roundSigs map[basics.Round][]pendingSig
+ err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ roundSigs, err = getPendingSigs(tx)
+ return
+ })
+ if err != nil {
+ spw.log.Warnf("initBuilders: getPendingSigs: %w", err)
+ return
+ }
+
+ for rnd, sigs := range roundSigs {
+ if _, ok := spw.builders[rnd]; ok {
+ spw.log.Warnf("initBuilders: round %d already present", rnd)
+ continue
+ }
+ spw.addSigsToBuilder(sigs, rnd)
+ }
+}
+
+func (spw *Worker) addSigsToBuilder(sigs []pendingSig, rnd basics.Round) {
+ builderForRound, err := spw.makeBuilderForRound(rnd)
+ if err != nil {
+ spw.log.Warnf("addSigsToBuilder: makeBuilderForRound(%d): %v", rnd, err)
+ return
+ }
+ spw.builders[rnd] = builderForRound
+
+ for _, sig := range sigs {
+ pos, ok := builderForRound.voters.AddrToPos[sig.signer]
+ if !ok {
+ spw.log.Warnf("addSigsToBuilder: cannot find %v in round %d", sig.signer, rnd)
+ continue
+ }
+
+ isPresent, err := builderForRound.Present(pos)
+ if err != nil {
+ spw.log.Warnf("addSigsToBuilder: failed to invoke builderForRound.Present on pos %d - %w ", pos, err)
+ continue
+ }
+ if isPresent {
+ spw.log.Warnf("addSigsToBuilder: cannot add %v in round %d: position %d already added", sig.signer, rnd, pos)
+ continue
+ }
+
+ if err := builderForRound.IsValid(pos, &sig.sig, false); err != nil {
+ spw.log.Warnf("addSigsToBuilder: cannot add %v in round %d: %v", sig.signer, rnd, err)
+ continue
+ }
+ if err := builderForRound.Add(pos, sig.sig); err != nil {
+ spw.log.Warnf("addSigsToBuilder: error while adding sig. inner error: %w", err)
+ continue
+ }
+ }
+}
+
+func (spw *Worker) handleSigMessage(msg network.IncomingMessage) network.OutgoingMessage {
+ var ssig sigFromAddr
+ err := protocol.Decode(msg.Data, &ssig)
+ if err != nil {
+ spw.log.Warnf("spw.handleSigMessage(): decode: %v", err)
+ return network.OutgoingMessage{Action: network.Disconnect}
+ }
+
+ fwd, err := spw.handleSig(ssig, msg.Sender)
+ if err != nil {
+ spw.log.Warnf("spw.handleSigMessage(): %v", err)
+ }
+
+ return network.OutgoingMessage{Action: fwd}
+}
+
+// handleSig adds a signature to the pending in-memory state proof provers (builders). This function is
+// also responsible for making sure that the signature is valid, and not duplicated.
+// if a signature passes all verification it is written into the database.
+func (spw *Worker) handleSig(sfa sigFromAddr, sender network.Peer) (network.ForwardingPolicy, error) {
+ spw.mu.Lock()
+ defer spw.mu.Unlock()
+
+ builderForRound, ok := spw.builders[sfa.Round]
+ if !ok {
+ latest := spw.ledger.Latest()
+ latestHdr, err := spw.ledger.BlockHdr(latest)
+ if err != nil {
+ return network.Ignore, err
+ }
+
+ if sfa.Round < latestHdr.StateProofTracking[protocol.StateProofBasic].StateProofNextRound {
+ // Already have a complete state proof in ledger.
+ // Ignore this sig.
+ return network.Ignore, nil
+ }
+
+ // The sig should be for a round which is a multiple of StateProofInterval
+ // using the latestHdr protocol, since changing StateProofInterval is not supported
+ proto := config.Consensus[latestHdr.CurrentProtocol]
+
+ // proto.StateProofInterval is not expected to be 0 after passing StateProofNextRound
+ // checking anyway, otherwise will panic
+ if proto.StateProofInterval == 0 {
+ return network.Disconnect, fmt.Errorf("handleSig: StateProofInterval is 0 for round %d", latest)
+ }
+
+ if uint64(sfa.Round)%proto.StateProofInterval != 0 {
+ // reject the sig for the round which is not a multiple of the interval
+ // Disconnect: should not be sending a sig for this round
+ return network.Disconnect, fmt.Errorf("handleSig: round %d is not a multiple of SP interval %d",
+ sfa.Round, proto.StateProofInterval)
+ }
+
+ builderForRound, err = spw.makeBuilderForRound(sfa.Round)
+ if err != nil {
+ // Should not disconnect this peer, since this is a fault of the relay
+ // The peer could have other signatures what the relay is interested in
+ return network.Ignore, err
+ }
+ spw.builders[sfa.Round] = builderForRound
+ spw.log.Infof("spw.handleSig: starts gathering signatures for round %d", sfa.Round)
+ }
+
+ pos, ok := builderForRound.voters.AddrToPos[sfa.SignerAddress]
+ if !ok {
+ return network.Disconnect, fmt.Errorf("handleSig: %v not in participants for %d", sfa.SignerAddress, sfa.Round)
+ }
+
+ if isPresent, err := builderForRound.Present(pos); err != nil || isPresent {
+ // Signature already part of the builderForRound, ignore.
+ return network.Ignore, nil
+ }
+
+ if err := builderForRound.IsValid(pos, &sfa.Sig, true); err != nil {
+ return network.Disconnect, err
+ }
+
+ err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return addPendingSig(tx, sfa.Round, pendingSig{
+ signer: sfa.SignerAddress,
+ sig: sfa.Sig,
+ fromThisNode: sender == nil,
+ })
+ })
+ if err != nil {
+ return network.Ignore, err
+ }
+ // validated that we can add the sig previously.
+ if err := builderForRound.Add(pos, sfa.Sig); err != nil {
+ // only Present called from Add returns an error which is already
+ // passed in the call above.
+ return network.Ignore, err
+ }
+ return network.Broadcast, nil
+}
+
+func (spw *Worker) builder(latest basics.Round) {
+ // We clock the building of state proofs based on new
+ // blocks. This is because the acceptable state proof
+ // size grows over time, so that we aim to construct an extremely
+ // small state proof upfront, but if that doesn't work out, we
+ // will settle for a larger proof. New blocks also tell us
+ // if a state proof has been committed, so that we can stop trying
+ // to build it.
+ for {
+ spw.tryBroadcast()
+
+ nextrnd := latest + 1
+ select {
+ case <-spw.ctx.Done():
+ spw.wg.Done()
+ return
+
+ case <-spw.ledger.Wait(nextrnd):
+ // Continue on
+ }
+
+ // See if any new state proofs were formed, according to
+ // the new block, which would mean we can clean up some builders.
+ hdr, err := spw.ledger.BlockHdr(nextrnd)
+ if err != nil {
+ spw.log.Warnf("spw.builder: BlockHdr(%d): %v", nextrnd, err)
+ continue
+ }
+
+ spw.deleteOldSigs(&hdr)
+ spw.deleteOldBuilders(&hdr)
+
+ // Broadcast signatures based on the previous block(s) that
+ // were agreed upon. This ensures that, if we send a signature
+ // for block R, nodes will have already verified block R, because
+ // block R+1 has been formed.
+ proto := config.Consensus[hdr.CurrentProtocol]
+ newLatest := spw.ledger.Latest()
+ for r := latest; r < newLatest; r++ {
+ // Wait for the signer to catch up; mostly relevant in tests.
+ spw.waitForSignature(r)
+
+ spw.broadcastSigs(r, proto)
+ }
+ latest = newLatest
+ }
+}
+
+// broadcastSigs periodically broadcasts pending signatures for rounds
+// that have not been able to form a state proof.
+//
+// Signature re-broadcasting happens in periods of proto.StateProofInterval
+// rounds.
+//
+// In the first half of each such period, signers of a block broadcast their
+// own signatures; this is the expected common path.
+//
+// In the second half of each such period, any signatures seen by this node
+// are broadcast.
+//
+// The broadcast schedule is randomized by the address of the block signer,
+// for load-balancing over time.
+func (spw *Worker) broadcastSigs(brnd basics.Round, proto config.ConsensusParams) {
+ if proto.StateProofInterval == 0 {
+ return
+ }
+
+ spw.mu.Lock()
+ defer spw.mu.Unlock()
+
+ var roundSigs map[basics.Round][]pendingSig
+ err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ if brnd%basics.Round(proto.StateProofInterval) < basics.Round(proto.StateProofInterval/2) {
+ roundSigs, err = getPendingSigsFromThisNode(tx)
+ } else {
+ roundSigs, err = getPendingSigs(tx)
+ }
+ return
+ })
+ if err != nil {
+ spw.log.Warnf("broadcastSigs: getPendingSigs: %v", err)
+ return
+ }
+
+ for rnd, sigs := range roundSigs {
+ if rnd > brnd {
+ // Signature is for later block than brnd. This could happen
+ // during catchup or testing. The caller's loop will eventually
+ // invoke this function with a suitably high brnd.
+ continue
+ }
+
+ for _, sig := range sigs {
+ // Randomize which sigs get broadcast over time.
+ addr64 := binary.LittleEndian.Uint64(sig.signer[:])
+ if addr64%(proto.StateProofInterval/2) != uint64(brnd)%(proto.StateProofInterval/2) {
+ continue
+ }
+
+ sfa := sigFromAddr{
+ SignerAddress: sig.signer,
+ Round: rnd,
+ Sig: sig.sig,
+ }
+ err = spw.net.Broadcast(context.Background(), protocol.StateProofSigTag,
+ protocol.Encode(&sfa), false, nil)
+ if err != nil {
+ spw.log.Warnf("broadcastSigs: Broadcast for %d: %v", rnd, err)
+ }
+ }
+ }
+}
+
+func (spw *Worker) deleteOldSigs(currentHdr *bookkeeping.BlockHeader) {
+ oldestRoundToRemove := GetOldestExpectedStateProof(currentHdr)
+
+ err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return deletePendingSigsBeforeRound(tx, oldestRoundToRemove)
+ })
+ if err != nil {
+ spw.log.Warnf("deletePendingSigsBeforeRound(%d): %v", oldestRoundToRemove, err)
+ }
+}
+
+func (spw *Worker) deleteOldBuilders(currentHdr *bookkeeping.BlockHeader) {
+ oldestRoundToRemove := GetOldestExpectedStateProof(currentHdr)
+
+ spw.mu.Lock()
+ defer spw.mu.Unlock()
+
+ for rnd := range spw.builders {
+ if rnd < oldestRoundToRemove {
+ delete(spw.builders, rnd)
+ }
+ }
+}
+
+func (spw *Worker) tryBroadcast() {
+ spw.mu.Lock()
+ defer spw.mu.Unlock()
+
+ sortedRounds := make([]basics.Round, 0, len(spw.builders))
+ for rnd := range spw.builders {
+ sortedRounds = append(sortedRounds, rnd)
+ }
+ sort.Slice(sortedRounds, func(i, j int) bool { return sortedRounds[i] < sortedRounds[j] })
+
+ for _, rnd := range sortedRounds { // Iterate over the builders in a sequential manner
+ b := spw.builders[rnd]
+ firstValid := spw.ledger.Latest()
+ acceptableWeight := verify.AcceptableStateProofWeight(&b.votersHdr, firstValid, logging.Base())
+ if b.SignedWeight() < acceptableWeight {
+ // Haven't signed enough to build the state proof at this time..
+ continue
+ }
+
+ if !b.Ready() {
+ // Haven't gotten enough signatures to get past ProvenWeight
+ continue
+ }
+
+ sp, err := b.Build()
+ if err != nil {
+ spw.log.Warnf("spw.tryBroadcast: building state proof for %d failed: %w", rnd, err)
+ continue
+ }
+
+ spw.log.Infof("spw.tryBroadcast: building state proof transaction for round %d", rnd)
+ var stxn transactions.SignedTxn
+ stxn.Txn.Type = protocol.StateProofTx
+ stxn.Txn.Sender = transactions.StateProofSender
+ stxn.Txn.FirstValid = firstValid
+ stxn.Txn.LastValid = firstValid + basics.Round(b.voters.Proto.MaxTxnLife)
+ stxn.Txn.GenesisHash = spw.ledger.GenesisHash()
+ stxn.Txn.StateProofTxnFields.StateProofType = protocol.StateProofBasic
+ stxn.Txn.StateProofTxnFields.StateProof = *sp
+ stxn.Txn.StateProofTxnFields.Message = b.message
+ err = spw.txnSender.BroadcastInternalSignedTxGroup([]transactions.SignedTxn{stxn})
+ if err != nil {
+ spw.log.Warnf("spw.tryBroadcast: broadcasting state proof txn for %d: %v", rnd, err)
+ // if this StateProofTxn was rejected, the next one would be rejected as well since state proof should be added in
+ // a sequential order
+ break
+ }
+ }
+}
+
+func (spw *Worker) invokeBuilder(r basics.Round) {
+ spw.mu.Lock()
+ spw.signed = r
+ spw.mu.Unlock()
+
+ select {
+ case spw.signedCh <- struct{}{}:
+ default:
+ }
+}
+
+func (spw *Worker) lastSignedBlock() basics.Round {
+ spw.mu.Lock()
+ defer spw.mu.Unlock()
+ return spw.signed
+}
+
+func (spw *Worker) waitForSignature(r basics.Round) {
+ for {
+ if r <= spw.lastSignedBlock() {
+ return
+ }
+
+ select {
+ case <-spw.ctx.Done():
+ return
+ case <-spw.signedCh:
+ }
+ }
+}
diff --git a/compactcert/db.go b/stateproof/db.go
index 4d71ea570..fa10c8c5e 100644
--- a/compactcert/db.go
+++ b/stateproof/db.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package compactcert
+package stateproof
import (
"database/sql"
@@ -26,21 +26,21 @@ import (
)
var schema = []string{
- // sigs tracks signatures used to build a compact certificate, for
- // rounds that have not formed a compact certificate yet.
+ // sigs tracks signatures used to build a state proofs, for
+ // rounds that have not formed a state proofs yet.
//
- // There can be at most one signature for a given (certrnd, signer):
+ // There can be at most one signature for a given (sprnd, signer):
// that is, a signer (account address) can produce at most one signature
- // for a given certrnd (the round of the block being signed).
+ // for a given sprnd (the round of the block being signed).
//
// Signatures produced by this node are special because we broadcast
// them early; other signatures are retransmitted later on.
`CREATE TABLE IF NOT EXISTS sigs (
- certrnd integer,
+ sprnd integer,
signer blob,
sig blob,
from_this_node integer,
- UNIQUE (certrnd, signer))`,
+ UNIQUE (sprnd, signer))`,
`CREATE INDEX IF NOT EXISTS sigs_from_this_node ON sigs (from_this_node)`,
}
@@ -55,7 +55,7 @@ func initDB(tx *sql.Tx) error {
for i, tableCreate := range schema {
_, err := tx.Exec(tableCreate)
if err != nil {
- return fmt.Errorf("could not create compactcert table %d: %v", i, err)
+ return fmt.Errorf("could not state proof table %d: %v", i, err)
}
}
@@ -63,7 +63,7 @@ func initDB(tx *sql.Tx) error {
}
func addPendingSig(tx *sql.Tx, rnd basics.Round, psig pendingSig) error {
- _, err := tx.Exec("INSERT INTO sigs (certrnd, signer, sig, from_this_node) VALUES (?, ?, ?, ?)",
+ _, err := tx.Exec("INSERT INTO sigs (sprnd, signer, sig, from_this_node) VALUES (?, ?, ?, ?)",
rnd,
psig.signer[:],
protocol.Encode(&psig.sig),
@@ -72,12 +72,12 @@ func addPendingSig(tx *sql.Tx, rnd basics.Round, psig pendingSig) error {
}
func deletePendingSigsBeforeRound(tx *sql.Tx, rnd basics.Round) error {
- _, err := tx.Exec("DELETE FROM sigs WHERE certrnd<?", rnd)
+ _, err := tx.Exec("DELETE FROM sigs WHERE sprnd<?", rnd)
return err
}
func getPendingSigs(tx *sql.Tx) (map[basics.Round][]pendingSig, error) {
- rows, err := tx.Query("SELECT certrnd, signer, sig, from_this_node FROM sigs")
+ rows, err := tx.Query("SELECT sprnd, signer, sig, from_this_node FROM sigs")
if err != nil {
return nil, err
}
@@ -87,7 +87,7 @@ func getPendingSigs(tx *sql.Tx) (map[basics.Round][]pendingSig, error) {
}
func getPendingSigsFromThisNode(tx *sql.Tx) (map[basics.Round][]pendingSig, error) {
- rows, err := tx.Query("SELECT certrnd, signer, sig, from_this_node FROM sigs WHERE from_this_node=1")
+ rows, err := tx.Query("SELECT sprnd, signer, sig, from_this_node FROM sigs WHERE from_this_node=1")
if err != nil {
return nil, err
}
diff --git a/compactcert/db_test.go b/stateproof/db_test.go
index dee7271d7..8dca09af4 100644
--- a/compactcert/db_test.go
+++ b/stateproof/db_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package compactcert
+package stateproof
import (
"context"
diff --git a/compactcert/msgp_gen.go b/stateproof/msgp_gen.go
index 9099e8bb6..75abd4aa7 100644
--- a/compactcert/msgp_gen.go
+++ b/stateproof/msgp_gen.go
@@ -1,4 +1,4 @@
-package compactcert
+package stateproof
// Code generated by github.com/algorand/msgp DO NOT EDIT.
@@ -22,15 +22,15 @@ func (z *sigFromAddr) MarshalMsg(b []byte) (o []byte) {
// omitempty: check for empty values
zb0001Len := uint32(3)
var zb0001Mask uint8 /* 4 bits */
- if (*z).Round.MsgIsZero() {
+ if (*z).SignerAddress.MsgIsZero() {
zb0001Len--
zb0001Mask |= 0x2
}
- if (*z).Sig.MsgIsZero() {
+ if (*z).Round.MsgIsZero() {
zb0001Len--
zb0001Mask |= 0x4
}
- if (*z).Signer.MsgIsZero() {
+ if (*z).Sig.MsgIsZero() {
zb0001Len--
zb0001Mask |= 0x8
}
@@ -38,19 +38,19 @@ func (z *sigFromAddr) MarshalMsg(b []byte) (o []byte) {
o = append(o, 0x80|uint8(zb0001Len))
if zb0001Len != 0 {
if (zb0001Mask & 0x2) == 0 { // if not empty
- // string "rnd"
- o = append(o, 0xa3, 0x72, 0x6e, 0x64)
- o = (*z).Round.MarshalMsg(o)
+ // string "a"
+ o = append(o, 0xa1, 0x61)
+ o = (*z).SignerAddress.MarshalMsg(o)
}
if (zb0001Mask & 0x4) == 0 { // if not empty
- // string "sig"
- o = append(o, 0xa3, 0x73, 0x69, 0x67)
- o = (*z).Sig.MarshalMsg(o)
+ // string "r"
+ o = append(o, 0xa1, 0x72)
+ o = (*z).Round.MarshalMsg(o)
}
if (zb0001Mask & 0x8) == 0 { // if not empty
- // string "signer"
- o = append(o, 0xa6, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72)
- o = (*z).Signer.MarshalMsg(o)
+ // string "s"
+ o = append(o, 0xa1, 0x73)
+ o = (*z).Sig.MarshalMsg(o)
}
}
return
@@ -76,9 +76,9 @@ func (z *sigFromAddr) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0001 > 0 {
zb0001--
- bts, err = (*z).Signer.UnmarshalMsg(bts)
+ bts, err = (*z).SignerAddress.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Signer")
+ err = msgp.WrapError(err, "struct-from-array", "SignerAddress")
return
}
}
@@ -121,19 +121,19 @@ func (z *sigFromAddr) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
switch string(field) {
- case "signer":
- bts, err = (*z).Signer.UnmarshalMsg(bts)
+ case "a":
+ bts, err = (*z).SignerAddress.UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "Signer")
+ err = msgp.WrapError(err, "SignerAddress")
return
}
- case "rnd":
+ case "r":
bts, err = (*z).Round.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Round")
return
}
- case "sig":
+ case "s":
bts, err = (*z).Sig.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Sig")
@@ -159,11 +159,11 @@ func (_ *sigFromAddr) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *sigFromAddr) Msgsize() (s int) {
- s = 1 + 7 + (*z).Signer.Msgsize() + 4 + (*z).Round.Msgsize() + 4 + (*z).Sig.Msgsize()
+ s = 1 + 2 + (*z).SignerAddress.Msgsize() + 2 + (*z).Round.Msgsize() + 2 + (*z).Sig.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
func (z *sigFromAddr) MsgIsZero() bool {
- return ((*z).Signer.MsgIsZero()) && ((*z).Round.MsgIsZero()) && ((*z).Sig.MsgIsZero())
+ return ((*z).SignerAddress.MsgIsZero()) && ((*z).Round.MsgIsZero()) && ((*z).Sig.MsgIsZero())
}
diff --git a/compactcert/msgp_gen_test.go b/stateproof/msgp_gen_test.go
index 0f206656e..ed1bd8206 100644
--- a/compactcert/msgp_gen_test.go
+++ b/stateproof/msgp_gen_test.go
@@ -1,7 +1,7 @@
//go:build !skip_msgp_testing
// +build !skip_msgp_testing
-package compactcert
+package stateproof
// Code generated by github.com/algorand/msgp DO NOT EDIT.
diff --git a/stateproof/recovery.go b/stateproof/recovery.go
new file mode 100644
index 000000000..5902a603e
--- /dev/null
+++ b/stateproof/recovery.go
@@ -0,0 +1,42 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package stateproof
+
+import (
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+// GetOldestExpectedStateProof returns the lowest round for which the node should create a state proof.
+func GetOldestExpectedStateProof(latestHeader *bookkeeping.BlockHeader) basics.Round {
+ proto := config.Consensus[latestHeader.CurrentProtocol]
+ if proto.StateProofInterval == 0 {
+ return 0
+ }
+
+ recentRoundOnRecoveryPeriod := basics.Round(uint64(latestHeader.Round) - uint64(latestHeader.Round)%proto.StateProofInterval)
+ oldestRoundOnRecoveryPeriod := recentRoundOnRecoveryPeriod.SubSaturate(basics.Round(proto.StateProofInterval * (proto.StateProofMaxRecoveryIntervals)))
+
+ nextStateproofRound := latestHeader.StateProofTracking[protocol.StateProofBasic].StateProofNextRound
+
+ if nextStateproofRound > oldestRoundOnRecoveryPeriod {
+ return nextStateproofRound
+ }
+ return oldestRoundOnRecoveryPeriod
+}
diff --git a/stateproof/signer.go b/stateproof/signer.go
new file mode 100644
index 000000000..697a56e9e
--- /dev/null
+++ b/stateproof/signer.go
@@ -0,0 +1,177 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package stateproof
+
+import (
+ "time"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+// sigFromAddr encapsulates a signature on a block header, which
+// will eventually be used to form a state proof for that
+// block.
+type sigFromAddr struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ SignerAddress basics.Address `codec:"a"`
+ Round basics.Round `codec:"r"`
+ Sig merklesignature.Signature `codec:"s"`
+}
+
+func (spw *Worker) signer(latest basics.Round) {
+ nextRnd := spw.nextStateProofRound(latest)
+ for { // Start signing StateProofs from nextRnd onwards
+ select {
+ case <-spw.ledger.Wait(nextRnd):
+ hdr, err := spw.ledger.BlockHdr(nextRnd)
+ if err != nil {
+ spw.log.Warnf("spw.signer(): BlockHdr(next %d): %v", nextRnd, err)
+ time.Sleep(1 * time.Second)
+ nextRnd = spw.nextStateProofRound(spw.ledger.Latest())
+ continue
+ }
+ spw.signStateProof(hdr)
+ spw.invokeBuilder(nextRnd)
+ nextRnd++
+
+ case <-spw.ctx.Done():
+ spw.wg.Done()
+ return
+ }
+ }
+}
+
+func (spw *Worker) nextStateProofRound(latest basics.Round) basics.Round {
+ var nextrnd basics.Round
+
+ for {
+ latestHdr, err := spw.ledger.BlockHdr(latest)
+ if err != nil {
+ spw.log.Warnf("spw.signer(): BlockHdr(latest %d): %v", latest, err)
+ time.Sleep(1 * time.Second)
+ latest = spw.ledger.Latest()
+ continue
+ }
+
+ nextrnd = latestHdr.StateProofTracking[protocol.StateProofBasic].StateProofNextRound
+ if nextrnd == 0 {
+ // State proofs are not enabled yet. Keep monitoring new blocks.
+ nextrnd = latest + 1
+ }
+ break
+ }
+
+ return nextrnd
+}
+
+func (spw *Worker) signStateProof(hdr bookkeeping.BlockHeader) {
+ proto := config.Consensus[hdr.CurrentProtocol]
+ if proto.StateProofInterval == 0 {
+ return
+ }
+
+ // Only sign blocks that are a multiple of StateProofInterval.
+ if hdr.Round%basics.Round(proto.StateProofInterval) != 0 {
+ return
+ }
+
+ keys := spw.accts.StateProofKeys(hdr.Round)
+ if len(keys) == 0 {
+ // No keys, nothing to do.
+ return
+ }
+
+ // votersRound is the round containing the merkle root commitment
+ // for the voters that are going to sign this block.
+ votersRound := hdr.Round.SubSaturate(basics.Round(proto.StateProofInterval))
+ votersHdr, err := spw.ledger.BlockHdr(votersRound)
+ if err != nil {
+ spw.log.Warnf("spw.signBlock(%d): BlockHdr(%d): %v", hdr.Round, votersRound, err)
+ return
+ }
+
+ if votersHdr.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment.IsEmpty() {
+ // No voter commitment, perhaps because state proofs were
+ // just enabled.
+ return
+ }
+
+ sigs := make([]sigFromAddr, 0, len(keys))
+ ids := make([]account.ParticipationID, 0, len(keys))
+ usedSigners := make([]*merklesignature.Signer, 0, len(keys))
+
+ stateproofMessage, err := GenerateStateProofMessage(spw.ledger, uint64(votersHdr.Round), hdr)
+ if err != nil {
+ spw.log.Warnf("spw.signBlock(%d): GenerateStateProofMessage: %v", hdr.Round, err)
+ return
+ }
+ hashedStateproofMessage := stateproofMessage.Hash()
+
+ for _, key := range keys {
+ if key.FirstValid > hdr.Round || hdr.Round > key.LastValid {
+ continue
+ }
+
+ if key.StateProofSecrets == nil {
+ spw.log.Warnf("spw.signBlock(%d): empty state proof secrets for round", hdr.Round)
+ continue
+ }
+
+ sig, err := key.StateProofSecrets.SignBytes(hashedStateproofMessage[:])
+ if err != nil {
+ spw.log.Warnf("spw.signBlock(%d): StateProofSecrets.Sign: %v", hdr.Round, err)
+ continue
+ }
+
+ sigs = append(sigs, sigFromAddr{
+ SignerAddress: key.Account,
+ Round: hdr.Round,
+ Sig: sig,
+ })
+ ids = append(ids, key.ParticipationID)
+ usedSigners = append(usedSigners, key.StateProofSecrets)
+ }
+
+ // any error in handle sig indicates the signature wasn't stored in disk, thus we cannot delete the key.
+ for i, sfa := range sigs {
+ if _, err := spw.handleSig(sfa, nil); err != nil {
+ spw.log.Warnf("spw.signBlock(%d): handleSig: %v", hdr.Round, err)
+ continue
+ }
+
+ spw.log.Infof("spw.signBlock(%d): sp message was signed with address %v", hdr.Round, sfa.SignerAddress)
+ firstRoundInKeyLifetime, err := usedSigners[i].FirstRoundInKeyLifetime() // Calculate first round of the key in order to delete all previous keys (and keep the current one for now)
+ if err != nil {
+ spw.log.Warnf("spw.signBlock(%d): Signer.FirstRoundInKeyLifetime: %v", hdr.Round, err)
+ continue
+ }
+ if firstRoundInKeyLifetime == 0 {
+ continue // No previous keys to delete (also underflows when subtracting 1)
+ }
+
+ // Safe to delete key for sfa.Round because the signature is now stored in the disk.
+ if err := spw.accts.DeleteStateProofKey(ids[i], basics.Round(firstRoundInKeyLifetime-1)); err != nil { // Subtract 1 to delete all keys up to this one
+ spw.log.Warnf("spw.signBlock(%d): DeleteStateProofKey: %v", hdr.Round, err)
+ }
+ }
+}
diff --git a/stateproof/stateproofMessageGenerator.go b/stateproof/stateproofMessageGenerator.go
new file mode 100644
index 000000000..8befeb72f
--- /dev/null
+++ b/stateproof/stateproofMessageGenerator.go
@@ -0,0 +1,146 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package stateproof
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklearray"
+ "github.com/algorand/go-algorand/crypto/stateproof"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+var errInvalidParams = errors.New("provided parameters are invalid")
+var errOutOfBound = errors.New("request pos is out of array bounds")
+var errProvenWeightOverflow = errors.New("overflow computing provenWeight")
+
+// The Array implementation for block headers, required to build the merkle tree from them.
+//msgp:ignore lightBlockHeaders
+type lightBlockHeaders []bookkeeping.LightBlockHeader
+
+func (b lightBlockHeaders) Length() uint64 {
+ return uint64(len(b))
+}
+
+func (b lightBlockHeaders) Marshal(pos uint64) (crypto.Hashable, error) {
+ if pos >= b.Length() {
+ return nil, fmt.Errorf("%w: pos - %d, array length - %d", errOutOfBound, pos, b.Length())
+ }
+ return &b[pos], nil
+}
+
+// GenerateStateProofMessage returns a stateproof message that contains all the necessary data for proving on Algorand's state.
+// In addition, it also includes the trusted data for the next stateproof verification
+func GenerateStateProofMessage(l BlockHeaderFetcher, votersRound uint64, latestRoundHeader bookkeeping.BlockHeader) (stateproofmsg.Message, error) {
+ proto := config.Consensus[latestRoundHeader.CurrentProtocol]
+ commitment, err := createHeaderCommitment(l, &proto, &latestRoundHeader)
+ if err != nil {
+ return stateproofmsg.Message{}, err
+ }
+
+ lnProvenWeight, err := calculateLnProvenWeight(&latestRoundHeader, &proto)
+ if err != nil {
+ return stateproofmsg.Message{}, err
+ }
+
+ return stateproofmsg.Message{
+ BlockHeadersCommitment: commitment.ToSlice(),
+ VotersCommitment: latestRoundHeader.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment,
+ LnProvenWeight: lnProvenWeight,
+ FirstAttestedRound: votersRound + 1,
+ LastAttestedRound: uint64(latestRoundHeader.Round),
+ }, nil
+}
+
+func calculateLnProvenWeight(latestRoundInInterval *bookkeeping.BlockHeader, proto *config.ConsensusParams) (uint64, error) {
+ totalWeight := latestRoundInInterval.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight.ToUint64()
+ provenWeight, overflowed := basics.Muldiv(totalWeight, uint64(proto.StateProofWeightThreshold), 1<<32)
+ if overflowed {
+ err := fmt.Errorf("calculateLnProvenWeight err: %w - %d %d * %d / (1<<32)",
+ errProvenWeightOverflow, latestRoundInInterval.Round, totalWeight, proto.StateProofWeightThreshold)
+ return 0, err
+ }
+
+ lnProvenWeight, err := stateproof.LnIntApproximation(provenWeight)
+ if err != nil {
+ return 0, err
+ }
+ return lnProvenWeight, nil
+}
+
+func createHeaderCommitment(l BlockHeaderFetcher, proto *config.ConsensusParams, latestRoundHeader *bookkeeping.BlockHeader) (crypto.GenericDigest, error) {
+ stateProofInterval := proto.StateProofInterval
+
+ if latestRoundHeader.Round < basics.Round(stateProofInterval) {
+ return nil, fmt.Errorf("createHeaderCommitment stateProofRound must be >= than stateproofInterval (%w)", errInvalidParams)
+ }
+
+ var lightHeaders lightBlockHeaders
+ lightHeaders, err := FetchLightHeaders(l, stateProofInterval, latestRoundHeader.Round)
+ if err != nil {
+ return crypto.GenericDigest{}, err
+ }
+
+ // Build merkle tree from encoded headers
+ tree, err := merklearray.BuildVectorCommitmentTree(
+ lightHeaders,
+ crypto.HashFactory{HashType: crypto.Sha256},
+ )
+ if err != nil {
+ return nil, err
+ }
+ return tree.Root(), nil
+}
+
+// FetchLightHeaders returns the headers of the blocks in the interval
+func FetchLightHeaders(l BlockHeaderFetcher, stateProofInterval uint64, latestRound basics.Round) ([]bookkeeping.LightBlockHeader, error) {
+ blkHdrArr := make(lightBlockHeaders, stateProofInterval)
+ firstRound := latestRound - basics.Round(stateProofInterval) + 1
+
+ for i := uint64(0); i < stateProofInterval; i++ {
+ rnd := firstRound + basics.Round(i)
+ hdr, err := l.BlockHdr(rnd)
+ if err != nil {
+ return nil, err
+ }
+ blkHdrArr[i] = hdr.ToLightBlockHeader()
+ }
+ return blkHdrArr, nil
+}
+
+// GenerateProofOfLightBlockHeaders sets up a tree over the blkHdrArr and returns merkle proof over one of the blocks.
+func GenerateProofOfLightBlockHeaders(stateProofInterval uint64, blkHdrArr lightBlockHeaders, blockIndex uint64) (*merklearray.SingleLeafProof, error) {
+ if blkHdrArr.Length() != stateProofInterval {
+ return nil, fmt.Errorf("received wrong amount of block headers. err: %w - %d != %d", errInvalidParams, blkHdrArr.Length(), stateProofInterval)
+ }
+
+ tree, err := merklearray.BuildVectorCommitmentTree(
+ blkHdrArr,
+ crypto.HashFactory{HashType: crypto.Sha256},
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return tree.ProveSingleLeaf(blockIndex)
+}
diff --git a/stateproof/stateproofMessageGenerator_test.go b/stateproof/stateproofMessageGenerator_test.go
new file mode 100644
index 000000000..1b909d775
--- /dev/null
+++ b/stateproof/stateproofMessageGenerator_test.go
@@ -0,0 +1,409 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package stateproof
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-deadlock"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklearray"
+ "github.com/algorand/go-algorand/crypto/stateproof"
+ "github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/network"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+type workerForStateProofMessageTests struct {
+ w *testWorkerStubs
+}
+
+func (s *workerForStateProofMessageTests) StateProofKeys(round basics.Round) []account.StateProofSecretsForRound {
+ return s.w.StateProofKeys(round)
+}
+
+func (s *workerForStateProofMessageTests) DeleteStateProofKey(id account.ParticipationID, round basics.Round) error {
+ return s.w.DeleteStateProofKey(id, round)
+}
+
+func (s *workerForStateProofMessageTests) Latest() basics.Round {
+ return s.w.Latest()
+}
+
+func (s *workerForStateProofMessageTests) Wait(round basics.Round) chan struct{} {
+ return s.w.Wait(round)
+}
+
+func (s *workerForStateProofMessageTests) GenesisHash() crypto.Digest {
+ return s.w.GenesisHash()
+}
+
+func (s *workerForStateProofMessageTests) BlockHdr(round basics.Round) (bookkeeping.BlockHeader, error) {
+ s.w.mu.Lock()
+ defer s.w.mu.Unlock()
+
+ element, ok := s.w.blocks[round]
+ if !ok {
+ return bookkeeping.BlockHeader{}, ledgercore.ErrNoEntry{Round: round}
+ }
+ return element, nil
+}
+
+func (s *workerForStateProofMessageTests) VotersForStateProof(round basics.Round) (*ledgercore.VotersForRound, error) {
+ voters := &ledgercore.VotersForRound{
+ Proto: config.Consensus[protocol.ConsensusCurrentVersion],
+ AddrToPos: make(map[basics.Address]uint64),
+ }
+
+ wt := uint64(0)
+ for i, k := range s.w.keysForVoters {
+ partWe := uint64((len(s.w.keysForVoters) + int(round) - i) * 10000)
+ voters.AddrToPos[k.Parent] = uint64(i)
+ voters.Participants = append(voters.Participants, basics.Participant{
+ PK: *k.StateProofSecrets.GetVerifier(),
+ Weight: partWe,
+ })
+ wt += partWe
+ }
+
+ tree, err := merklearray.BuildVectorCommitmentTree(voters.Participants, crypto.HashFactory{HashType: stateproof.HashType})
+ if err != nil {
+ return nil, err
+ }
+
+ voters.Tree = tree
+ voters.TotalWeight = basics.MicroAlgos{Raw: wt}
+ return voters, nil
+}
+
+func (s *workerForStateProofMessageTests) Broadcast(ctx context.Context, tag protocol.Tag, bytes []byte, b bool, peer network.Peer) error {
+ return s.w.Broadcast(ctx, tag, bytes, b, peer)
+}
+
+func (s *workerForStateProofMessageTests) RegisterHandlers(handlers []network.TaggedMessageHandler) {
+ s.w.RegisterHandlers(handlers)
+}
+
+func (s *workerForStateProofMessageTests) BroadcastInternalSignedTxGroup(txns []transactions.SignedTxn) error {
+ return s.w.BroadcastInternalSignedTxGroup(txns)
+}
+
+func (s *workerForStateProofMessageTests) addBlockWithStateProofHeaders(ccNextRound basics.Round) {
+
+ s.w.latest++
+
+ hdr := bookkeeping.BlockHeader{}
+ hdr.Round = s.w.latest
+ hdr.CurrentProtocol = protocol.ConsensusCurrentVersion
+
+ var ccBasic = bookkeeping.StateProofTrackingData{
+ StateProofVotersCommitment: make([]byte, stateproof.HashSize),
+ StateProofOnlineTotalWeight: basics.MicroAlgos{},
+ StateProofNextRound: 0,
+ }
+
+ if uint64(hdr.Round)%config.Consensus[hdr.CurrentProtocol].StateProofInterval == 0 {
+ voters, _ := s.VotersForStateProof(hdr.Round.SubSaturate(basics.Round(config.Consensus[hdr.CurrentProtocol].StateProofVotersLookback)))
+ ccBasic.StateProofVotersCommitment = voters.Tree.Root()
+ ccBasic.StateProofOnlineTotalWeight = voters.TotalWeight
+
+ }
+
+ ccBasic.StateProofNextRound = ccNextRound
+ hdr.StateProofTracking = map[protocol.StateProofType]bookkeeping.StateProofTrackingData{
+ protocol.StateProofBasic: ccBasic,
+ }
+
+ s.w.blocks[s.w.latest] = hdr
+ if s.w.waiters[s.w.latest] != nil {
+ close(s.w.waiters[s.w.latest])
+ }
+}
+
+func newWorkerForStateProofMessageStubs(keys []account.Participation, totalWeight int) *workerForStateProofMessageTests {
+ s := &testWorkerStubs{
+ t: nil,
+ mu: deadlock.Mutex{},
+ latest: 0,
+ waiters: make(map[basics.Round]chan struct{}),
+ waitersCount: make(map[basics.Round]int),
+ blocks: make(map[basics.Round]bookkeeping.BlockHeader),
+ keys: keys,
+ keysForVoters: keys,
+ sigmsg: make(chan []byte, 1024),
+ txmsg: make(chan transactions.SignedTxn, 1024),
+ totalWeight: totalWeight,
+ deletedStateProofKeys: map[account.ParticipationID]basics.Round{},
+ }
+ sm := workerForStateProofMessageTests{w: s}
+ return &sm
+}
+
+func (s *workerForStateProofMessageTests) advanceLatest(delta uint64) {
+ s.w.mu.Lock()
+ defer s.w.mu.Unlock()
+
+ for r := uint64(0); r < delta; r++ {
+ s.addBlockWithStateProofHeaders(s.w.blocks[s.w.latest].StateProofTracking[protocol.StateProofBasic].StateProofNextRound)
+ }
+}
+
+func TestStateProofMessage(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ var keys []account.Participation
+ for i := 0; i < 10; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerForStateProofMessageStubs(keys, len(keys))
+ dbs, _ := dbOpenTest(t, true)
+ w := NewWorker(dbs.Wdb, logging.TestingLog(t), s, s, s, s)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.w.latest--
+ s.addBlockWithStateProofHeaders(2 * basics.Round(proto.StateProofInterval))
+
+ w.Start()
+ defer w.Shutdown()
+
+ s.advanceLatest(proto.StateProofInterval + proto.StateProofInterval/2)
+
+ var lastMessage stateproofmsg.Message
+
+ for iter := uint64(0); iter < 5; iter++ {
+ s.advanceLatest(proto.StateProofInterval)
+
+ for {
+ tx, err := s.w.waitOnTxnWithTimeout(time.Second * 5)
+ a.NoError(err)
+
+ a.Equal(tx.Txn.Type, protocol.StateProofTx)
+
+ lastAttestedRound := basics.Round(tx.Txn.Message.LastAttestedRound)
+ if lastAttestedRound < basics.Round(iter+2)*basics.Round(proto.StateProofInterval) {
+ continue
+ }
+
+ a.Equal(lastAttestedRound, basics.Round(iter+2)*basics.Round(proto.StateProofInterval))
+ a.Equal(tx.Txn.Message.FirstAttestedRound, (iter+1)*proto.StateProofInterval+1)
+
+ verifySha256BlockHeadersCommitments(a, tx.Txn.Message, s.w.blocks)
+
+ if !lastMessage.MsgIsZero() {
+ verifier := stateproof.MkVerifierWithLnProvenWeight(lastMessage.VotersCommitment, lastMessage.LnProvenWeight, proto.StateProofStrengthTarget)
+
+ err := verifier.Verify(uint64(lastAttestedRound), tx.Txn.Message.Hash(), &tx.Txn.StateProof)
+ a.NoError(err)
+
+ }
+
+ lastMessage = tx.Txn.Message
+ break
+ }
+ }
+}
+
+func verifySha256BlockHeadersCommitments(a *require.Assertions, message stateproofmsg.Message, blocks map[basics.Round]bookkeeping.BlockHeader) {
+ blkHdrArr := make(lightBlockHeaders, message.LastAttestedRound-message.FirstAttestedRound+1)
+ for i := uint64(0); i < message.LastAttestedRound-message.FirstAttestedRound+1; i++ {
+ hdr := blocks[basics.Round(message.FirstAttestedRound+i)]
+ blkHdrArr[i] = hdr.ToLightBlockHeader()
+ }
+
+ tree, err := merklearray.BuildVectorCommitmentTree(blkHdrArr, crypto.HashFactory{HashType: crypto.Sha256})
+ a.NoError(err)
+
+ a.Equal(tree.Root(), crypto.GenericDigest(message.BlockHeadersCommitment))
+}
+
+func TestGenerateStateProofMessageForSmallRound(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ var keys []account.Participation
+ for i := 0; i < 2; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerForStateProofMessageStubs(keys[:], len(keys))
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.w.latest--
+ s.addBlockWithStateProofHeaders(2 * basics.Round(proto.StateProofInterval))
+
+ _, err := GenerateStateProofMessage(s, 240, s.w.blocks[s.w.latest])
+ a.ErrorIs(err, errInvalidParams)
+}
+
+func TestMessageLnApproxError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ var keys []account.Participation
+ for i := 0; i < 2; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerForStateProofMessageStubs(keys[:], len(keys))
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.w.latest--
+ s.addBlockWithStateProofHeaders(2 * basics.Round(proto.StateProofInterval))
+
+ s.advanceLatest(2*proto.StateProofInterval + proto.StateProofInterval/2)
+ tracking := s.w.blocks[512].StateProofTracking[protocol.StateProofBasic]
+ tracking.StateProofOnlineTotalWeight = basics.MicroAlgos{}
+ newtracking := tracking
+ s.w.blocks[512].StateProofTracking[protocol.StateProofBasic] = newtracking
+
+ _, err := GenerateStateProofMessage(s, 256, s.w.blocks[512])
+ a.ErrorIs(err, stateproof.ErrIllegalInputForLnApprox)
+}
+
+func TestMessageMissingHeaderOnInterval(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ var keys []account.Participation
+ for i := 0; i < 2; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerForStateProofMessageStubs(keys[:], len(keys))
+ s.w.latest--
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.addBlockWithStateProofHeaders(2 * basics.Round(proto.StateProofInterval))
+
+ s.advanceLatest(2*proto.StateProofInterval + proto.StateProofInterval/2)
+ delete(s.w.blocks, 510)
+
+ _, err := GenerateStateProofMessage(s, 256, s.w.blocks[512])
+ a.ErrorIs(err, ledgercore.ErrNoEntry{Round: 510})
+}
+
+func TestGenerateBlockProof(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ var keys []account.Participation
+ for i := 0; i < 10; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerForStateProofMessageStubs(keys, len(keys))
+ dbs, _ := dbOpenTest(t, true)
+ w := NewWorker(dbs.Wdb, logging.TestingLog(t), s, s, s, s)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.w.latest--
+ s.addBlockWithStateProofHeaders(2 * basics.Round(proto.StateProofInterval))
+
+ w.Start()
+ defer w.Shutdown()
+
+ s.advanceLatest(proto.StateProofInterval + proto.StateProofInterval/2)
+
+ for iter := uint64(0); iter < 5; iter++ {
+ s.advanceLatest(proto.StateProofInterval)
+
+ tx := <-s.w.txmsg
+ // we have a new tx. now attempt to fetch a block proof.
+ firstAttestedRound := tx.Txn.Message.FirstAttestedRound
+ lastAttestedRound := tx.Txn.Message.LastAttestedRound
+
+ headers, err := FetchLightHeaders(s, proto.StateProofInterval, basics.Round(lastAttestedRound))
+ a.NoError(err)
+ a.Equal(proto.StateProofInterval, uint64(len(headers)))
+
+ // attempting to get block proof for every block in the interval
+ for i := firstAttestedRound; i < lastAttestedRound; i++ {
+ headerIndex := i - firstAttestedRound
+ proof, err := GenerateProofOfLightBlockHeaders(proto.StateProofInterval, headers, headerIndex)
+ a.NoError(err)
+ a.NotNil(proof)
+
+ lightheader := headers[headerIndex]
+ err = merklearray.VerifyVectorCommitment(
+ tx.Txn.Message.BlockHeadersCommitment,
+ map[uint64]crypto.Hashable{headerIndex: &lightheader},
+ proof.ToProof())
+
+ a.NoError(err)
+ }
+ }
+}
+
+func TestGenerateBlockProofOnSmallArray(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ var keys []account.Participation
+ for i := 0; i < 10; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerForStateProofMessageStubs(keys, len(keys))
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ s.w.latest--
+ s.addBlockWithStateProofHeaders(2 * basics.Round(proto.StateProofInterval))
+
+ s.advanceLatest(2 * proto.StateProofInterval)
+ headers, err := FetchLightHeaders(s, proto.StateProofInterval, basics.Round(2*proto.StateProofInterval))
+ a.NoError(err)
+ headers = headers[1:]
+
+ _, err = GenerateProofOfLightBlockHeaders(proto.StateProofInterval, headers, 1)
+ a.ErrorIs(err, errInvalidParams)
+}
diff --git a/stateproof/verify/stateproof.go b/stateproof/verify/stateproof.go
new file mode 100644
index 000000000..66c6f09d4
--- /dev/null
+++ b/stateproof/verify/stateproof.go
@@ -0,0 +1,179 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package verify
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto/stateproof"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+var (
+ errStateProofCrypto = errors.New("state proof crypto error")
+ errStateProofParamCreation = errors.New("state proof param creation error")
+ errStateProofNotEnabled = errors.New("state proofs are not enabled")
+ errNotAtRightMultiple = errors.New("state proof is not in a valid round multiple")
+ errInvalidVotersRound = errors.New("invalid voters round")
+ errInsufficientWeight = errors.New("insufficient state proof weight")
+)
+
+// AcceptableStateProofWeight computes the acceptable signed weight
+// of a state proof if it were to appear in a transaction with a
+// particular firstValid round. Earlier rounds require a smaller proof.
+// votersHdr specifies the block that contains the vector commitment of
+// the voters for this state proof (and thus the state proof is for the interval
+// (votersHdr.Round(), votersHdr.Round()+StateProofInterval].
+//
+// logger must not be nil; use at least logging.Base()
+func AcceptableStateProofWeight(votersHdr *bookkeeping.BlockHeader, firstValid basics.Round, logger logging.Logger) uint64 {
+ proto := config.Consensus[votersHdr.CurrentProtocol]
+ latestRoundInProof := votersHdr.Round + basics.Round(proto.StateProofInterval)
+ total := votersHdr.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight
+
+ // The acceptable weight depends on the elapsed time (in rounds)
+ // from the block we are trying to construct a proof for.
+ // Start by subtracting the latest round number in the state proof interval.
+ // If that round hasn't even passed yet, require 100% votes in proof.
+ offset := firstValid.SubSaturate(latestRoundInProof)
+ if offset == 0 {
+ return total.ToUint64()
+ }
+
+ // During the first proto.StateProofInterval/2 blocks, the
+ // signatures are still being broadcast, so, continue requiring
+ // 100% votes.
+ offset = offset.SubSaturate(basics.Round(proto.StateProofInterval / 2))
+ if offset == 0 {
+ return total.ToUint64()
+ }
+
+ // In the next proto.StateProofInterval/2 blocks, linearly scale
+ // the acceptable weight from 100% to StateProofWeightThreshold.
+ // If we are outside of that window, accept any weight at or above
+ // StateProofWeightThreshold.
+ provenWeight, overflowed := basics.Muldiv(total.ToUint64(), uint64(proto.StateProofWeightThreshold), 1<<32)
+ if overflowed || provenWeight > total.ToUint64() {
+ // Shouldn't happen, but a safe fallback is to accept a larger proof.
+ logger.Warnf("AcceptableStateProofWeight(%d, %d, %d, %d) overflow provenWeight",
+ total, proto.StateProofInterval, latestRoundInProof, firstValid)
+ return 0
+ }
+
+ if offset >= basics.Round(proto.StateProofInterval/2) {
+ return provenWeight
+ }
+
+ scaledWeight, overflowed := basics.Muldiv(total.ToUint64()-provenWeight, proto.StateProofInterval/2-uint64(offset), proto.StateProofInterval/2)
+ if overflowed {
+ // Shouldn't happen, but a safe fallback is to accept a larger state proof.
+ logger.Warnf("AcceptableStateProofWeight(%d, %d, %d, %d) overflow scaledWeight",
+ total, proto.StateProofInterval, latestRoundInProof, firstValid)
+ return 0
+ }
+
+ w, overflowed := basics.OAdd(provenWeight, scaledWeight)
+ if overflowed {
+ // Shouldn't happen, but a safe fallback is to accept a larger state proof.
+ logger.Warnf("AcceptableStateProofWeight(%d, %d, %d, %d) overflow provenWeight (%d) + scaledWeight (%d)",
+ total, proto.StateProofInterval, latestRoundInProof, firstValid, provenWeight, scaledWeight)
+ return 0
+ }
+
+ return w
+}
+
+// GetProvenWeight computes the parameters for building or verifying
+// a state proof for the interval (votersHdr, latestRoundInProofHdr], using voters from block votersHdr.
+func GetProvenWeight(votersHdr *bookkeeping.BlockHeader, latestRoundInProofHdr *bookkeeping.BlockHeader) (uint64, error) {
+ proto := config.Consensus[votersHdr.CurrentProtocol]
+
+ if proto.StateProofInterval == 0 {
+ return 0, errStateProofNotEnabled
+ }
+
+ if votersHdr.Round%basics.Round(proto.StateProofInterval) != 0 {
+ err := fmt.Errorf("votersHdr %d not a multiple of %d",
+ votersHdr.Round, proto.StateProofInterval)
+ return 0, err
+ }
+
+ if latestRoundInProofHdr.Round != votersHdr.Round+basics.Round(proto.StateProofInterval) {
+ err := fmt.Errorf("certifying block %d not %d ahead of voters %d",
+ latestRoundInProofHdr.Round, proto.StateProofInterval, votersHdr.Round)
+ return 0, err
+ }
+
+ totalWeight := votersHdr.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight.ToUint64()
+ provenWeight, overflowed := basics.Muldiv(totalWeight, uint64(proto.StateProofWeightThreshold), 1<<32)
+ if overflowed {
+ err := fmt.Errorf("overflow computing provenWeight[%d]: %d * %d / (1<<32)",
+ latestRoundInProofHdr.Round, totalWeight, proto.StateProofWeightThreshold)
+ return 0, err
+ }
+
+ return provenWeight, nil
+}
+
+// ValidateStateProof checks that a state proof is valid.
+func ValidateStateProof(latestRoundInIntervalHdr *bookkeeping.BlockHeader, stateProof *stateproof.StateProof, votersHdr *bookkeeping.BlockHeader, atRound basics.Round, msg *stateproofmsg.Message) error {
+ proto := config.Consensus[latestRoundInIntervalHdr.CurrentProtocol]
+
+ if proto.StateProofInterval == 0 {
+ return fmt.Errorf("rounds = %d: %w", proto.StateProofInterval, errStateProofNotEnabled)
+ }
+
+ if latestRoundInIntervalHdr.Round%basics.Round(proto.StateProofInterval) != 0 {
+ return fmt.Errorf("state proof at %d for non-multiple of %d: %w", latestRoundInIntervalHdr.Round, proto.StateProofInterval, errNotAtRightMultiple)
+ }
+
+ votersRound := latestRoundInIntervalHdr.Round.SubSaturate(basics.Round(proto.StateProofInterval))
+ if votersRound != votersHdr.Round {
+ return fmt.Errorf("new state proof is for %d (voters %d), but votersHdr from %d: %w",
+ latestRoundInIntervalHdr.Round, votersRound, votersHdr.Round, errInvalidVotersRound)
+ }
+
+ acceptableWeight := AcceptableStateProofWeight(votersHdr, atRound, logging.Base())
+ if stateProof.SignedWeight < acceptableWeight {
+ return fmt.Errorf("insufficient weight at round %d: %d < %d: %w",
+ atRound, stateProof.SignedWeight, acceptableWeight, errInsufficientWeight)
+ }
+
+ provenWeight, err := GetProvenWeight(votersHdr, latestRoundInIntervalHdr)
+ if err != nil {
+ return fmt.Errorf("%v: %w", err, errStateProofParamCreation)
+ }
+
+ verifier, err := stateproof.MkVerifier(votersHdr.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment,
+ provenWeight,
+ config.Consensus[votersHdr.CurrentProtocol].StateProofStrengthTarget)
+ if err != nil {
+ return err
+ }
+
+ err = verifier.Verify(uint64(latestRoundInIntervalHdr.Round), msg.Hash(), stateProof)
+ if err != nil {
+ return fmt.Errorf("%v: %w", err, errStateProofCrypto)
+ }
+ return nil
+}
diff --git a/stateproof/verify/stateproof_test.go b/stateproof/verify/stateproof_test.go
new file mode 100644
index 000000000..38a76c2b8
--- /dev/null
+++ b/stateproof/verify/stateproof_test.go
@@ -0,0 +1,168 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package verify
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto/stateproof"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestValidateStateProof(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ spHdr := &bookkeeping.BlockHeader{}
+ sp := &stateproof.StateProof{}
+ votersHdr := &bookkeeping.BlockHeader{}
+ var atRound basics.Round
+ msg := &stateproofmsg.Message{BlockHeadersCommitment: []byte("this is an arbitrary message")}
+
+ // will definitely fail with nothing set up
+ err := ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
+ require.ErrorIs(t, err, errStateProofNotEnabled)
+
+ spHdr.CurrentProtocol = "TestValidateStateProof"
+ spHdr.Round = 1
+ proto := config.Consensus[spHdr.CurrentProtocol]
+ proto.StateProofInterval = 2
+ proto.StateProofStrengthTarget = 256
+ proto.StateProofWeightThreshold = (1 << 32) * 30 / 100
+ config.Consensus[spHdr.CurrentProtocol] = proto
+
+ err = ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
+ require.ErrorIs(t, err, errNotAtRightMultiple)
+
+ spHdr.Round = 4
+ votersHdr.Round = 4
+ err = ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
+ require.ErrorIs(t, err, errInvalidVotersRound)
+
+ votersHdr.Round = 2
+ err = ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
+ require.ErrorIs(t, err, errStateProofParamCreation)
+
+ votersHdr.CurrentProtocol = spHdr.CurrentProtocol
+ err = ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
+ // since proven weight is zero, we cann't create the verifier
+ require.ErrorIs(t, err, stateproof.ErrIllegalInputForLnApprox)
+
+ votersHdr.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
+ cc := votersHdr.StateProofTracking[protocol.StateProofBasic]
+ cc.StateProofOnlineTotalWeight.Raw = 100
+ votersHdr.StateProofTracking[protocol.StateProofBasic] = cc
+ err = ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
+ require.ErrorIs(t, err, errInsufficientWeight)
+
+ // Require 100% of the weight to be signed in order to accept stateproof before interval/2 rounds has passed from the latest round attested (optimal case)
+ sp.SignedWeight = 99 // suboptimal signed weight
+ err = ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
+ require.ErrorIs(t, err, errInsufficientWeight)
+
+ latestRoundInProof := votersHdr.Round + basics.Round(proto.StateProofInterval)
+ atRound = latestRoundInProof + basics.Round(proto.StateProofInterval/2)
+ err = ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
+ require.ErrorIs(t, err, errInsufficientWeight)
+
+ // This suboptimal signed weight should be enough for this round
+ atRound++
+ err = ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
+ // still err, but a different err case to cover
+ require.ErrorIs(t, err, errStateProofCrypto)
+
+ // Above cases leave validateStateProof() with 100% coverage.
+ // crypto/stateproof.Verify has its own tests
+}
+
+func TestAcceptableStateProofWeight(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var votersHdr bookkeeping.BlockHeader
+ var firstValid basics.Round
+ logger := logging.TestingLog(t)
+
+ votersHdr.CurrentProtocol = "TestAcceptableStateProofWeight"
+ proto := config.Consensus[votersHdr.CurrentProtocol]
+ proto.StateProofInterval = 2
+ config.Consensus[votersHdr.CurrentProtocol] = proto
+ out := AcceptableStateProofWeight(&votersHdr, firstValid, logger)
+ require.Equal(t, uint64(0), out)
+
+ votersHdr.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
+ cc := votersHdr.StateProofTracking[protocol.StateProofBasic]
+ cc.StateProofOnlineTotalWeight.Raw = 100
+ votersHdr.StateProofTracking[protocol.StateProofBasic] = cc
+ out = AcceptableStateProofWeight(&votersHdr, firstValid, logger)
+ require.Equal(t, uint64(100), out)
+
+ // this should exercise the second return case
+ firstValid = basics.Round(3)
+ out = AcceptableStateProofWeight(&votersHdr, firstValid, logger)
+ require.Equal(t, uint64(100), out)
+
+ firstValid = basics.Round(6)
+ proto.StateProofWeightThreshold = 999999999
+ config.Consensus[votersHdr.CurrentProtocol] = proto
+ out = AcceptableStateProofWeight(&votersHdr, firstValid, logger)
+ require.Equal(t, uint64(0x17), out)
+
+ proto.StateProofInterval = 10000
+ votersHdr.Round = 10000
+ firstValid = basics.Round(29000 - 2)
+ config.Consensus[votersHdr.CurrentProtocol] = proto
+ cc.StateProofOnlineTotalWeight.Raw = 0x7fffffffffffffff
+ votersHdr.StateProofTracking[protocol.StateProofBasic] = cc
+ proto.StateProofWeightThreshold = 0x7fffffff
+ config.Consensus[votersHdr.CurrentProtocol] = proto
+ out = AcceptableStateProofWeight(&votersHdr, firstValid, logger)
+ require.Equal(t, uint64(0x4cd35a85213a92a2), out)
+
+ // Covers everything except "overflow that shouldn't happen" branches
+}
+
+func TestStateProofParams(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var votersHdr bookkeeping.BlockHeader
+ var hdr bookkeeping.BlockHeader
+
+ _, err := GetProvenWeight(&votersHdr, &hdr)
+ require.Error(t, err) // not enabled
+
+ votersHdr.CurrentProtocol = "TestStateProofParams"
+ proto := config.Consensus[votersHdr.CurrentProtocol]
+ proto.StateProofInterval = 2
+ config.Consensus[votersHdr.CurrentProtocol] = proto
+ votersHdr.Round = 1
+ _, err = GetProvenWeight(&votersHdr, &hdr)
+ require.Error(t, err) // wrong round
+
+ votersHdr.Round = 2
+ hdr.Round = 3
+ _, err = GetProvenWeight(&votersHdr, &hdr)
+ require.Error(t, err) // wrong round
+
+ // Covers all cases except overflow
+}
diff --git a/compactcert/worker.go b/stateproof/worker.go
index 38341d7ad..ca277dac8 100644
--- a/compactcert/worker.go
+++ b/stateproof/worker.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package compactcert
+package stateproof
import (
"context"
@@ -23,9 +23,10 @@ import (
"github.com/algorand/go-deadlock"
- "github.com/algorand/go-algorand/crypto/compactcert"
+ "github.com/algorand/go-algorand/crypto/stateproof"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
@@ -34,16 +35,17 @@ import (
)
type builder struct {
- *compactcert.Builder
+ *stateproof.Builder
voters *ledgercore.VotersForRound
votersHdr bookkeeping.BlockHeader
+ message stateproofmsg.Message
}
-// Worker builds compact certificates, by broadcasting
+// Worker builds state proofs, by broadcasting
// signatures using this node's participation keys, by collecting
// signatures sent by others, and by sending out the resulting
-// compact certs in a transaction.
+// state proof in a transaction.
type Worker struct {
// The mutex serializes concurrent message handler invocations
// from the network stack.
@@ -86,34 +88,34 @@ func NewWorker(db db.Accessor, log logging.Logger, accts Accounts, ledger Ledger
}
// Start starts the goroutines for the worker.
-func (ccw *Worker) Start() {
- err := ccw.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+func (spw *Worker) Start() {
+ err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
return initDB(tx)
})
if err != nil {
- ccw.log.Warnf("ccw.Start(): initDB: %v", err)
+ spw.log.Warnf("spw.Start(): initDB: %v", err)
return
}
- ccw.initBuilders()
+ spw.initBuilders()
handlers := []network.TaggedMessageHandler{
- {Tag: protocol.CompactCertSigTag, MessageHandler: network.HandlerFunc(ccw.handleSigMessage)},
+ {Tag: protocol.StateProofSigTag, MessageHandler: network.HandlerFunc(spw.handleSigMessage)},
}
- ccw.net.RegisterHandlers(handlers)
+ spw.net.RegisterHandlers(handlers)
- latest := ccw.ledger.Latest()
+ latest := spw.ledger.Latest()
- ccw.wg.Add(1)
- go ccw.signer(latest)
+ spw.wg.Add(1)
+ go spw.signer(latest)
- ccw.wg.Add(1)
- go ccw.builder(latest)
+ spw.wg.Add(1)
+ go spw.builder(latest)
}
// Shutdown stops any goroutines associated with this worker.
-func (ccw *Worker) Shutdown() {
- ccw.shutdown()
- ccw.wg.Wait()
- ccw.db.Close()
+func (spw *Worker) Shutdown() {
+ spw.shutdown()
+ spw.wg.Wait()
+ spw.db.Close()
}
diff --git a/stateproof/worker_test.go b/stateproof/worker_test.go
new file mode 100644
index 000000000..4c35c8c69
--- /dev/null
+++ b/stateproof/worker_test.go
@@ -0,0 +1,1291 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package stateproof
+
+import (
+ "context"
+ "database/sql"
+ "encoding/binary"
+ "fmt"
+ "io/ioutil"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklearray"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/algorand/go-algorand/crypto/stateproof"
+ "github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/network"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/db"
+ "github.com/algorand/go-deadlock"
+)
+
+type testWorkerStubs struct {
+ t testing.TB
+ mu deadlock.Mutex
+ latest basics.Round
+ waiters map[basics.Round]chan struct{}
+ waitersCount map[basics.Round]int
+ blocks map[basics.Round]bookkeeping.BlockHeader
+ keys []account.Participation
+ keysForVoters []account.Participation
+ sigmsg chan []byte
+ txmsg chan transactions.SignedTxn
+ totalWeight int
+ deletedStateProofKeys map[account.ParticipationID]basics.Round
+}
+
+func newWorkerStubs(t testing.TB, keys []account.Participation, totalWeight int) *testWorkerStubs {
+ s := &testWorkerStubs{
+ t: nil,
+ mu: deadlock.Mutex{},
+ latest: 0,
+ waiters: make(map[basics.Round]chan struct{}),
+ waitersCount: make(map[basics.Round]int),
+ blocks: make(map[basics.Round]bookkeeping.BlockHeader),
+ keys: keys,
+ keysForVoters: keys,
+ sigmsg: make(chan []byte, 1024*1024),
+ txmsg: make(chan transactions.SignedTxn, 1024),
+ totalWeight: totalWeight,
+ deletedStateProofKeys: map[account.ParticipationID]basics.Round{},
+ }
+ s.latest--
+ s.addBlock(2 * basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval))
+ return s
+}
+
+func (s *testWorkerStubs) addBlock(spNextRound basics.Round) {
+ s.latest++
+
+ hdr := bookkeeping.BlockHeader{}
+ hdr.Round = s.latest
+ hdr.CurrentProtocol = protocol.ConsensusCurrentVersion
+
+ var stateProofBasic = bookkeeping.StateProofTrackingData{
+ StateProofVotersCommitment: make([]byte, stateproof.HashSize),
+ StateProofOnlineTotalWeight: basics.MicroAlgos{},
+ StateProofNextRound: 0,
+ }
+ stateProofBasic.StateProofOnlineTotalWeight.Raw = uint64(s.totalWeight)
+
+ if hdr.Round > 0 {
+ // Just so it's not zero, since the signer logic checks for all-zeroes
+ stateProofBasic.StateProofVotersCommitment[1] = 0x12
+ }
+
+ stateProofBasic.StateProofNextRound = spNextRound
+ hdr.StateProofTracking = map[protocol.StateProofType]bookkeeping.StateProofTrackingData{
+ protocol.StateProofBasic: stateProofBasic,
+ }
+
+ s.blocks[s.latest] = hdr
+
+ if s.waiters[s.latest] != nil {
+ close(s.waiters[s.latest])
+ }
+}
+
+func (s *testWorkerStubs) StateProofKeys(rnd basics.Round) (out []account.StateProofSecretsForRound) {
+ for _, part := range s.keys {
+ partRecord := account.ParticipationRecord{
+ ParticipationID: part.ID(),
+ Account: part.Parent,
+ FirstValid: part.FirstValid,
+ LastValid: part.LastValid,
+ KeyDilution: part.KeyDilution,
+ LastVote: 0,
+ LastBlockProposal: 0,
+ LastStateProof: 0,
+ EffectiveFirst: 0,
+ EffectiveLast: 0,
+ VRF: part.VRF,
+ Voting: part.Voting,
+ }
+ signerInRound := part.StateProofSecrets.GetSigner(uint64(rnd))
+ partRecordForRound := account.StateProofSecretsForRound{
+ ParticipationRecord: partRecord,
+ StateProofSecrets: signerInRound,
+ }
+ out = append(out, partRecordForRound)
+ }
+ return
+}
+
+func (s *testWorkerStubs) DeleteStateProofKey(id account.ParticipationID, round basics.Round) error {
+ s.mu.Lock()
+ s.deletedStateProofKeys[id] = round
+ s.mu.Unlock()
+
+ return nil
+}
+func (s *testWorkerStubs) GetNumDeletedKeys() int {
+ s.mu.Lock()
+ numDeltedKeys := len(s.deletedStateProofKeys)
+ s.mu.Unlock()
+
+ return numDeltedKeys
+}
+
+func (s *testWorkerStubs) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ hdr, ok := s.blocks[r]
+ if !ok {
+ return hdr, ledgercore.ErrNoEntry{
+ Round: r,
+ Latest: s.latest,
+ Committed: s.latest,
+ }
+ }
+
+ return hdr, nil
+}
+
+func (s *testWorkerStubs) VotersForStateProof(r basics.Round) (*ledgercore.VotersForRound, error) {
+ voters := &ledgercore.VotersForRound{
+ Proto: config.Consensus[protocol.ConsensusCurrentVersion],
+ AddrToPos: make(map[basics.Address]uint64),
+ TotalWeight: basics.MicroAlgos{Raw: uint64(s.totalWeight)},
+ }
+
+ for i, k := range s.keysForVoters {
+ voters.AddrToPos[k.Parent] = uint64(i)
+ voters.Participants = append(voters.Participants, basics.Participant{
+ PK: *k.StateProofSecrets.GetVerifier(),
+ Weight: 1,
+ })
+ }
+
+ tree, err := merklearray.BuildVectorCommitmentTree(voters.Participants, crypto.HashFactory{HashType: stateproof.HashType})
+ if err != nil {
+ return nil, err
+ }
+
+ voters.Tree = tree
+ return voters, nil
+}
+
+func (s *testWorkerStubs) GenesisHash() crypto.Digest {
+ return crypto.Digest{0x01, 0x02, 0x03, 0x04}
+}
+
+func (s *testWorkerStubs) Latest() basics.Round {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.latest
+}
+
+func (s *testWorkerStubs) Wait(r basics.Round) chan struct{} {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.waiters[r] == nil {
+ s.waiters[r] = make(chan struct{})
+ s.waitersCount[r] = 0
+ if r <= s.latest {
+ close(s.waiters[r])
+ }
+ }
+ s.waitersCount[r] = s.waitersCount[r] + 1
+ return s.waiters[r]
+}
+
+func (s *testWorkerStubs) Broadcast(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except network.Peer) error {
+ require.Equal(s.t, tag, protocol.StateProofSigTag)
+ s.sigmsg <- data
+ return nil
+}
+
+func (s *testWorkerStubs) BroadcastInternalSignedTxGroup(tx []transactions.SignedTxn) error {
+ require.Equal(s.t, len(tx), 1)
+ s.txmsg <- tx[0]
+ return nil
+}
+
+func (s *testWorkerStubs) RegisterHandlers([]network.TaggedMessageHandler) {
+}
+
+func (s *testWorkerStubs) advanceLatest(delta uint64) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ for r := uint64(0); r < delta; r++ {
+ s.addBlock(s.blocks[s.latest].StateProofTracking[protocol.StateProofBasic].StateProofNextRound)
+ }
+}
+
+func (s *testWorkerStubs) waitOnSigWithTimeout(timeout time.Duration) ([]byte, error) {
+ select {
+ case sig := <-s.sigmsg:
+ return sig, nil
+ case <-time.After(timeout):
+ return nil, fmt.Errorf("timeout waiting on sigmsg")
+ }
+}
+
+func (s *testWorkerStubs) waitOnTxnWithTimeout(timeout time.Duration) (transactions.SignedTxn, error) {
+ select {
+ case signedTx := <-s.txmsg:
+ return signedTx, nil
+ case <-time.After(timeout):
+ return transactions.SignedTxn{}, fmt.Errorf("timeout waiting on sigmsg")
+ }
+}
+
+func newTestWorkerDB(t testing.TB, s *testWorkerStubs, dba db.Accessor) *Worker {
+ return NewWorker(dba, logging.TestingLog(t), s, s, s, s)
+}
+
+func newTestWorker(t testing.TB, s *testWorkerStubs) *Worker {
+ dbs, _ := dbOpenTest(t, true)
+ return newTestWorkerDB(t, s, dbs.Wdb)
+}
+
+// You must call defer part.Close() after calling this function,
+// since it creates a DB accessor but the caller must close it (required for mss)
+func newPartKey(t testing.TB, parent basics.Address) account.PersistedParticipation {
+ fn := fmt.Sprintf("%s.%d", strings.ReplaceAll(t.Name(), "/", "."), crypto.RandUint64())
+ partDB, err := db.MakeAccessor(fn, false, true)
+ require.NoError(t, err)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ part, err := account.FillDBWithParticipationKeys(partDB, parent, 0, basics.Round(15*proto.StateProofInterval), proto.DefaultKeyDilution)
+ require.NoError(t, err)
+
+ return part
+}
+
+func TestWorkerAllSigs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var keys []account.Participation
+ for i := 0; i < 10; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubs(t, keys, len(keys))
+ w := newTestWorker(t, s)
+ w.Start()
+ defer w.Shutdown()
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.advanceLatest(proto.StateProofInterval + proto.StateProofInterval/2)
+
+ // Go through several iterations, making sure that we get
+ // the signatures and certs broadcast at each round.
+ for iter := 0; iter < 5; iter++ {
+ s.advanceLatest(proto.StateProofInterval)
+
+ for i := 0; i < len(keys); i++ {
+ // Expect all signatures to be broadcast.
+ _, err := s.waitOnSigWithTimeout(time.Second * 2)
+ require.NoError(t, err)
+ }
+
+ // Expect a state proof to be formed.
+ for {
+ tx, err := s.waitOnTxnWithTimeout(time.Second * 5)
+ require.NoError(t, err)
+
+ lastAttestedRound := basics.Round(tx.Txn.Message.LastAttestedRound)
+ require.Equal(t, tx.Txn.Type, protocol.StateProofTx)
+ if lastAttestedRound < basics.Round(iter+2)*basics.Round(proto.StateProofInterval) {
+ continue
+ }
+
+ require.Equal(t, lastAttestedRound, basics.Round(iter+2)*basics.Round(proto.StateProofInterval))
+
+ stateProofLatestRound, err := s.BlockHdr(lastAttestedRound)
+ require.NoError(t, err)
+
+ votersRound := lastAttestedRound.SubSaturate(basics.Round(proto.StateProofInterval))
+
+ msg, err := GenerateStateProofMessage(s, uint64(votersRound), stateProofLatestRound)
+ require.NoError(t, err)
+ require.Equal(t, msg, tx.Txn.Message)
+
+ provenWeight, overflowed := basics.Muldiv(uint64(s.totalWeight), uint64(proto.StateProofWeightThreshold), 1<<32)
+ require.False(t, overflowed)
+
+ voters, err := s.VotersForStateProof(lastAttestedRound - basics.Round(proto.StateProofInterval) - basics.Round(proto.StateProofVotersLookback))
+ require.NoError(t, err)
+
+ verif, err := stateproof.MkVerifier(voters.Tree.Root(), provenWeight, proto.StateProofStrengthTarget)
+ require.NoError(t, err)
+
+ err = verif.Verify(uint64(lastAttestedRound), tx.Txn.Message.Hash(), &tx.Txn.StateProof)
+ require.NoError(t, err)
+ break
+ }
+ }
+}
+
+func TestWorkerPartialSigs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var keys []account.Participation
+ for i := 0; i < 7; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubs(t, keys, 10)
+ w := newTestWorker(t, s)
+ w.Start()
+ defer w.Shutdown()
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.advanceLatest(proto.StateProofInterval + proto.StateProofInterval/2)
+ s.advanceLatest(proto.StateProofInterval)
+
+ for i := 0; i < len(keys); i++ {
+ // Expect all signatures to be broadcast.
+ _, err := s.waitOnSigWithTimeout(time.Second * 2)
+ require.NoError(t, err)
+ }
+
+ // No state proof should be formed yet: not enough sigs for a stateproof this early.
+ select {
+ case <-s.txmsg:
+ t.Fatal("state proof formed too early")
+ case <-time.After(time.Second):
+ }
+
+ // Expect a state proof to be formed in the next StateProofInterval/2.
+ s.advanceLatest(proto.StateProofInterval / 2)
+
+ tx, err := s.waitOnTxnWithTimeout(time.Second * 5)
+ require.NoError(t, err)
+
+ lastAttestedRound := basics.Round(tx.Txn.Message.LastAttestedRound)
+ require.Equal(t, tx.Txn.Type, protocol.StateProofTx)
+ require.Equal(t, lastAttestedRound, 2*basics.Round(proto.StateProofInterval))
+
+ stateProofLatestRound, err := s.BlockHdr(lastAttestedRound)
+ require.NoError(t, err)
+
+ votersRound := lastAttestedRound.SubSaturate(basics.Round(proto.StateProofInterval))
+
+ msg, err := GenerateStateProofMessage(s, uint64(votersRound), stateProofLatestRound)
+ require.NoError(t, err)
+ require.Equal(t, msg, tx.Txn.Message)
+
+ provenWeight, overflowed := basics.Muldiv(uint64(s.totalWeight), uint64(proto.StateProofWeightThreshold), 1<<32)
+ require.False(t, overflowed)
+
+ voters, err := s.VotersForStateProof(lastAttestedRound - basics.Round(proto.StateProofInterval) - basics.Round(proto.StateProofVotersLookback))
+ require.NoError(t, err)
+
+ verif, err := stateproof.MkVerifier(voters.Tree.Root(), provenWeight, proto.StateProofStrengthTarget)
+ require.NoError(t, err)
+ err = verif.Verify(uint64(lastAttestedRound), msg.Hash(), &tx.Txn.StateProof)
+ require.NoError(t, err)
+}
+
+func TestWorkerInsufficientSigs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var keys []account.Participation
+ for i := 0; i < 2; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubs(t, keys, 10)
+ w := newTestWorker(t, s)
+ w.Start()
+ defer w.Shutdown()
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.advanceLatest(3 * proto.StateProofInterval)
+
+ for i := 0; i < len(keys); i++ {
+ // Expect all signatures to be broadcast.
+ _, err := s.waitOnSigWithTimeout(time.Second * 2)
+ require.NoError(t, err)
+ }
+
+ // No state proof should be formed: not enough sigs.
+ select {
+ case <-s.txmsg:
+ t.Fatal("state proof formed without enough sigs")
+ case <-time.After(time.Second):
+ }
+}
+
+func TestWorkerRestart(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var keys []account.Participation
+ for i := 0; i < 10; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubs(t, keys, 10)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.advanceLatest(3*proto.StateProofInterval - 1)
+
+ dbRand := crypto.RandUint64()
+
+ formedAt := -1
+ for i := 0; formedAt < 0 && i < len(keys); i++ {
+ // Give one key at a time to the worker, and then shut it down,
+ // to make sure that it will correctly save and restore these
+ // signatures across restart.
+ s.keys = keys[i : i+1]
+ dbs, _ := dbOpenTestRand(t, true, dbRand)
+ w := newTestWorkerDB(t, s, dbs.Wdb)
+ w.Start()
+
+ // Check if the cert formed
+ select {
+ case <-s.txmsg:
+ formedAt = i
+ case <-time.After(time.Second):
+ }
+
+ w.Shutdown()
+ }
+
+ require.True(t, formedAt > 1)
+ require.True(t, formedAt < 5)
+}
+
+func TestWorkerHandleSig(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var keys []account.Participation
+ for i := 0; i < 2; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubs(t, keys, 10)
+ w := newTestWorker(t, s)
+ w.Start()
+ defer w.Shutdown()
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.advanceLatest(3 * proto.StateProofInterval)
+
+ for i := 0; i < len(keys); i++ {
+ // Expect all signatures to be broadcast.
+ msg, err := s.waitOnSigWithTimeout(time.Second * 2)
+ require.NoError(t, err)
+
+ res := w.handleSigMessage(network.IncomingMessage{
+ Data: msg,
+ })
+
+ // This should be a dup signature, so should not be broadcast
+ // but also not disconnected.
+ require.Equal(t, res.Action, network.Ignore)
+ }
+}
+
+func TestSignerDeletesUnneededStateProofKeys(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var keys []account.Participation
+ nParticipants := 2
+ for i := 0; i < nParticipants; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubs(t, keys, 10)
+ w := newTestWorker(t, s)
+ w.Start()
+ defer w.Shutdown()
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.advanceLatest(3 * proto.StateProofInterval)
+ // Expect all signatures to be broadcast.
+
+ require.Zero(t, s.GetNumDeletedKeys())
+ w.signStateProof(s.blocks[basics.Round(proto.StateProofInterval)])
+ require.Equal(t, s.GetNumDeletedKeys(), nParticipants)
+}
+
+func TestSignerDoesntDeleteKeysWhenDBDoesntStoreSigs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var keys []account.Participation
+ for i := 0; i < 2; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubs(t, keys, 10)
+ dbs, _ := dbOpenTest(t, true)
+
+ logger := logging.NewLogger()
+ logger.SetOutput(ioutil.Discard)
+
+ w := NewWorker(dbs.Wdb, logger, s, s, s, s)
+
+ w.Start()
+ defer w.Shutdown()
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.advanceLatest(3 * proto.StateProofInterval)
+ // Expect all signatures to be broadcast.
+
+ require.NoError(t, w.db.Atomic(
+ func(ctx context.Context, tx *sql.Tx) error {
+ _, err := tx.Exec("DROP TABLE sigs")
+ return err
+ }),
+ )
+
+ w.signStateProof(s.blocks[3*basics.Round(proto.StateProofInterval)])
+ require.Zero(t, s.GetNumDeletedKeys())
+}
+
+func TestWorkerRemoveBuildersAndSignatures(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ const expectedStateProofs = 8
+ var keys []account.Participation
+ for i := 0; i < 10; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubs(t, keys, len(keys))
+ w := newTestWorker(t, s)
+ w.Start()
+ defer w.Shutdown()
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.advanceLatest(proto.StateProofInterval + proto.StateProofInterval/2)
+
+ for iter := 0; iter < expectedStateProofs; iter++ {
+ s.advanceLatest(proto.StateProofInterval)
+ tx := <-s.txmsg
+ a.Equal(tx.Txn.Type, protocol.StateProofTx)
+ }
+
+ err := waitForBuilderAndSignerToWaitOnRound(s)
+ a.NoError(err)
+ a.Equal(expectedStateProofs, len(w.builders))
+
+ var roundSigs map[basics.Round][]pendingSig
+ err = w.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ roundSigs, err = getPendingSigs(tx)
+ return
+ })
+
+ a.Equal(expectedStateProofs, len(roundSigs))
+
+ // add block that confirm a state proof for interval: expectedStateProofs - 1
+ s.mu.Lock()
+ s.addBlock(basics.Round((expectedStateProofs - 1) * config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval))
+ s.mu.Unlock()
+
+ err = waitForBuilderAndSignerToWaitOnRound(s)
+ a.NoError(err)
+ a.Equal(3, len(w.builders))
+
+ err = w.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ roundSigs, err = getPendingSigs(tx)
+ return
+ })
+
+ a.Equal(3, len(roundSigs))
+}
+
+func TestWorkerBuildersRecoveryLimit(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ var keys []account.Participation
+ for i := 0; i < 10; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubs(t, keys, len(keys))
+ w := newTestWorker(t, s)
+ w.Start()
+ defer w.Shutdown()
+
+ s.advanceLatest(proto.StateProofInterval + proto.StateProofInterval/2)
+
+ for iter := uint64(0); iter < proto.StateProofMaxRecoveryIntervals+1; iter++ {
+ s.advanceLatest(proto.StateProofInterval)
+ tx := <-s.txmsg
+ a.Equal(tx.Txn.Type, protocol.StateProofTx)
+ }
+
+ // since this test involves go routine, we would like to make sure that when
+ // we sample the builder it already processed our current round.
+ // in order to that, we wait for singer and the builder to wait.
+ // then we push one more round so the builder could process it (since the builder might skip rounds)
+ err := waitForBuilderAndSignerToWaitOnRound(s)
+ a.NoError(err)
+ s.mu.Lock()
+ s.addBlock(basics.Round(proto.StateProofInterval * 2))
+ s.mu.Unlock()
+ err = waitForBuilderAndSignerToWaitOnRound(s)
+ a.NoError(err)
+
+ // should not give up on rounds
+ a.Equal(proto.StateProofMaxRecoveryIntervals+1, uint64(len(w.builders)))
+
+ var roundSigs map[basics.Round][]pendingSig
+ err = w.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ roundSigs, err = getPendingSigs(tx)
+ return
+ })
+ a.Equal(proto.StateProofMaxRecoveryIntervals+1, uint64(len(roundSigs)))
+
+ s.advanceLatest(proto.StateProofInterval)
+ tx := <-s.txmsg
+ a.Equal(tx.Txn.Type, protocol.StateProofTx)
+
+ err = waitForBuilderAndSignerToWaitOnRound(s)
+ a.NoError(err)
+ s.mu.Lock()
+ s.addBlock(basics.Round(proto.StateProofInterval * 2))
+ s.mu.Unlock()
+ err = waitForBuilderAndSignerToWaitOnRound(s)
+ a.NoError(err)
+
+ // should not give up on rounds
+ a.Equal(proto.StateProofMaxRecoveryIntervals+1, uint64(len(w.builders)))
+
+ roundSigs = make(map[basics.Round][]pendingSig)
+ err = w.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ roundSigs, err = getPendingSigs(tx)
+ return
+ })
+ a.Equal(proto.StateProofMaxRecoveryIntervals+1, uint64(len(roundSigs)))
+}
+
+func waitForBuilderAndSignerToWaitOnRound(s *testWorkerStubs) error {
+ const maxRetries = 10000
+ i := 0
+ for {
+ s.mu.Lock()
+ r := s.latest + 1
+ // in order to make sure the builder and the signer are waiting for a round we need to make sure
+ // that round r has c channel and r +1 doesn't have.
+ // we also want to make sure that the builder and the singer are waiting
+ isWaitingForRound := s.waiters[r] != nil && s.waiters[r+1] == nil
+ isWaitingForRound = isWaitingForRound && (s.waitersCount[r] == 2)
+ s.mu.Unlock()
+ if !isWaitingForRound {
+ if i == maxRetries {
+ return fmt.Errorf("timeout while waiting for round")
+ }
+ i++
+ time.Sleep(time.Millisecond)
+ continue
+ }
+ return nil
+ }
+}
+
+type sigOrigin int
+
+const (
+ sigFromThisNode sigOrigin = iota
+ sigNotFromThisNode
+ sigAlternateOrigin
+)
+
+// getSignaturesInDatabase sets up the db with signatures. This function supports creating up to StateProofInterval/2 address.
+func getSignaturesInDatabase(t *testing.T, numAddresses int, sigFrom sigOrigin) (
+ signatureBcasted map[basics.Address]int, fromThisNode map[basics.Address]bool,
+ tns *testWorkerStubs, spw *Worker) {
+
+ // Some tests rely on having only one signature being broadcast at a single round.
+ // for that we need to make sure that addresses won't fall into the same broadcast round.
+ // For that same reason we can't have more than StateProofInterval / 2 address
+ require.LessOrEqual(t, uint64(numAddresses), config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval/2)
+
+ // Prepare the addresses and the keys
+ signatureBcasted = make(map[basics.Address]int)
+ fromThisNode = make(map[basics.Address]bool)
+ var keys []account.Participation
+ for i := 0; i < numAddresses; i++ {
+ var parent basics.Address
+ binary.LittleEndian.PutUint64(parent[:], uint64(i))
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ signatureBcasted[parent] = 0
+ }
+
+ tns = newWorkerStubs(t, keys, len(keys))
+ spw = newTestWorker(t, tns)
+
+ // Prepare the database
+ err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return initDB(tx)
+ })
+ require.NoError(t, err)
+
+ // All the keys are for round 255. This way, starting the period at 256,
+ // there will be no disqualified signatures from broadcasting because they are
+ // into the future.
+ round := basics.Round(255)
+
+ // Sign the message
+ spRecords := tns.StateProofKeys(round)
+ sigs := make([]sigFromAddr, 0, len(keys))
+ stateproofMessage := stateproofmsg.Message{}
+ hashedStateproofMessage := stateproofMessage.Hash()
+ for _, key := range spRecords {
+ sig, err := key.StateProofSecrets.SignBytes(hashedStateproofMessage[:])
+ require.NoError(t, err)
+ sigs = append(sigs, sigFromAddr{
+ SignerAddress: key.Account,
+ Round: round,
+ Sig: sig,
+ })
+ }
+
+ // Add the signatures to the database
+ ftn := sigFrom == sigAlternateOrigin || sigFrom == sigFromThisNode
+ for _, sfa := range sigs {
+ err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return addPendingSig(tx, sfa.Round, pendingSig{
+ signer: sfa.SignerAddress,
+ sig: sfa.Sig,
+ fromThisNode: ftn,
+ })
+ })
+ require.NoError(t, err)
+ fromThisNode[sfa.SignerAddress] = ftn
+ if sigFrom == sigAlternateOrigin {
+ // alternate the fromThisNode argument between addresses
+ ftn = !ftn
+ }
+ }
+ return
+}
+
+// TestSigBroacastTwoPerSig checks if each signature is broadcasted twice per period
+// It generates numAddresses and prepares a database with the account/signatures.
+// Then, calls broadcastSigs with round numbers spanning periods and
+// makes sure each account has 2 sigs sent per period if originated locally, and 1 sig
+// if received from another relay.
+func TestSigBroacastTwoPerSig(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ signatureBcasted, fromThisNode, tns, spw := getSignaturesInDatabase(t, 10, sigAlternateOrigin)
+
+ for periods := 1; periods < 10; periods += 3 {
+ sendReceiveCountMessages(t, tns, signatureBcasted, fromThisNode, spw, periods)
+ // reopen the channel
+ tns.sigmsg = make(chan []byte, 1024)
+ // reset the counters
+ for addr := range signatureBcasted {
+ signatureBcasted[addr] = 0
+ }
+ }
+}
+
+func sendReceiveCountMessages(t *testing.T, tns *testWorkerStubs, signatureBcasted map[basics.Address]int,
+ fromThisNode map[basics.Address]bool, spw *Worker, periods int) {
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ // Collect the broadcast messages
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for bMsg := range tns.sigmsg {
+ sfa := sigFromAddr{}
+ err := protocol.Decode(bMsg, &sfa)
+ require.NoError(t, err)
+ signatureBcasted[sfa.SignerAddress]++
+ }
+ }()
+
+ // Broadcast the messages
+ for brnd := 257; brnd < 257+int(proto.StateProofInterval)*periods; brnd++ {
+ spw.broadcastSigs(basics.Round(brnd), proto)
+ }
+
+ close(tns.sigmsg)
+ wg.Wait()
+
+ // Verify the number of times each signature was broadcast
+ for addr, sb := range signatureBcasted {
+ if fromThisNode[addr] {
+ require.Equal(t, 2*periods, sb)
+ } else {
+ require.Equal(t, periods, sb)
+ }
+ }
+}
+
+func TestBuilderGeneratesValidStateProofTXN(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ var keys []account.Participation
+ for i := 0; i < 10; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubs(t, keys, len(keys))
+ w := newTestWorker(t, s)
+ w.Start()
+ defer w.Shutdown()
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.advanceLatest(proto.StateProofInterval + proto.StateProofInterval/2)
+
+ s.advanceLatest(proto.StateProofInterval)
+
+ for i := 0; i < len(keys); i++ {
+ // Expect all signatures to be broadcast.
+ _, err := s.waitOnSigWithTimeout(time.Second * 2)
+ require.NoError(t, err)
+ }
+
+ tx, err := s.waitOnTxnWithTimeout(time.Second * 5)
+ require.NoError(t, err)
+
+ a.NoError(tx.Txn.WellFormed(transactions.SpecialAddresses{}, proto))
+}
+
+// TestForwardNotFromThisNodeSecondHalf tests that relays forward
+// signatures from other nodes only in the second half of the period
+func TestForwardNotFromThisNodeSecondHalf(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ _, _, tns, spw := getSignaturesInDatabase(t, 10, sigNotFromThisNode)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ for brnd := 0; brnd < int(proto.StateProofInterval*10); brnd++ {
+ spw.broadcastSigs(basics.Round(brnd), proto)
+ select {
+ case <-tns.sigmsg:
+ // The message is broadcast in the second half of the period
+ require.GreaterOrEqual(t, brnd%int(proto.StateProofInterval), int(proto.StateProofInterval)/2)
+ default:
+ }
+ }
+}
+
+// TestForwardNotFromThisNodeFirstHalf tests that relays forward
+// signatures in the first half of the period only if it is from this node
+func TestForwardNotFromThisNodeFirstHalf(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ signatureBcasted, fromThisNode, tns, spw := getSignaturesInDatabase(t, 10, sigAlternateOrigin)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ for brnd := 0; brnd < int(proto.StateProofInterval*10); brnd++ {
+ spw.broadcastSigs(basics.Round(brnd), proto)
+ select {
+ case bMsg := <-tns.sigmsg:
+ sfa := sigFromAddr{}
+ err := protocol.Decode(bMsg, &sfa)
+ require.NoError(t, err)
+
+ // If it is in the first half, then it must be from this node
+ if brnd%int(proto.StateProofInterval) < int(proto.StateProofInterval)/2 {
+ require.True(t, fromThisNode[sfa.SignerAddress])
+ signatureBcasted[sfa.SignerAddress]++
+ continue
+ }
+
+ // The message is broadcast in the second half of the period, can be from this node or another node
+ require.GreaterOrEqual(t, brnd%int(proto.StateProofInterval), int(proto.StateProofInterval)/2)
+ if fromThisNode[sfa.SignerAddress] {
+ // It must have already been broadcasted once in the first period
+ require.Equal(t, brnd/int(proto.StateProofInterval), signatureBcasted[sfa.SignerAddress])
+ }
+ default:
+ }
+ }
+}
+
+func setBlocksAndMessage(t *testing.T, sigRound basics.Round) (s *testWorkerStubs, w *Worker, msg sigFromAddr, msgBytes []byte) {
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ var address basics.Address
+ crypto.RandBytes(address[:])
+ p := newPartKey(t, address)
+ defer p.Close()
+
+ s = newWorkerStubs(t, []account.Participation{p.Participation}, 10)
+ w = newTestWorker(t, s)
+
+ for r := 0; r < int(proto.StateProofInterval)*2; r++ {
+ s.addBlock(basics.Round(proto.StateProofInterval * 2))
+ }
+
+ msg = sigFromAddr{
+ SignerAddress: address,
+ Round: sigRound,
+ Sig: merklesignature.Signature{},
+ }
+ msgBytes = protocol.Encode(&msg)
+ return
+}
+
+// relays reject signatures for old rounds (before stateproofNext) not disconnect
+func TestWorkerHandleSigOldRounds(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ intervalRound := basics.Round(proto.StateProofInterval)
+ _, w, msg, msgBytes := setBlocksAndMessage(t, intervalRound)
+
+ reply := w.handleSigMessage(network.IncomingMessage{
+ Data: msgBytes,
+ })
+ require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, reply)
+
+ fwd, err := w.handleSig(msg, msg.SignerAddress)
+ require.Equal(t, network.Ignore, fwd)
+ require.NoError(t, err)
+}
+
+// relays reject signatures for a round not in ledger
+func TestWorkerHandleSigRoundNotInLedger(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ intervalRound := basics.Round(proto.StateProofInterval)
+ _, w, msg, msgBytes := setBlocksAndMessage(t, intervalRound*10)
+
+ reply := w.handleSigMessage(network.IncomingMessage{
+ Data: msgBytes,
+ })
+ require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, reply)
+
+ fwd, err := w.handleSig(msg, msg.SignerAddress)
+ require.Equal(t, network.Ignore, fwd)
+ expected := ledgercore.ErrNoEntry{
+ Round: msg.Round,
+ Latest: w.ledger.Latest(),
+ Committed: w.ledger.Latest(),
+ }
+ require.Equal(t, expected, err)
+}
+
+// relays reject signatures for wrong message (sig verification fails)
+func TestWorkerHandleSigWrongSignature(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ intervalRound := basics.Round(proto.StateProofInterval)
+ _, w, msg, msgBytes := setBlocksAndMessage(t, intervalRound*2)
+
+ reply := w.handleSigMessage(network.IncomingMessage{
+ Data: msgBytes,
+ })
+ require.Equal(t, network.OutgoingMessage{Action: network.Disconnect}, reply)
+
+ fwd, err := w.handleSig(msg, msg.SignerAddress)
+ require.Equal(t, network.Disconnect, fwd)
+ expected2 := fmt.Errorf("%w: %v",
+ merklesignature.ErrSignatureSchemeVerificationFailed,
+ merklearray.ErrRootMismatch)
+ require.Equal(t, expected2, err)
+}
+
+// relays reject signatures for address not in top N
+func TestWorkerHandleSigAddrsNotInTopN(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ proto.StateProofTopVoters = 2
+
+ addresses := make([]basics.Address, 0)
+ var keys []account.Participation
+ for i := 0; i < 4; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ addresses = append(addresses, parent)
+
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubs(t, keys[0:proto.StateProofTopVoters], 10)
+ w := newTestWorker(t, s)
+
+ for r := 0; r < int(proto.StateProofInterval)*2; r++ {
+ s.addBlock(basics.Round(r))
+ }
+
+ msg := sigFromAddr{
+ SignerAddress: addresses[3],
+ Round: basics.Round(proto.StateProofInterval * 2),
+ Sig: merklesignature.Signature{},
+ }
+
+ msgBytes := protocol.Encode(&msg)
+ reply := w.handleSigMessage(network.IncomingMessage{
+ Data: msgBytes,
+ })
+ require.Equal(t, network.OutgoingMessage{Action: network.Disconnect}, reply)
+
+ fwd, err := w.handleSig(msg, msg.SignerAddress)
+ require.Equal(t, network.Disconnect, fwd)
+ expected3 := fmt.Errorf("handleSig: %v not in participants for %d",
+ msg.SignerAddress, msg.Round)
+ require.Equal(t, expected3, err)
+}
+
+// Signature already part of the builderForRound, ignore
+func TestWorkerHandleSigAlreadyIn(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ lastRound := proto.StateProofInterval * 2
+ s, w, msg, _ := setBlocksAndMessage(t, basics.Round(lastRound))
+
+ latestBlockHeader, err := w.ledger.BlockHdr(basics.Round(lastRound))
+ require.NoError(t, err)
+ stateproofMessage, err := GenerateStateProofMessage(w.ledger, proto.StateProofInterval, latestBlockHeader)
+ require.NoError(t, err)
+
+ hashedStateproofMessage := stateproofMessage.Hash()
+ spRecords := s.StateProofKeys(basics.Round(proto.StateProofInterval * 2))
+ sig, err := spRecords[0].StateProofSecrets.SignBytes(hashedStateproofMessage[:])
+ require.NoError(t, err)
+
+ msg.Sig = sig
+ // Create the database
+ err = w.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return initDB(tx)
+ })
+ require.NoError(t, err)
+
+ msgBytes := protocol.Encode(&msg)
+ // First call to add the sig
+ reply := w.handleSigMessage(network.IncomingMessage{
+ Data: msgBytes,
+ })
+ require.Equal(t, network.OutgoingMessage{Action: network.Broadcast}, reply)
+
+ // The sig is already there. Shoud get error
+ reply = w.handleSigMessage(network.IncomingMessage{
+ Data: msgBytes,
+ })
+ require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, reply)
+
+ fwd, err := w.handleSig(msg, msg.SignerAddress)
+ require.Equal(t, network.Ignore, fwd)
+ require.NoError(t, err)
+}
+
+// Ignore on db internal error and report error
+func TestWorkerHandleSigExceptionsDbError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ lastRound := proto.StateProofInterval * 2
+ s, w, msg, _ := setBlocksAndMessage(t, basics.Round(lastRound))
+ latestBlockHeader, err := w.ledger.BlockHdr(basics.Round(lastRound))
+ require.NoError(t, err)
+ stateproofMessage, err := GenerateStateProofMessage(w.ledger, proto.StateProofInterval, latestBlockHeader)
+ require.NoError(t, err)
+
+ hashedStateproofMessage := stateproofMessage.Hash()
+ spRecords := s.StateProofKeys(basics.Round(proto.StateProofInterval * 2))
+ sig, err := spRecords[0].StateProofSecrets.SignBytes(hashedStateproofMessage[:])
+ require.NoError(t, err)
+ msg.Sig = sig
+
+ msgBytes := protocol.Encode(&msg)
+ reply := w.handleSigMessage(network.IncomingMessage{
+ Data: msgBytes,
+ })
+ require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, reply)
+
+ fwd, err := w.handleSig(msg, msg.SignerAddress)
+ require.Equal(t, network.Ignore, fwd)
+ require.Contains(t, "no such table: sigs", err.Error())
+}
+
+// relays reject signatures when could not makeBuilderForRound
+func TestWorkerHandleSigCantMakeBuilder(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ origProto := proto
+ defer func() {
+ config.Consensus[protocol.ConsensusCurrentVersion] = origProto
+ }()
+ proto.StateProofInterval = 512
+ config.Consensus[protocol.ConsensusCurrentVersion] = proto
+
+ var address basics.Address
+ crypto.RandBytes(address[:])
+ p := newPartKey(t, address)
+ defer p.Close()
+
+ s := newWorkerStubs(t, []account.Participation{p.Participation}, 10)
+ w := newTestWorker(t, s)
+
+ for r := 0; r < int(proto.StateProofInterval)*2; r++ {
+ s.addBlock(basics.Round(512))
+ }
+ // remove the first block from the ledger
+ delete(s.blocks, 0)
+
+ msg := sigFromAddr{
+ SignerAddress: address,
+ Round: basics.Round(proto.StateProofInterval),
+ Sig: merklesignature.Signature{},
+ }
+
+ msgBytes := protocol.Encode(&msg)
+ reply := w.handleSigMessage(network.IncomingMessage{
+ Data: msgBytes,
+ })
+ require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, reply)
+
+ fwd, err := w.handleSig(msg, msg.SignerAddress)
+ require.Equal(t, network.Ignore, fwd)
+ expected := ledgercore.ErrNoEntry{
+ Round: 0,
+ Latest: w.ledger.Latest(),
+ Committed: w.ledger.Latest(),
+ }
+ require.Equal(t, expected, err)
+}
+
+// relays reject signiture for a round where StateProofInterval is 0
+func TestWorkerHandleSigIntervalZero(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ origProto := proto
+ defer func() {
+ config.Consensus[protocol.ConsensusCurrentVersion] = origProto
+ }()
+ proto.StateProofInterval = 0
+ config.Consensus[protocol.ConsensusCurrentVersion] = proto
+
+ intervalRound := basics.Round(proto.StateProofInterval)
+ _, w, msg, msgBytes := setBlocksAndMessage(t, intervalRound*2)
+
+ reply := w.handleSigMessage(network.IncomingMessage{
+ Data: msgBytes,
+ })
+ require.Equal(t, network.OutgoingMessage{Action: network.Disconnect}, reply)
+
+ fwd, err := w.handleSig(msg, msg.SignerAddress)
+ require.Equal(t, network.Disconnect, fwd)
+ expected := fmt.Errorf("handleSig: StateProofInterval is 0 for round %d",
+ uint64(msg.Round))
+ require.Equal(t, expected, err)
+}
+
+// relays reject signiture for a round not multiple of StateProofInterval
+func TestWorkerHandleSigNotOnInterval(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ _, w, msg, msgBytes := setBlocksAndMessage(t, basics.Round(600))
+
+ reply := w.handleSigMessage(network.IncomingMessage{
+ Data: msgBytes,
+ })
+ require.Equal(t, network.OutgoingMessage{Action: network.Disconnect}, reply)
+
+ fwd, err := w.handleSig(msg, msg.SignerAddress)
+ require.Equal(t, network.Disconnect, fwd)
+ expected := fmt.Errorf("handleSig: round %d is not a multiple of SP interval %d",
+ msg.Round, proto.StateProofInterval)
+ require.Equal(t, expected, err)
+}
+
+// relays handle corrupt message
+func TestWorkerHandleSigCorrupt(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var address basics.Address
+ crypto.RandBytes(address[:])
+ p := newPartKey(t, address)
+ defer p.Close()
+
+ s := newWorkerStubs(t, []account.Participation{p.Participation}, 10)
+ w := newTestWorker(t, s)
+
+ msg := sigFromAddr{}
+ msgBytes := protocol.Encode(&msg)
+ msgBytes[0] = 55 // arbitrary value to fail protocol.Decode
+
+ reply := w.handleSigMessage(network.IncomingMessage{
+ Data: msgBytes,
+ })
+ require.Equal(t, network.OutgoingMessage{Action: network.Disconnect}, reply)
+}
diff --git a/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp b/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp
index f52e57a21..4971e75e9 100644
--- a/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp
+++ b/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp
@@ -64,12 +64,12 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Update the Primary Node configuration
- exec -- cat "$TEST_ROOT_DIR/Primary/config.json" | jq {. |= . + {"CatchpointInterval":4,"EnableRequestLogger":true}} > $TEST_ROOT_DIR/Primary/config.json.new
+ exec -- cat "$TEST_ROOT_DIR/Primary/config.json" | jq {. |= . + {"MaxAcctLookback": 2, "CatchpointInterval": 4,"EnableRequestLogger":true}} > $TEST_ROOT_DIR/Primary/config.json.new
exec rm $TEST_ROOT_DIR/Primary/config.json
exec mv $TEST_ROOT_DIR/Primary/config.json.new $TEST_ROOT_DIR/Primary/config.json
# Update the Second Node configuration
- exec -- cat "$TEST_ROOT_DIR/Node/config.json" | jq {. |= . + {"CatchupParallelBlocks":2}} > $TEST_ROOT_DIR/Node/config.json.new
+ exec -- cat "$TEST_ROOT_DIR/Node/config.json" | jq {. |= . + {"MaxAcctLookback": 2, "CatchupParallelBlocks":2}} > $TEST_ROOT_DIR/Node/config.json.new
exec rm $TEST_ROOT_DIR/Node/config.json
exec mv $TEST_ROOT_DIR/Node/config.json.new $TEST_ROOT_DIR/Node/config.json
@@ -83,6 +83,11 @@ if { [catch {
# Wait until the primary node reaches round 37. At that point, the catchpoint for round 36 is already done.
+ # The rationale is the following:
+ # 1. MaxTxnLife = 33 so catchup would load blocks 3..36
+ # 2. Loading block 2 is blocked by the catchpoint proxy
+ # 3. Next block is 37 that would require balances from round 37-MaxBalLookback = 5 to be accessed, and this is
+ # within the expected range of 3...36
::AlgorandGoal::WaitForRound 37 $TEST_ROOT_DIR/Primary
# Get primary node listening address:
diff --git a/test/e2e-go/cli/goal/expect/catchpointCatchupWebProxy/webproxy.go b/test/e2e-go/cli/goal/expect/catchpointCatchupWebProxy/webproxy.go
index 3090a943a..335d5bafb 100644
--- a/test/e2e-go/cli/goal/expect/catchpointCatchupWebProxy/webproxy.go
+++ b/test/e2e-go/cli/goal/expect/catchpointCatchupWebProxy/webproxy.go
@@ -55,6 +55,7 @@ func main() {
mu.Unlock()
// prevent requests for block #2 to go through.
if strings.HasSuffix(request.URL.String(), "/block/2") {
+ response.Write([]byte("webProxy prevents block 2 from serving"))
response.WriteHeader(http.StatusBadRequest)
return
}
diff --git a/test/e2e-go/cli/goal/expect/corsTest.exp b/test/e2e-go/cli/goal/expect/corsTest.exp
index 7691b740f..7691b740f 100644..100755
--- a/test/e2e-go/cli/goal/expect/corsTest.exp
+++ b/test/e2e-go/cli/goal/expect/corsTest.exp
diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
index 345c1be6f..f528dabb1 100644
--- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
+++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
@@ -50,11 +50,41 @@ proc ::AlgorandGoal::Abort { ERROR } {
# terminate child algod processes, if there are active child processes the test will hang on a test failure
puts "GLOBAL_TEST_ROOT_DIR $::GLOBAL_TEST_ROOT_DIR"
puts "GLOBAL_NETWORK_NAME $::GLOBAL_NETWORK_NAME"
+
+ log_user 1
+ set NODE_DATA_DIR $::GLOBAL_TEST_ROOT_DIR/Primary
+ if { [info exists ::NODE_DATA_DIR] } {
+ set outLog [exec cat $NODE_DATA_DIR/algod-out.log]
+ puts "$NODE_DATA_DIR/algod-out.log :\r\n$outLog"
+ set errLog [exec cat $NODE_DATA_DIR/algod-err.log]
+ puts "$NODE_DATA_DIR/algod-err.log :\r\n$errLog"
+ set nodeLog [exec -- tail -n 30 $NODE_DATA_DIR/node.log]
+ puts "$NODE_DATA_DIR/node.log :\r\n$nodeLog"
+ }
+ set NODE_DATA_DIR $::GLOBAL_TEST_ROOT_DIR/Node
+ if { [info exists ::NODE_DATA_DIR] } {
+ set outLog [exec cat $NODE_DATA_DIR/algod-out.log]
+ puts "$NODE_DATA_DIR/algod-out.log :\r\n$outLog"
+ set errLog [exec cat $NODE_DATA_DIR/algod-err.log]
+ puts "$NODE_DATA_DIR/algod-err.log :\r\n$errLog"
+ set nodeLog [exec -- tail -n 30 $NODE_DATA_DIR/node.log]
+ puts "$NODE_DATA_DIR/node.log :\r\n$nodeLog"
+ }
+
::AlgorandGoal::StopNetwork $::GLOBAL_NETWORK_NAME $::GLOBAL_TEST_ROOT_DIR
}
if { [info exists ::GLOBAL_TEST_ALGO_DIR] } {
puts "GLOBAL_TEST_ALGO_DIR $::GLOBAL_TEST_ALGO_DIR"
+
+ log_user 1
+ set outLog [exec cat $::GLOBAL_TEST_ALGO_DIR/algod-out.log]
+ puts "$::GLOBAL_TEST_ALGO_DIR/algod-out.log :\r\n$outLog"
+ set errLog [exec cat $::GLOBAL_TEST_ALGO_DIR/algod-err.log]
+ puts "$NODE_DATA_DIR/algod-err.log :\r\n$errLog"
+ set nodeLog [exec -- tail -n 30 $::GLOBAL_TEST_ALGO_DIR/node.log]
+ puts "$::GLOBAL_TEST_ALGO_DIR/node.log :\r\n$nodeLog"
+
::AlgorandGoal::StopNode $::GLOBAL_TEST_ALGO_DIR
}
@@ -64,7 +94,7 @@ proc ::AlgorandGoal::Abort { ERROR } {
# Utility method to test the process returned value
# Returns 0 when no error code is detected
# When an error code is detected:
-# If ABORT = 1 Calls AlgorandGoal::Abort
+# If ABORT = 1 Calls AlgorandGoal::Abort
# if ABORT = 0 Returns 1 OS_CODE ERR_CODE KILLED KILL_SIGNAL EXP
# If SIGHUP is detected, it ignores it.
proc ::AlgorandGoal::CheckProcessReturnedCode {ABORT} {
@@ -937,11 +967,6 @@ proc ::AlgorandGoal::WaitForRound { WAIT_FOR_ROUND_NUMBER NODE_DATA_DIR } {
eof {
catch wait result;
if { [lindex $result 3] != 0 } {
- log_user 1
- set errLog [exec cat $NODE_DATA_DIR/algod-err.log]
- puts "$NODE_DATA_DIR/algod-err.log :\r\n$errLog"
- set nodeLog [exec -- tail -n 30 $NODE_DATA_DIR/node.log]
- puts "$NODE_DATA_DIR/node.log :\r\n$nodeLog"
::AlgorandGoal::Abort "failed to wait for round : error code [lindex $result 3]"
}
}
diff --git a/test/e2e-go/cli/goal/expect/testInfraTest.exp b/test/e2e-go/cli/goal/expect/testInfraTest.exp
index 308ac5916..b5785e280 100644
--- a/test/e2e-go/cli/goal/expect/testInfraTest.exp
+++ b/test/e2e-go/cli/goal/expect/testInfraTest.exp
@@ -53,7 +53,7 @@ proc checkProcessReturnedCodeTest {} {
}
# test close sending sighup
- spawn /bin/bash -c "echo 44; sleep 2s; kill -11 $$"
+ spawn /bin/bash -c "echo 44; sleep 2; kill -11 $$"
expect {
44 {
close
@@ -67,7 +67,7 @@ proc checkProcessReturnedCodeTest {} {
}
# same, without close. should get to segv
- spawn /bin/bash -c "echo 44; sleep 2s; kill -11 $$"
+ spawn /bin/bash -c "echo 44; sleep 2; kill -11 $$"
expect {
44 {
puts "not closing"
diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go
index f1eb14b4e..98efc0b26 100644
--- a/test/e2e-go/features/catchup/catchpointCatchup_test.go
+++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go
@@ -18,6 +18,7 @@ package catchup
import (
"fmt"
+ generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"net/http"
"os/exec"
"path/filepath"
@@ -79,6 +80,30 @@ func (ec *nodeExitErrorCollector) Print() {
}
}
+// awaitCatchpointCreation attempts catchpoint retrieval with retries when the catchpoint is not yet available.
+func awaitCatchpointCreation(client algodclient.RestClient, fixture *fixtures.RestClientFixture, roundWaitCount uint8) (generatedV2.NodeStatusResponse, error) {
+ s, err := client.Status()
+ if err != nil {
+ return generatedV2.NodeStatusResponse{}, err
+ }
+
+ if len(*s.LastCatchpoint) > 0 {
+ return s, nil
+
+ }
+
+ if roundWaitCount-1 > 0 {
+ err = fixture.ClientWaitForRound(client, s.LastRound+1, 10*time.Second)
+ if err != nil {
+ return generatedV2.NodeStatusResponse{}, err
+ }
+
+ return awaitCatchpointCreation(client, fixture, roundWaitCount-1)
+ }
+
+ return generatedV2.NodeStatusResponse{}, fmt.Errorf("No catchpoint exists")
+}
+
func TestBasicCatchpointCatchup(t *testing.T) {
partitiontest.PartitionTest(t)
defer fixtures.ShutdownSynchronizedTest(t)
@@ -107,6 +132,8 @@ func TestBasicCatchpointCatchup(t *testing.T) {
catchpointCatchupProtocol.SeedRefreshInterval = 8
catchpointCatchupProtocol.MaxBalLookback = 2 * catchpointCatchupProtocol.SeedLookback * catchpointCatchupProtocol.SeedRefreshInterval // 32
catchpointCatchupProtocol.MaxTxnLife = 33
+ catchpointCatchupProtocol.CatchpointLookback = catchpointCatchupProtocol.MaxBalLookback
+ catchpointCatchupProtocol.EnableOnlineAccountCatchpoints = true
if runtime.GOARCH == "amd64" {
// amd64 platforms are generally quite capable, so accelerate the round times to make the test run faster.
@@ -135,6 +162,7 @@ func TestBasicCatchpointCatchup(t *testing.T) {
cfg, err := config.LoadConfigFromDisk(primaryNode.GetDataDir())
a.NoError(err)
cfg.CatchpointInterval = 4
+ cfg.MaxAcctLookback = 2
cfg.SaveToDisk(primaryNode.GetDataDir())
cfg.Archival = false
cfg.NetAddress = ""
@@ -155,12 +183,12 @@ func TestBasicCatchpointCatchup(t *testing.T) {
// Let the network make some progress
currentRound := uint64(1)
- targetRound := uint64(37)
+ const targetRound = uint64(37)
primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode)
primaryNodeRestClient.SetAPIVersionAffinity(algodclient.APIVersionV2)
log.Infof("Building ledger history..")
for {
- err = fixture.ClientWaitForRound(primaryNodeRestClient, currentRound, 45000*time.Millisecond)
+ err = fixture.ClientWaitForRound(primaryNodeRestClient, currentRound, 45*time.Second)
a.NoError(err)
if targetRound <= currentRound {
break
@@ -197,13 +225,14 @@ func TestBasicCatchpointCatchup(t *testing.T) {
// wait until node is caught up.
secondNodeRestClient := fixture.GetAlgodClientForController(secondNode)
+
currentRound = uint64(1)
- targetRound = uint64(1)
+ secondNodeTargetRound := uint64(1)
log.Infof("Second node catching up to round 1")
for {
err = fixture.ClientWaitForRound(secondNodeRestClient, currentRound, 10*time.Second)
a.NoError(err)
- if targetRound <= currentRound {
+ if secondNodeTargetRound <= currentRound {
break
}
currentRound++
@@ -211,19 +240,21 @@ func TestBasicCatchpointCatchup(t *testing.T) {
}
log.Infof(" - done catching up!\n")
- primaryNodeStatus, err := primaryNodeRestClient.Status()
+ status, err := awaitCatchpointCreation(primaryNodeRestClient, &fixture, 3)
+ a.NoError(err)
+
+ log.Infof("primary node latest catchpoint - %s!\n", status.LastCatchpoint)
+ _, err = secondNodeRestClient.Catchup(*status.LastCatchpoint)
a.NoError(err)
- a.NotNil(primaryNodeStatus.LastCatchpoint)
- log.Infof("primary node latest catchpoint - %s!\n", *primaryNodeStatus.LastCatchpoint)
- secondNodeRestClient.Catchup(*primaryNodeStatus.LastCatchpoint)
- currentRound = primaryNodeStatus.LastRound
- targetRound = currentRound + 1
- log.Infof("Second node catching up to round 36")
+ currentRound = status.LastRound
+ a.LessOrEqual(targetRound, currentRound)
+ fixtureTargetRound := targetRound + 1
+ log.Infof("Second node catching up to round %v", currentRound)
for {
err = fixture.ClientWaitForRound(secondNodeRestClient, currentRound, 10*time.Second)
a.NoError(err)
- if targetRound <= currentRound {
+ if fixtureTargetRound <= currentRound {
break
}
currentRound++
@@ -233,3 +264,107 @@ func TestBasicCatchpointCatchup(t *testing.T) {
secondNode.StopAlgod()
primaryNode.StopAlgod()
}
+
+func TestCatchpointLabelGeneration(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+
+ testCases := []struct {
+ catchpointInterval uint64
+ archival bool
+ expectLabels bool
+ }{
+ {4, true, true},
+ {4, false, true},
+ {0, true, false},
+ }
+
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("CatchpointInterval_%v/Archival_%v", tc.catchpointInterval, tc.archival), func(t *testing.T) {
+ a := require.New(fixtures.SynchronizedTest(t))
+ log := logging.TestingLog(t)
+
+ consensus := make(config.ConsensusProtocols)
+ const consensusCatchpointCatchupTestProtocol = protocol.ConsensusVersion("catchpointtestingprotocol")
+ catchpointCatchupProtocol := config.Consensus[protocol.ConsensusCurrentVersion]
+ catchpointCatchupProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ // MaxBalLookback = 2 x SeedRefreshInterval x SeedLookback
+ // ref. https://github.com/algorandfoundation/specs/blob/master/dev/abft.md
+ catchpointCatchupProtocol.SeedLookback = 2
+ catchpointCatchupProtocol.SeedRefreshInterval = 8
+ catchpointCatchupProtocol.MaxBalLookback = 2 * catchpointCatchupProtocol.SeedLookback * catchpointCatchupProtocol.SeedRefreshInterval // 32
+ catchpointCatchupProtocol.MaxTxnLife = 33
+ catchpointCatchupProtocol.CatchpointLookback = catchpointCatchupProtocol.MaxBalLookback
+ catchpointCatchupProtocol.EnableOnlineAccountCatchpoints = true
+
+ if runtime.GOARCH == "amd64" {
+ // amd64 platforms are generally quite capable, so accelerate the round times to make the test run faster.
+ catchpointCatchupProtocol.AgreementFilterTimeoutPeriod0 = 1 * time.Second
+ catchpointCatchupProtocol.AgreementFilterTimeout = 1 * time.Second
+ }
+
+ consensus[consensusCatchpointCatchupTestProtocol] = catchpointCatchupProtocol
+
+ var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(consensus)
+
+ errorsCollector := nodeExitErrorCollector{t: fixtures.SynchronizedTest(t)}
+ defer errorsCollector.Print()
+
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "CatchpointCatchupTestNetwork.json"))
+
+ // Get primary node
+ primaryNode, err := fixture.GetNodeController("Primary")
+ a.NoError(err)
+
+ cfg, err := config.LoadConfigFromDisk(primaryNode.GetDataDir())
+ a.NoError(err)
+ cfg.CatchpointInterval = tc.catchpointInterval
+ cfg.Archival = tc.archival
+ cfg.MaxAcctLookback = 2
+ cfg.SaveToDisk(primaryNode.GetDataDir())
+
+ // start the primary node
+ _, err = primaryNode.StartAlgod(nodecontrol.AlgodStartArgs{
+ PeerAddress: "",
+ ListenIP: "",
+ RedirectOutput: true,
+ RunUnderHost: false,
+ TelemetryOverride: "",
+ ExitErrorCallback: errorsCollector.nodeExitWithError,
+ })
+ a.NoError(err)
+
+ // Let the network make some progress
+ currentRound := uint64(1)
+ targetRound := uint64(41)
+ primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode)
+ primaryNodeRestClient.SetAPIVersionAffinity(algodclient.APIVersionV2)
+ log.Infof("Building ledger history..")
+ for {
+ err = fixture.ClientWaitForRound(primaryNodeRestClient, currentRound, 45*time.Second)
+ a.NoError(err)
+ if targetRound <= currentRound {
+ break
+ }
+ currentRound++
+
+ }
+ log.Infof("done building!\n")
+
+ primaryNodeStatus, err := primaryNodeRestClient.Status()
+ a.NoError(err)
+ a.NotNil(primaryNodeStatus.LastCatchpoint)
+ if tc.expectLabels {
+ a.NotEmpty(*primaryNodeStatus.LastCatchpoint)
+ } else {
+ a.Empty(*primaryNodeStatus.LastCatchpoint)
+ }
+ primaryNode.StopAlgod()
+ })
+ }
+}
diff --git a/test/e2e-go/features/compactcert/compactcert_test.go b/test/e2e-go/features/compactcert/compactcert_test.go
deleted file mode 100644
index 5c00286a5..000000000
--- a/test/e2e-go/features/compactcert/compactcert_test.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package compactcert
-
-import (
- "path/filepath"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto/compactcert"
- "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/rpcs"
- "github.com/algorand/go-algorand/test/framework/fixtures"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-func TestCompactCerts(t *testing.T) {
- partitiontest.PartitionTest(t)
- defer fixtures.ShutdownSynchronizedTest(t)
-
- t.Skip("Disabling since they need work and shouldn't block releases")
- t.Parallel()
- r := require.New(fixtures.SynchronizedTest(t))
-
- configurableConsensus := make(config.ConsensusProtocols)
- consensusVersion := protocol.ConsensusVersion("test-fast-compactcert")
- consensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
- consensusParams.CompactCertRounds = 8
- consensusParams.CompactCertTopVoters = 1024
- consensusParams.CompactCertVotersLookback = 2
- consensusParams.CompactCertWeightThreshold = (1 << 32) * 30 / 100
- consensusParams.CompactCertSecKQ = 128
- consensusParams.EnableStateProofKeyregCheck = true
- configurableConsensus[consensusVersion] = consensusParams
-
- tmp := config.Consensus[protocol.ConsensusFuture]
- config.Consensus[protocol.ConsensusFuture] = consensusParams
- defer func() {
- config.Consensus[protocol.ConsensusFuture] = tmp
- }()
-
- var fixture fixtures.RestClientFixture
- fixture.SetConsensus(configurableConsensus)
- fixture.Setup(t, filepath.Join("nettemplates", "CompactCert.json"))
- defer fixture.Shutdown()
-
- restClient, err := fixture.NC.AlgodClient()
- r.NoError(err)
-
- node0Client := fixture.GetLibGoalClientForNamedNode("Node0")
- node0Wallet, err := node0Client.GetUnencryptedWalletHandle()
- r.NoError(err)
- node0AccountList, err := node0Client.ListAddresses(node0Wallet)
- r.NoError(err)
- node0Account := node0AccountList[0]
-
- node1Client := fixture.GetLibGoalClientForNamedNode("Node1")
- node1Wallet, err := node1Client.GetUnencryptedWalletHandle()
- r.NoError(err)
- node1AccountList, err := node1Client.ListAddresses(node1Wallet)
- r.NoError(err)
- node1Account := node1AccountList[0]
-
- var lastCertBlock v1.Block
- libgoal := fixture.LibGoalClient
- for rnd := uint64(1); rnd <= consensusParams.CompactCertRounds*4; rnd++ {
- // send a dummy payment transaction.
- minTxnFee, _, err := fixture.CurrentMinFeeAndBalance()
- r.NoError(err)
-
- _, err = node0Client.SendPaymentFromUnencryptedWallet(node0Account, node1Account, minTxnFee, rnd, nil)
- r.NoError(err)
-
- err = fixture.WaitForRound(rnd, 30*time.Second)
- r.NoError(err)
-
- blk, err := libgoal.Block(rnd)
- r.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd)
-
- t.Logf("Round %d, block %v\n", rnd, blk)
-
- if (rnd % consensusParams.CompactCertRounds) == 0 {
- // Must have a merkle commitment for participants
- r.True(len(blk.CompactCertVoters) > 0)
- r.True(blk.CompactCertVotersTotal != 0)
-
- // Special case: bootstrap validation with the first block
- // that has a merkle root.
- if lastCertBlock.Round == 0 {
- lastCertBlock = blk
- }
- }
-
- for lastCertBlock.Round != 0 && lastCertBlock.Round+consensusParams.CompactCertRounds < blk.CompactCertNextRound {
- nextCertRound := lastCertBlock.Round + consensusParams.CompactCertRounds
-
- // Find the cert transaction
- res, err := restClient.TransactionsByAddr(transactions.CompactCertSender.String(), 0, rnd, 4)
- r.NoError(err)
-
- var compactCert compactcert.Cert
- compactCertFound := false
- for _, txn := range res.Transactions {
- r.Equal(txn.Type, string(protocol.CompactCertTx))
- r.True(txn.CompactCert != nil)
- if txn.CompactCert.CertRound == nextCertRound {
- err = protocol.Decode(txn.CompactCert.Cert, &compactCert)
- r.NoError(err)
- compactCertFound = true
- }
- }
- r.True(compactCertFound)
-
- nextCertBlock, err := libgoal.Block(nextCertRound)
- r.NoError(err)
-
- nextCertBlockRaw, err := libgoal.RawBlock(nextCertRound)
- r.NoError(err)
-
- var nextCertBlockDecoded rpcs.EncodedBlockCert
- err = protocol.Decode(nextCertBlockRaw, &nextCertBlockDecoded)
- r.NoError(err)
-
- var votersRoot = make([]byte, compactcert.HashSize)
- copy(votersRoot[:], lastCertBlock.CompactCertVoters)
-
- provenWeight, overflowed := basics.Muldiv(lastCertBlock.CompactCertVotersTotal, uint64(consensusParams.CompactCertWeightThreshold), 1<<32)
- r.False(overflowed)
-
- ccparams := compactcert.Params{
- Msg: nextCertBlockDecoded.Block.BlockHeader,
- ProvenWeight: provenWeight,
- SigRound: basics.Round(nextCertBlock.Round),
- SecKQ: consensusParams.CompactCertSecKQ,
- }
- verif := compactcert.MkVerifier(ccparams, votersRoot)
- err = verif.Verify(&compactCert)
- r.NoError(err)
-
- lastCertBlock = nextCertBlock
- }
- }
-
- r.Equalf(consensusParams.CompactCertRounds*3, lastCertBlock.Round, "the expected last certificate block wasn't the one that was observed")
-}
diff --git a/test/e2e-go/features/participation/accountParticipationTransitions_test.go b/test/e2e-go/features/participation/accountParticipationTransitions_test.go
index 9cf58310a..5047d7cd1 100644
--- a/test/e2e-go/features/participation/accountParticipationTransitions_test.go
+++ b/test/e2e-go/features/participation/accountParticipationTransitions_test.go
@@ -22,8 +22,6 @@ package participation
import (
"fmt"
- "io/ioutil"
- "os"
"path/filepath"
"testing"
"time"
@@ -40,12 +38,8 @@ import (
// installParticipationKey generates a new key for a given account and installs it with the client.
func installParticipationKey(t *testing.T, client libgoal.Client, addr string, firstValid, lastValid uint64) (resp generated.PostParticipationResponse, part account.Participation, err error) {
- dir, err := ioutil.TempDir("", "temporary_partkey_dir")
- require.NoError(t, err)
- defer os.RemoveAll(dir)
-
// Install overlapping participation keys...
- part, filePath, err := client.GenParticipationKeysTo(addr, firstValid, lastValid, 100, dir)
+ part, filePath, err := client.GenParticipationKeysTo(addr, firstValid, lastValid, 100, t.TempDir())
require.NoError(t, err)
require.NotNil(t, filePath)
require.Equal(t, addr, part.Parent.String())
diff --git a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
index a09b566a7..97ac14e00 100644
--- a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
+++ b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
@@ -291,8 +291,8 @@ func TestAccountGoesOnlineForShortPeriod(t *testing.T) {
// we try to register online with a period in which we don't have stateproof keys
partKeyFirstValid := uint64(1)
- // TODO: Change consensus version when compact certs are deployed
- partKeyLastValid := config.Consensus[protocol.ConsensusFuture].CompactCertRounds - 1
+ // TODO: Change consensus version when state proofs are deployed
+ partKeyLastValid := config.Consensus[protocol.ConsensusFuture].StateProofInterval - 1
partkeyResponse, _, err := client.GenParticipationKeys(newAccount, partKeyFirstValid, partKeyLastValid, 1000)
a.NoError(err, "rest client should be able to add participation key to new account")
a.Equal(newAccount, partkeyResponse.Parent.String(), "partkey response should echo queried account")
diff --git a/test/e2e-go/features/stateproofs/stateproofs_test.go b/test/e2e-go/features/stateproofs/stateproofs_test.go
new file mode 100644
index 000000000..5ad6f4998
--- /dev/null
+++ b/test/e2e-go/features/stateproofs/stateproofs_test.go
@@ -0,0 +1,1127 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package stateproofs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklearray"
+ sp "github.com/algorand/go-algorand/crypto/stateproof"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/libgoal"
+ "github.com/algorand/go-algorand/nodecontrol"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+type accountFetcher struct {
+ nodeName string
+ accountNumber int
+}
+
+func (a accountFetcher) getAccount(r *require.Assertions, f *fixtures.RestClientFixture) string {
+ node0Client := f.GetLibGoalClientForNamedNode(a.nodeName)
+ node0Wallet, err := node0Client.GetUnencryptedWalletHandle()
+ r.NoError(err)
+ node0AccountList, err := node0Client.ListAddresses(node0Wallet)
+ r.NoError(err)
+ return node0AccountList[a.accountNumber]
+}
+
+func (a accountFetcher) getBalance(r *require.Assertions, f *fixtures.RestClientFixture) uint64 {
+ balance, _ := f.GetBalanceAndRound(a.getAccount(r, f))
+ return balance
+}
+
+func (a accountFetcher) goOffline(r *require.Assertions, f *fixtures.RestClientFixture, round uint64) {
+ account0 := a.getAccount(r, f)
+
+ minTxnFee, _, err := f.CurrentMinFeeAndBalance()
+ r.NoError(err)
+
+ client0 := f.GetLibGoalClientForNamedNode(a.nodeName)
+ txn, err := client0.MakeUnsignedGoOfflineTx(account0, round, round+1000, minTxnFee, [32]byte{})
+ r.NoError(err)
+ wallet0, err := client0.GetUnencryptedWalletHandle()
+ r.NoError(err)
+ _, err = client0.SignAndBroadcastTransaction(wallet0, nil, txn)
+ r.NoError(err)
+}
+
+type paymentSender struct {
+ from accountFetcher
+ to accountFetcher
+ amount uint64
+}
+
+func (p paymentSender) sendPayment(a *require.Assertions, f *fixtures.RestClientFixture, round uint64) {
+ account0 := p.from.getAccount(a, f)
+ account1 := p.to.getAccount(a, f)
+
+ minTxnFee, _, err := f.CurrentMinFeeAndBalance()
+ a.NoError(err)
+
+ client0 := f.GetLibGoalClientForNamedNode(p.from.nodeName)
+ _, err = client0.SendPaymentFromUnencryptedWallet(account0, account1, minTxnFee, p.amount, []byte{byte(round)})
+ a.NoError(err)
+}
+
+const timeoutUntilNextRound = 3 * time.Minute
+
+func TestStateProofs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ configurableConsensus := make(config.ConsensusProtocols)
+ consensusVersion := protocol.ConsensusVersion("test-fast-stateproofs")
+ consensusParams := getDefaultStateProofConsensusParams()
+ configurableConsensus[consensusVersion] = consensusParams
+
+ var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(configurableConsensus)
+ fixture.Setup(t, filepath.Join("nettemplates", "StateProof.json"))
+ defer fixture.Shutdown()
+
+ verifyStateProofsCreation(t, &fixture, consensusParams)
+}
+
+func TestStateProofsMultiWallets(t *testing.T) {
+ t.Skip("this test is heavy and should be run manually")
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ configurableConsensus := make(config.ConsensusProtocols)
+ consensusVersion := protocol.ConsensusVersion("test-fast-stateproofs")
+ consensusParams := getDefaultStateProofConsensusParams()
+ // Stateproof can be generated even if not all nodes function correctly. e.g node can be offline
+ // and stateproofs might still get generated. in order to make sure that all nodes work correctly
+ // we want the network to fail in generating stateproof if one node is not working correctly.
+ // For that we will increase the proven Weight to be close to 100%. However, this change might not be enough.
+ // if the signed Weight and the Proven Weight are very close to each other the number of reveals in the state proof
+ // will exceed the MAX_NUMBER_OF_REVEALS and proofs would not get generated
+ // for that reason we need to the decrease the StateProofStrengthTarget creating a "weak stateproof"
+ consensusParams.StateProofWeightThreshold = (1 << 32) * 90 / 100
+ consensusParams.StateProofStrengthTarget = 4
+ configurableConsensus[consensusVersion] = consensusParams
+
+ var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(configurableConsensus)
+ fixture.Setup(t, filepath.Join("nettemplates", "StateProofMultiWallets.json"))
+ defer fixture.Shutdown()
+
+ verifyStateProofsCreation(t, &fixture, consensusParams)
+}
+
+func verifyStateProofsCreation(t *testing.T, fixture *fixtures.RestClientFixture, consensusParams config.ConsensusParams) {
+ r := require.New(fixtures.SynchronizedTest(t))
+
+ var lastStateProofBlock bookkeeping.Block
+ var lastStateProofMessage stateproofmsg.Message
+ libgoal := fixture.LibGoalClient
+
+ expectedNumberOfStateProofs := uint64(4)
+ // Loop through the rounds enough to check for expectedNumberOfStateProofs state proofs
+ for rnd := uint64(1); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ {
+ // send a dummy payment transaction to create non-empty blocks.
+ paymentSender{
+ from: accountFetcher{nodeName: "Node0", accountNumber: 0},
+ to: accountFetcher{nodeName: "Node1", accountNumber: 0},
+ amount: 1,
+ }.sendPayment(r, fixture, rnd)
+
+ err := fixture.WaitForRound(rnd, timeoutUntilNextRound)
+ r.NoError(err)
+
+ blk, err := libgoal.BookkeepingBlock(rnd)
+ r.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd)
+
+ if (rnd % consensusParams.StateProofInterval) == 0 {
+ // Must have a merkle commitment for participants
+ r.True(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment) > 0)
+ r.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != basics.MicroAlgos{})
+
+ // Special case: bootstrap validation with the first block
+ // that has a merkle root.
+ if lastStateProofBlock.Round() == 0 {
+ lastStateProofBlock = blk
+ }
+ } else {
+ r.True(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment) == 0)
+ r.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight == basics.MicroAlgos{})
+ }
+
+ for lastStateProofBlock.Round()+basics.Round(consensusParams.StateProofInterval) < blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound &&
+ lastStateProofBlock.Round() != 0 {
+ nextStateProofRound := uint64(lastStateProofBlock.Round()) + consensusParams.StateProofInterval
+
+ t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round())
+ // Find the state proof transaction
+ stateProofMessage, nextStateProofBlock := verifyStateProofForRound(r, fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams, expectedNumberOfStateProofs)
+ lastStateProofMessage = stateProofMessage
+ lastStateProofBlock = nextStateProofBlock
+ }
+ }
+
+ r.Equalf(int(consensusParams.StateProofInterval*expectedNumberOfStateProofs), int(lastStateProofBlock.Round()), "the expected last state proof block wasn't the one that was observed")
+}
+
+func TestStateProofOverlappingKeys(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+ if testing.Short() {
+ t.Skip()
+ }
+
+ r := require.New(fixtures.SynchronizedTest(t))
+
+ configurableConsensus := make(config.ConsensusProtocols)
+ consensusVersion := protocol.ConsensusVersion("test-fast-stateproofs")
+ consensusParams := getDefaultStateProofConsensusParams()
+ consensusParams.StateProofWeightThreshold = (1 << 32) * 90 / 100
+ consensusParams.StateProofStrengthTarget = 3
+ consensusParams.AgreementFilterTimeout = 1000 * time.Millisecond
+ consensusParams.AgreementFilterTimeoutPeriod0 = 1000 * time.Millisecond
+ consensusParams.SeedLookback = 2
+ consensusParams.SeedRefreshInterval = 8
+ consensusParams.MaxBalLookback = 2 * consensusParams.SeedLookback * consensusParams.SeedRefreshInterval // 32
+ configurableConsensus[consensusVersion] = consensusParams
+
+ var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(configurableConsensus)
+ fixture.Setup(t, filepath.Join("nettemplates", "StateProof.json"))
+ defer fixture.Shutdown()
+
+ // Get node libgoal clients in order to update their participation keys
+ var libgoalNodeClients [5]libgoal.Client
+ for i := 0; i < 5; i++ {
+ nodeName := fmt.Sprintf("Node%d", i)
+ c := fixture.GetLibGoalClientForNamedNode(nodeName)
+ libgoalNodeClients[i] = c
+ }
+
+ // Get account address of each participating node
+ var accounts [5]string
+ for i, c := range libgoalNodeClients {
+ parts, err := c.GetParticipationKeys() // should have 1 participation per node
+ r.NoError(err)
+ accounts[i] = parts[0].Address
+ }
+
+ var participations [5]account.Participation
+ var lastStateProofBlock bookkeeping.Block
+ var lastStateProofMessage stateproofmsg.Message
+ libgoalClient := fixture.LibGoalClient
+
+ k, err := libgoalNodeClients[0].GetParticipationKeys()
+ r.NoError(err)
+ voteLastValid := k[0].Key.VoteLastValid
+ expectedNumberOfStateProofs := uint64(10)
+ for rnd := uint64(1); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ {
+ if rnd == voteLastValid-64 { // allow some buffer period before the voting keys are expired (for the keyreg to take effect)
+ // Generate participation keys (for the same accounts)
+ for i := 0; i < 5; i++ {
+ // Overlapping stateproof keys (the key for round 0 is valid up to 256)
+ _, part, err := installParticipationKey(t, libgoalNodeClients[i], accounts[i], 0, 200)
+ r.NoError(err)
+ participations[i] = part
+ }
+ // Register overlapping participation keys
+ for i := 0; i < 5; i++ {
+ registerParticipationAndWait(t, libgoalNodeClients[i], participations[i])
+ }
+ }
+
+ // send a dummy payment transaction.
+ paymentSender{
+ from: accountFetcher{nodeName: "Node0", accountNumber: 0},
+ to: accountFetcher{nodeName: "Node1", accountNumber: 0},
+ amount: 1,
+ }.sendPayment(r, &fixture, rnd)
+
+ err = fixture.WaitForRound(rnd, timeoutUntilNextRound)
+ r.NoError(err)
+
+ blk, err := libgoalClient.BookkeepingBlock(rnd)
+ r.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd)
+
+ if (rnd % consensusParams.StateProofInterval) == 0 {
+ // Must have a merkle commitment for participants
+ r.True(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment) > 0)
+ r.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != basics.MicroAlgos{})
+
+ // Special case: bootstrap validation with the first block
+ // that has a merkle root.
+ if lastStateProofBlock.Round() == 0 {
+ lastStateProofBlock = blk
+ }
+ }
+
+ for lastStateProofBlock.Round() != 0 && lastStateProofBlock.Round()+basics.Round(consensusParams.StateProofInterval) < blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound {
+ nextStateProofRound := uint64(lastStateProofBlock.Round()) + consensusParams.StateProofInterval
+
+ t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round())
+ // Find the state proof transaction
+ stateProofMessage, nextStateProofBlock := verifyStateProofForRound(r, &fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams, expectedNumberOfStateProofs)
+ lastStateProofMessage = stateProofMessage
+ lastStateProofBlock = nextStateProofBlock
+ }
+ }
+
+ r.Equalf(int(consensusParams.StateProofInterval*expectedNumberOfStateProofs), int(lastStateProofBlock.Round()), "the expected last state proof block wasn't the one that was observed")
+}
+
+func TestStateProofMessageCommitmentVerification(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ r := require.New(fixtures.SynchronizedTest(t))
+
+ configurableConsensus := make(config.ConsensusProtocols)
+ consensusVersion := protocol.ConsensusVersion("test-fast-stateproofs")
+ consensusParams := getDefaultStateProofConsensusParams()
+ configurableConsensus[consensusVersion] = consensusParams
+
+ var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(configurableConsensus)
+ fixture.Setup(t, filepath.Join("nettemplates", "StateProof.json"))
+ defer fixture.Shutdown()
+
+ libgoalClient := fixture.LibGoalClient
+
+ var startRound = uint64(1)
+ var nextStateProofRound = uint64(0)
+ var firstStateProofRound = 2 * consensusParams.StateProofInterval
+
+ for rnd := startRound; nextStateProofRound <= firstStateProofRound; rnd++ {
+ paymentSender{
+ from: accountFetcher{nodeName: "Node0", accountNumber: 0},
+ to: accountFetcher{nodeName: "Node1", accountNumber: 0},
+ amount: 1,
+ }.sendPayment(r, &fixture, rnd)
+
+ err := fixture.WaitForRound(rnd, timeoutUntilNextRound)
+ r.NoError(err)
+
+ blk, err := libgoalClient.BookkeepingBlock(rnd)
+ r.NoError(err)
+
+ nextStateProofRound = uint64(blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound)
+ }
+
+ _, stateProofMessage := getStateProofByLastRound(r, &fixture, firstStateProofRound, 1)
+ t.Logf("found first stateproof, attesting to rounds %d - %d. Verifying.\n", stateProofMessage.FirstAttestedRound, stateProofMessage.LastAttestedRound)
+
+ for rnd := stateProofMessage.FirstAttestedRound; rnd <= stateProofMessage.LastAttestedRound; rnd++ {
+ proofResp, singleLeafProof, err := fixture.LightBlockHeaderProof(rnd)
+ r.NoError(err)
+
+ blk, err := libgoalClient.BookkeepingBlock(rnd)
+ r.NoError(err)
+
+ lightBlockHeader := blk.ToLightBlockHeader()
+
+ elems := make(map[uint64]crypto.Hashable)
+ elems[proofResp.Index] = &lightBlockHeader
+ err = merklearray.VerifyVectorCommitment(stateProofMessage.BlockHeadersCommitment, elems, singleLeafProof.ToProof())
+ r.NoError(err)
+ }
+}
+
+func getDefaultStateProofConsensusParams() config.ConsensusParams {
+ consensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ consensusParams.StateProofInterval = 16
+ consensusParams.StateProofTopVoters = 1024
+ consensusParams.StateProofVotersLookback = 2
+ consensusParams.StateProofWeightThreshold = (1 << 32) * 30 / 100
+ consensusParams.StateProofStrengthTarget = 256
+ consensusParams.StateProofMaxRecoveryIntervals = 6
+ consensusParams.EnableStateProofKeyregCheck = true
+ consensusParams.AgreementFilterTimeout = 1500 * time.Millisecond
+ consensusParams.AgreementFilterTimeoutPeriod0 = 1500 * time.Millisecond
+
+ return consensusParams
+}
+
+func getStateProofByLastRound(r *require.Assertions, fixture *fixtures.RestClientFixture, stateProofLatestRound uint64, expectedNumberOfStateProofs uint64) (sp.StateProof, stateproofmsg.Message) {
+ restClient, err := fixture.NC.AlgodClient()
+ r.NoError(err)
+
+ curRound, err := fixture.LibGoalClient.CurrentRound()
+ r.NoError(err)
+
+ res, err := restClient.TransactionsByAddr(transactions.StateProofSender.String(), 0, curRound, expectedNumberOfStateProofs+1)
+ r.NoError(err)
+
+ var stateProof sp.StateProof
+ var stateProofMessage stateproofmsg.Message
+ for _, txn := range res.Transactions {
+ r.Equal(txn.Type, string(protocol.StateProofTx))
+ r.True(txn.StateProof != nil)
+ err = protocol.Decode(txn.StateProof.StateProofMessage, &stateProofMessage)
+ r.NoError(err)
+ if stateProofMessage.LastAttestedRound == stateProofLatestRound {
+ err = protocol.Decode(txn.StateProof.StateProof, &stateProof)
+ r.NoError(err)
+
+ return stateProof, stateProofMessage
+ }
+ }
+
+ r.FailNow("no state proof with latest round %d found", stateProofLatestRound)
+
+ // Should never get here
+ return sp.StateProof{}, stateproofmsg.Message{}
+}
+
+func verifyStateProofForRound(r *require.Assertions, fixture *fixtures.RestClientFixture, nextStateProofRound uint64, prevStateProofMessage stateproofmsg.Message, lastStateProofBlock bookkeeping.Block, consensusParams config.ConsensusParams, expectedNumberOfStateProofs uint64) (stateproofmsg.Message, bookkeeping.Block) {
+ stateProof, stateProofMessage := getStateProofByLastRound(r, fixture, nextStateProofRound, expectedNumberOfStateProofs)
+
+ nextStateProofBlock, err := fixture.LibGoalClient.BookkeepingBlock(nextStateProofRound)
+
+ r.NoError(err)
+
+ if !prevStateProofMessage.MsgIsZero() {
+ //if we have a previous stateproof message we can verify the current stateproof using data from it
+ verifier := sp.MkVerifierWithLnProvenWeight(prevStateProofMessage.VotersCommitment, prevStateProofMessage.LnProvenWeight, consensusParams.StateProofStrengthTarget)
+ err = verifier.Verify(uint64(nextStateProofBlock.Round()), stateProofMessage.Hash(), &stateProof)
+ r.NoError(err)
+ }
+ var votersRoot = make([]byte, sp.HashSize)
+ copy(votersRoot[:], lastStateProofBlock.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment)
+
+ provenWeight, overflowed := basics.Muldiv(lastStateProofBlock.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight.Raw, uint64(consensusParams.StateProofWeightThreshold), 1<<32)
+ r.False(overflowed)
+
+ verifier, err := sp.MkVerifier(votersRoot, provenWeight, consensusParams.StateProofStrengthTarget)
+ r.NoError(err)
+
+ err = verifier.Verify(uint64(nextStateProofBlock.Round()), stateProofMessage.Hash(), &stateProof)
+ r.NoError(err)
+ return stateProofMessage, nextStateProofBlock
+}
+
+// TestRecoverFromLaggingStateProofChain simulates a situation where the stateproof chain is lagging after the main chain.
+// If the missing data is being accepted before StateProofMaxRecoveryIntervals * StateProofInterval rounds have passed, nodes should
+// be able to produce stateproofs and continue as normal
+func TestRecoverFromLaggingStateProofChain(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ r := require.New(fixtures.SynchronizedTest(t))
+
+ configurableConsensus := make(config.ConsensusProtocols)
+ consensusVersion := protocol.ConsensusVersion("test-fast-stateproofs")
+ consensusParams := getDefaultStateProofConsensusParams()
+ // Stateproof can be generated even if not all nodes function correctly. e.g node can be offline
+ // and stateproofs might still get generated. in order to make sure that all nodes work correctly
+ // we want the network to fail in generating stateproof if one node is not working correctly.
+ // For that we will increase the proven Weight to be close to 100%. However, this change might not be enough.
+ // if the signed Weight and the Proven Weight are very close to each other the number of reveals in the state proof
+ // will exceed the MAX_NUMBER_OF_REVEALS and proofs would not get generated
+ // for that reason we need to the decrease the StateProofStrengthTarget creating a "weak cert"
+ consensusParams.StateProofWeightThreshold = (1 << 32) * 90 / 100
+ consensusParams.StateProofStrengthTarget = 4
+ consensusParams.StateProofMaxRecoveryIntervals = 4
+ configurableConsensus[consensusVersion] = consensusParams
+
+ var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(configurableConsensus)
+ fixture.Setup(t, filepath.Join("nettemplates", "StateProof.json"))
+ defer fixture.Shutdown()
+
+ err := fixture.WaitForRound(1, timeoutUntilNextRound)
+ r.NoError(err)
+
+ dir, err := fixture.GetNodeDir("Node4")
+ r.NoError(err)
+
+ nc := nodecontrol.MakeNodeController(fixture.GetBinDir(), dir)
+ //Stop one of the nodes to prevent SP generation due to insufficient signatures.
+ nc.FullStop()
+
+ var lastStateProofBlock bookkeeping.Block
+ var lastStateProofMessage stateproofmsg.Message
+ libgoal := fixture.LibGoalClient
+
+ expectedNumberOfStateProofs := uint64(4)
+ // Loop through the rounds enough to check for expectedNumberOfStateProofs state proofs
+ for rnd := uint64(2); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ {
+ // Start the node in the last interval after which the SP will be abandoned if SPs are not generated.
+ if rnd == (consensusParams.StateProofMaxRecoveryIntervals)*consensusParams.StateProofInterval {
+ t.Logf("at round %d starting node\n", rnd)
+ dir, err = fixture.GetNodeDir("Node4")
+ r.NoError(err)
+ fixture.StartNode(dir)
+ }
+
+ // send a dummy payment transaction to create non-empty blocks
+ paymentSender{
+ from: accountFetcher{nodeName: "Node0", accountNumber: 0},
+ to: accountFetcher{nodeName: "Node1", accountNumber: 0},
+ amount: 1,
+ }.sendPayment(r, &fixture, rnd)
+
+ err = fixture.WaitForRound(rnd, timeoutUntilNextRound)
+ r.NoError(err)
+
+ blk, err := libgoal.BookkeepingBlock(rnd)
+ r.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd)
+
+ if (rnd % consensusParams.StateProofInterval) == 0 {
+ // Must have a merkle commitment for participants
+ r.True(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment) > 0)
+ r.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != basics.MicroAlgos{})
+
+ // Special case: bootstrap validation with the first block
+ // that has a merkle root.
+ if lastStateProofBlock.Round() == 0 {
+ lastStateProofBlock = blk
+ }
+ }
+
+ // in case StateProofNextRound has changed (larger than the lastStateProofBlock ) we verify the new stateproof.
+ // since the stateproof chain is catching up there would be several proofs to check
+ for lastStateProofBlock.Round()+basics.Round(consensusParams.StateProofInterval) < blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound &&
+ lastStateProofBlock.Round() != 0 {
+ nextStateProofRound := uint64(lastStateProofBlock.Round()) + consensusParams.StateProofInterval
+
+ t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round())
+ // Find the state proof transaction
+ stateProofMessage, nextStateProofBlock := verifyStateProofForRound(r, &fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams, expectedNumberOfStateProofs)
+ lastStateProofMessage = stateProofMessage
+ lastStateProofBlock = nextStateProofBlock
+ }
+ }
+ r.Equalf(int(consensusParams.StateProofInterval*expectedNumberOfStateProofs), int(lastStateProofBlock.Round()), "the expected last state proof block wasn't the one that was observed")
+}
+
+// TestUnableToRecoverFromLaggingStateProofChain simulates a situation where the stateproof chain is lagging after the main chain.
+// unlike TestRecoverFromLaggingStateProofChain, in this test the node will start at a later round and the network will not be able to produce stateproofs/
+func TestUnableToRecoverFromLaggingStateProofChain(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ r := require.New(fixtures.SynchronizedTest(t))
+
+ configurableConsensus := make(config.ConsensusProtocols)
+ consensusVersion := protocol.ConsensusVersion("test-fast-stateproofs")
+ consensusParams := getDefaultStateProofConsensusParams()
+ // Stateproof can be generated even if not all nodes function correctly. e.g node can be offline
+ // and stateproofs might still get generated. in order to make sure that all nodes work correctly
+ // we want the network to fail in generating stateproof if one node is not working correctly.
+ // For that we will increase the proven Weight to be close to 100%. However, this change might not be enough.
+ // if the signed Weight and the Proven Weight are very close to each other the number of reveals in the state proof
+ // will exceed the MAX_NUMBER_OF_REVEALS and proofs would not get generated
+ // for that reason we need to the decrease the StateProofStrengthTarget creating a "weak cert"
+ consensusParams.StateProofWeightThreshold = (1 << 32) * 90 / 100
+ consensusParams.StateProofStrengthTarget = 4
+ consensusParams.StateProofMaxRecoveryIntervals = 4
+ configurableConsensus[consensusVersion] = consensusParams
+
+ var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(configurableConsensus)
+ fixture.Setup(t, filepath.Join("nettemplates", "StateProof.json"))
+ defer fixture.Shutdown()
+
+ err := fixture.WaitForRound(1, timeoutUntilNextRound)
+ r.NoError(err)
+
+ dir, err := fixture.GetNodeDir("Node4")
+ r.NoError(err)
+ nc := nodecontrol.MakeNodeController(fixture.GetBinDir(), dir)
+ nc.FullStop()
+
+ var lastStateProofBlock bookkeeping.Block
+ libgoal := fixture.LibGoalClient
+
+ expectedNumberOfStateProofs := uint64(4)
+ // Loop through the rounds enough to check for expectedNumberOfStateProofs state proofs
+ for rnd := uint64(2); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ {
+ if rnd == (consensusParams.StateProofMaxRecoveryIntervals+2)*consensusParams.StateProofInterval {
+ t.Logf("at round %d starting node\n", rnd)
+ dir, err = fixture.GetNodeDir("Node4")
+ r.NoError(err)
+ fixture.StartNode(dir)
+ }
+
+ // send a dummy payment transaction to create non-empty blocks
+ paymentSender{
+ from: accountFetcher{nodeName: "Node0", accountNumber: 0},
+ to: accountFetcher{nodeName: "Node1", accountNumber: 0},
+ amount: 1,
+ }.sendPayment(r, &fixture, rnd)
+
+ err = fixture.WaitForRound(rnd, timeoutUntilNextRound)
+ r.NoError(err)
+
+ blk, err := libgoal.BookkeepingBlock(rnd)
+ r.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd)
+
+ if (rnd % consensusParams.StateProofInterval) == 0 {
+ // Must have a merkle commitment for participants
+ r.True(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment) > 0)
+ r.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != basics.MicroAlgos{})
+
+ // Special case: bootstrap validation with the first block
+ // that has a merkle root.
+ if lastStateProofBlock.Round() == 0 {
+ lastStateProofBlock = blk
+ }
+ }
+
+ if lastStateProofBlock.Round()+basics.Round(consensusParams.StateProofInterval) < blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound &&
+ lastStateProofBlock.Round() != 0 {
+ r.FailNow("found a state proof at round %d", blk.Round())
+ }
+ }
+}
+
+// installParticipationKey generates a new key for a given account and installs it with the client.
+func installParticipationKey(t *testing.T, client libgoal.Client, addr string, firstValid, lastValid uint64) (resp generated.PostParticipationResponse, part account.Participation, err error) {
+ dir, err := ioutil.TempDir("", "temporary_partkey_dir")
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+
+ // Install overlapping participation keys...
+ part, filePath, err := client.GenParticipationKeysTo(addr, firstValid, lastValid, 100, dir)
+ require.NoError(t, err)
+ require.NotNil(t, filePath)
+ require.Equal(t, addr, part.Parent.String())
+
+ resp, err = client.AddParticipationKey(filePath)
+ return
+}
+
+func registerParticipationAndWait(t *testing.T, client libgoal.Client, part account.Participation) generated.NodeStatusResponse {
+ currentRnd, err := client.CurrentRound()
+ require.NoError(t, err)
+ sAccount := part.Address().String()
+ sWH, err := client.GetUnencryptedWalletHandle()
+ require.NoError(t, err)
+ goOnlineTx, err := client.MakeRegistrationTransactionWithGenesisID(part, 1000, currentRnd, uint64(part.LastValid), [32]byte{}, true)
+ assert.NoError(t, err)
+ require.Equal(t, sAccount, goOnlineTx.Src().String())
+ onlineTxID, err := client.SignAndBroadcastTransaction(sWH, nil, goOnlineTx)
+ require.NoError(t, err)
+ require.NotEmpty(t, onlineTxID)
+ status, err := client.WaitForRound(currentRnd + 1)
+ require.NoError(t, err)
+ return status
+}
+
+// In this test, we have five nodes, where we only need four to create a StateProof.
+// After making the first Stateproof, we transfer three-quarters of the stake of the
+// rich node to the poor node. For both cases, we assert different stakes, that is, to
+// conclude whether the poor node is used to create the StateProof or the rich node.
+func TestAttestorsChangeTest(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ consensusParams := getDefaultStateProofConsensusParams()
+ // Stateproof can be generated even if not all nodes function correctly. e.g node can be offline
+ // and stateproofs might still get generated. in order to make sure that all nodes work correctly
+ // we want the network to fail in generating stateproof if one node is not working correctly.
+ // For that we will increase the proven Weight to be close to 100%. However, this change might not be enough.
+ // if the signed Weight and the Proven Weight are very close to each other the number of reveals in the state proof
+ // will exceed the MAX_NUMBER_OF_REVEALS and proofs would not get generated
+ // for that reason we need to the decrease the StateProofStrengthTarget creating a "weak cert"
+ consensusParams.StateProofWeightThreshold = (1 << 32) * 90 / 100
+ consensusParams.StateProofStrengthTarget = 4
+ consensusParams.StateProofTopVoters = 4
+
+ configurableConsensus := config.ConsensusProtocols{
+ protocol.ConsensusVersion("test-fast-stateproofs"): consensusParams,
+ }
+
+ var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(configurableConsensus)
+ fixture.Setup(t, filepath.Join("nettemplates", "RichAccountStateProof.json"))
+ defer fixture.Shutdown()
+
+ var lastStateProofBlock bookkeeping.Block
+ var lastStateProofMessage stateproofmsg.Message
+ libgoal := fixture.LibGoalClient
+
+ expectedNumberOfStateProofs := uint64(4)
+ // Loop through the rounds enough to check for expectedNumberOfStateProofs state proofs
+
+ paymentMaker := paymentSender{
+ from: accountFetcher{nodeName: "richNode", accountNumber: 0},
+ to: accountFetcher{nodeName: "poorNode", accountNumber: 0},
+ }
+
+ for rnd := uint64(1); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ {
+ // Changing the amount to pay. This should transfer most of the money from the rich node to the poor node.
+ if consensusParams.StateProofInterval*2 == rnd {
+ balance := paymentMaker.from.getBalance(a, &fixture)
+ // ensuring that before the test, the rich node (from) has a significantly larger balance.
+ a.True(balance/2 > paymentMaker.to.getBalance(a, &fixture))
+
+ paymentMaker.amount = balance * 9 / 10
+ paymentMaker.sendPayment(a, &fixture, rnd)
+ }
+
+ // verifies that rich account transferred most of its money to the account that sits on poorNode.
+ if consensusParams.StateProofInterval*3 == rnd {
+ a.True(paymentMaker.to.getBalance(a, &fixture) > paymentMaker.from.getBalance(a, &fixture))
+ }
+
+ a.NoError(fixture.WaitForRound(rnd, timeoutUntilNextRound))
+ blk, err := libgoal.BookkeepingBlock(rnd)
+ a.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd)
+
+ if (rnd % consensusParams.StateProofInterval) == 0 {
+ // Must have a merkle commitment for participants
+ a.True(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment) > 0)
+ a.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != basics.MicroAlgos{})
+
+ stake := blk.BlockHeader.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight.ToUint64()
+
+ // the main part of the test (computing the total stake of the nodes):
+ sum := uint64(0)
+ for i := 1; i <= 3; i++ {
+ sum += accountFetcher{fmt.Sprintf("Node%d", i), 0}.getBalance(a, &fixture)
+ }
+
+ richNodeStake := accountFetcher{"richNode", 0}.getBalance(a, &fixture)
+ poorNodeStake := accountFetcher{"poorNode", 0}.getBalance(a, &fixture)
+ sum = sum + richNodeStake + poorNodeStake
+
+ a.Equal(sum, stake)
+
+ // Special case: bootstrap validation with the first block
+ // that has a merkle root.
+ if lastStateProofBlock.Round() == 0 {
+ lastStateProofBlock = blk
+ }
+ } else {
+ a.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight == basics.MicroAlgos{})
+ }
+
+ for lastStateProofBlock.Round()+basics.Round(consensusParams.StateProofInterval) < blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound &&
+ lastStateProofBlock.Round() != 0 {
+ nextStateProofRound := uint64(lastStateProofBlock.Round()) + consensusParams.StateProofInterval
+
+ t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round())
+ // Find the state proof transaction
+ stateProofMessage, nextStateProofBlock := verifyStateProofForRound(a, &fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams, expectedNumberOfStateProofs)
+ lastStateProofMessage = stateProofMessage
+ lastStateProofBlock = nextStateProofBlock
+ }
+ }
+
+ a.Equalf(int(consensusParams.StateProofInterval*expectedNumberOfStateProofs), int(lastStateProofBlock.Round()), "the expected last state proof block wasn't the one that was observed")
+}
+
+func TestTotalWeightChanges(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ consensusParams := getDefaultStateProofConsensusParams()
+ consensusParams.StateProofWeightThreshold = (1 << 32) * 90 / 100
+ consensusParams.StateProofStrengthTarget = 4
+ consensusParams.StateProofTopVoters = 4
+ //consensusParams.StateProofInterval = 32
+
+ configurableConsensus := config.ConsensusProtocols{
+ protocol.ConsensusVersion("test-fast-stateproofs"): consensusParams,
+ }
+
+ var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(configurableConsensus)
+ fixture.Setup(t, filepath.Join("nettemplates", "RichAccountStateProof.json"))
+ defer fixture.Shutdown()
+
+ var lastStateProofBlock bookkeeping.Block
+ var lastStateProofMessage stateproofmsg.Message
+ libgoal := fixture.LibGoalClient
+
+ richNode := accountFetcher{nodeName: "richNode", accountNumber: 0}
+
+ expectedNumberOfStateProofs := uint64(4)
+ // Loop through the rounds enough to check for expectedNumberOfStateProofs state proofs
+
+ for rnd := uint64(1); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ {
+ // Rich node goes offline
+ if consensusParams.StateProofInterval*2-8 == rnd {
+ // subtract 8 rounds since the total online stake is calculated prior to the actual state proof round (lookback)
+ richNode.goOffline(a, &fixture, rnd)
+ }
+
+ a.NoError(fixture.WaitForRound(rnd, 30*time.Second))
+ blk, err := libgoal.BookkeepingBlock(rnd)
+ a.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd)
+
+ if (rnd % consensusParams.StateProofInterval) == 0 {
+ // Must have a merkle commitment for participants
+ a.Greater(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment), 0)
+ totalStake := blk.BlockHeader.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight.ToUint64()
+ a.NotEqual(basics.MicroAlgos{}, totalStake)
+
+ if rnd <= consensusParams.StateProofInterval {
+ a.Equal(uint64(10000000000000000), totalStake)
+ } else { // richNode should be offline by now
+ a.Greater(uint64(10000000000000000), totalStake)
+ }
+
+ // Special case: bootstrap validation with the first block
+ // that has a merkle root.
+ if lastStateProofBlock.Round() == 0 {
+ lastStateProofBlock = blk
+ }
+ } else {
+ a.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight == basics.MicroAlgos{})
+ }
+
+ for lastStateProofBlock.Round()+basics.Round(consensusParams.StateProofInterval) < blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound &&
+ lastStateProofBlock.Round() != 0 {
+ nextStateProofRound := uint64(lastStateProofBlock.Round()) + consensusParams.StateProofInterval
+
+ t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round())
+ // Find the state proof transaction
+ stateProofMessage, nextStateProofBlock := verifyStateProofForRound(a, &fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams, expectedNumberOfStateProofs)
+ lastStateProofMessage = stateProofMessage
+ lastStateProofBlock = nextStateProofBlock
+ }
+ }
+
+ a.Equalf(int(consensusParams.StateProofInterval*expectedNumberOfStateProofs), int(lastStateProofBlock.Round()), "the expected last state proof block wasn't the one that was observed")
+}
+
+// TestSPWithTXPoolFull makes sure a SP txn goes into the pool when the pool is full
+func TestSPWithTXPoolFull(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ var fixture fixtures.RestClientFixture
+ configurableConsensus := make(config.ConsensusProtocols)
+ consensusParams := getDefaultStateProofConsensusParams()
+ consensusParams.StateProofInterval = 4
+ configurableConsensus[protocol.ConsensusFuture] = consensusParams
+
+ fixture.SetConsensus(configurableConsensus)
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
+
+ dir, err := fixture.GetNodeDir("Primary")
+ a.NoError(err)
+
+ cfg, err := config.LoadConfigFromDisk(dir)
+ a.NoError(err)
+ cfg.TxPoolSize = 0
+ cfg.SaveToDisk(dir)
+
+ dir, err = fixture.GetNodeDir("Node")
+ a.NoError(err)
+ cfg.SaveToDisk(dir)
+
+ fixture.Start()
+ defer fixture.Shutdown()
+
+ relay := fixture.GetLibGoalClientForNamedNode("Primary")
+
+ params, err := relay.SuggestedParams()
+ require.NoError(t, err)
+
+ var genesisHash crypto.Digest
+ copy(genesisHash[:], params.GenesisHash)
+
+ round := uint64(0)
+ for round < uint64(20) {
+ params, err = relay.SuggestedParams()
+ require.NoError(t, err)
+
+ round = params.LastRound
+ err = fixture.WaitForRound(round+1, 6*time.Second)
+ require.NoError(t, err)
+
+ b, err := relay.Block(round + 1)
+ require.NoError(t, err)
+ if len(b.Transactions.Transactions) == 0 {
+ continue
+ }
+ require.Equal(t, string(protocol.StateProofTx), b.Transactions.Transactions[0].Type)
+ var msg stateproofmsg.Message
+ err = protocol.Decode(b.Transactions.Transactions[0].StateProof.StateProofMessage, &msg)
+ require.NoError(t, err)
+ require.Equal(t, uint64(8), msg.LastAttestedRound)
+ break
+ }
+ require.Less(t, round, uint64(20))
+}
+
+// TestAtMostOneSPFullPool tests that there is at most one SP txn is admitted to the pool per roound
+// when the pool is full. Note that the test sets TxPoolSize to 0 to simulate a full pool, which
+// guarantees that no more than 1 SP txn get into a block. In normal configuration, it is
+// possible to have multiple SPs getting into the same block when the pool is full.
+func TestAtMostOneSPFullPool(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ var fixture fixtures.RestClientFixture
+ configurableConsensus := make(config.ConsensusProtocols)
+ consensusParams := getDefaultStateProofConsensusParams()
+ consensusParams.StateProofInterval = 4
+ configurableConsensus[protocol.ConsensusFuture] = consensusParams
+
+ fixture.SetConsensus(configurableConsensus)
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "OneNodeFuture.json"))
+
+ dir, err := fixture.GetNodeDir("Primary")
+ a.NoError(err)
+
+ cfg, err := config.LoadConfigFromDisk(dir)
+ a.NoError(err)
+ cfg.TxPoolSize = 0
+ cfg.SaveToDisk(dir)
+
+ fixture.Start()
+ defer fixture.Shutdown()
+
+ relay := fixture.GetLibGoalClientForNamedNode("Primary")
+
+ params, err := relay.SuggestedParams()
+ require.NoError(t, err)
+
+ // Check that the first 2 stateproofs are added to the blockchain in different rounds
+ round := uint64(0)
+ expectedSPRound := consensusParams.StateProofInterval * 2
+ for round < consensusParams.StateProofInterval*10 {
+ round = params.LastRound
+
+ err := fixture.WaitForRound(round+1, 6*time.Second)
+ require.NoError(t, err)
+
+ b, err := relay.Block(round + 1)
+ require.NoError(t, err)
+
+ params, err = relay.SuggestedParams()
+ require.NoError(t, err)
+ if len(b.Transactions.Transactions) == 0 {
+ continue
+ }
+ tid := 0
+ // Find a SP transaction in the block. The SP should be for StateProofIntervalLatestRound expectedSPRound
+ // Since the pool is full, only one additional SP transaction is allowed in. So only one SP can be added to be block
+ // break after finding it, and look for the next one in a subsequent block
+ // In case two SP transactions get into the same block, the following loop will not find the second one, and fail the test
+ for ; tid < len(b.Transactions.Transactions); tid++ {
+ if b.Transactions.Transactions[tid].Type == string(protocol.StateProofTx) {
+ require.Equal(t, string(protocol.StateProofTx), b.Transactions.Transactions[tid].Type)
+
+ var msg stateproofmsg.Message
+ err = protocol.Decode(b.Transactions.Transactions[tid].StateProof.StateProofMessage, &msg)
+ require.NoError(t, err)
+ require.Equal(t, int(expectedSPRound), int(msg.LastAttestedRound))
+
+ expectedSPRound = expectedSPRound + consensusParams.StateProofInterval
+ break
+ }
+ }
+ if expectedSPRound == consensusParams.StateProofInterval*4 {
+ break
+ }
+ }
+ // If waited till round 20 and did not yet get the stateproof with last round 12, fail the test
+ require.Less(t, round, consensusParams.StateProofInterval*10)
+}
+
+type specialAddr string
+
+func (a specialAddr) ToBeHashed() (protocol.HashID, []byte) {
+ return protocol.SpecialAddr, []byte(a)
+}
+
+// TestSPWithCounterReset tests if the state proof transaction is getting into the pool and eventually
+// at most one SP is getting into the block when the transaction pool is full.
+// Bad SP and payment transaction traffic is added to increase the odds of getting SP txn into the pool
+// in the same round.
+func TestAtMostOneSPFullPoolWithLoad(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ var fixture fixtures.RestClientFixture
+ configurableConsensus := make(config.ConsensusProtocols)
+ consensusParams := getDefaultStateProofConsensusParams()
+ consensusParams.StateProofInterval = 4
+ configurableConsensus[protocol.ConsensusFuture] = consensusParams
+
+ fixture.SetConsensus(configurableConsensus)
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "OneNodeFuture.json"))
+
+ dir, err := fixture.GetNodeDir("Primary")
+ a.NoError(err)
+
+ cfg, err := config.LoadConfigFromDisk(dir)
+ a.NoError(err)
+ cfg.TxPoolSize = 0
+ cfg.SaveToDisk(dir)
+
+ fixture.Start()
+ defer fixture.Shutdown()
+
+ relay := fixture.GetLibGoalClientForNamedNode("Primary")
+
+ params, err := relay.SuggestedParams()
+ require.NoError(t, err)
+
+ var genesisHash crypto.Digest
+ copy(genesisHash[:], params.GenesisHash)
+
+ wg := sync.WaitGroup{}
+ var done uint32
+
+ defer func() {
+ atomic.StoreUint32(&done, uint32(1))
+ wg.Wait()
+ }()
+
+ stxn := getWellformedSPTransaction(params.LastRound+1, genesisHash, consensusParams, t)
+
+ // Send well formed but bad stateproof transactions from two goroutines
+ for spSpam := 0; spSpam < 2; spSpam++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for atomic.LoadUint32(&done) != 1 {
+ _, err := relay.BroadcastTransaction(stxn)
+ // The pool is full, and only one SP transaction will be admitted in per round. Otherwise, pool is full error will be returned
+ // However, if this is the lucky SP transaction to get into the pool, it will eventually be rejected by ValidateStateProof and a different
+ // error will be returned
+ require.Error(t, err)
+ time.Sleep(25 * time.Millisecond)
+ }
+ }()
+ }
+
+ // Send payment transactions from two goroutines
+ for txnSpam := 0; txnSpam < 2; txnSpam++ {
+ wg.Add(1)
+ go func(amt uint64) {
+ defer wg.Done()
+ cntr := uint64(1)
+ params, err := relay.SuggestedParams()
+ require.NoError(t, err)
+
+ ps := paymentSender{
+ from: accountFetcher{nodeName: "Primary", accountNumber: 0},
+ amount: amt,
+ }
+ account0 := ps.from.getAccount(a, &fixture)
+
+ for atomic.LoadUint32(&done) != 1 {
+ ps.amount = cntr
+ cntr = cntr + 1
+ // ignore the returned error (most of the time will be error)
+ _, err := relay.SendPaymentFromUnencryptedWallet(account0, account0, params.Fee, ps.amount, []byte{byte(params.LastRound)})
+ require.Error(t, err)
+ require.Equal(t, "HTTP 400 Bad Request: TransactionPool.checkPendingQueueSize: transaction pool have reached capacity", err.Error())
+ time.Sleep(25 * time.Millisecond)
+ }
+ }(uint64(txnSpam + 1))
+ }
+
+ // Check that the first 2 stateproofs are added to the blockchain
+ round := uint64(0)
+ expectedSPRound := consensusParams.StateProofInterval * 2
+ for round < consensusParams.StateProofInterval*10 {
+ round = params.LastRound
+
+ err := fixture.WaitForRound(round+1, 6*time.Second)
+ require.NoError(t, err)
+
+ b, err := relay.Block(round + 1)
+ require.NoError(t, err)
+
+ params, err = relay.SuggestedParams()
+ require.NoError(t, err)
+ if len(b.Transactions.Transactions) == 0 {
+ continue
+ }
+ tid := 0
+ // Find a SP transaction in the block. The SP should be for StateProofIntervalLatestRound expectedSPRound
+ // Since the pool is full, only one additional SP transaction is allowed in. So only one SP can be added to be block
+ // break after finding it, and look for the next one in a subsequent block
+ // In case two SP transactions get into the same block, the following loop will not find the second one, and fail the test
+ for ; tid < len(b.Transactions.Transactions); tid++ {
+ if b.Transactions.Transactions[tid].Type == string(protocol.StateProofTx) {
+ require.Equal(t, string(protocol.StateProofTx), b.Transactions.Transactions[tid].Type)
+
+ var msg stateproofmsg.Message
+ err = protocol.Decode(b.Transactions.Transactions[tid].StateProof.StateProofMessage, &msg)
+ require.NoError(t, err)
+ require.Equal(t, int(expectedSPRound), int(msg.LastAttestedRound))
+
+ expectedSPRound = expectedSPRound + consensusParams.StateProofInterval
+ break
+ }
+ }
+ if expectedSPRound == consensusParams.StateProofInterval*4 {
+ break
+ }
+ }
+ // Do not check if the SPs were added to the block. TestAtMostOneSPFullPool checks it.
+ // In some environments (ARM) the high load may prevent it.
+}
+
+func getWellformedSPTransaction(round uint64, genesisHash crypto.Digest, consensusParams config.ConsensusParams, t *testing.T) (stxn transactions.SignedTxn) {
+
+ msg := stateproofmsg.Message{}
+ proof := &sp.StateProof{}
+ proto := consensusParams
+
+ stxn.Txn.Type = protocol.StateProofTx
+ stxn.Txn.Sender = transactions.StateProofSender
+ stxn.Txn.FirstValid = basics.Round(round)
+ stxn.Txn.LastValid = basics.Round(round + 1000)
+ stxn.Txn.GenesisHash = genesisHash
+ stxn.Txn.StateProofType = protocol.StateProofBasic
+ stxn.Txn.StateProof = *proof
+ stxn.Txn.Message = msg
+
+ err := stxn.Txn.WellFormed(transactions.SpecialAddresses{}, proto)
+ require.NoError(t, err)
+
+ return stxn
+}
diff --git a/test/e2e-go/features/transactions/application_test.go b/test/e2e-go/features/transactions/application_test.go
index c9a4ac131..a5786cc4f 100644
--- a/test/e2e-go/features/transactions/application_test.go
+++ b/test/e2e-go/features/transactions/application_test.go
@@ -66,7 +66,7 @@ func TestApplication(t *testing.T) {
a.NoError(err)
creator := accountList[0].Address
- wh, err := client.GetUnencryptedWalletHandle()
+ _, err = client.GetUnencryptedWalletHandle()
a.NoError(err)
fee := uint64(1000)
@@ -101,7 +101,7 @@ log
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(creator, 0, 0, fee, tx)
a.NoError(err)
- wh, err = client.GetUnencryptedWalletHandle()
+ wh, err := client.GetUnencryptedWalletHandle()
a.NoError(err)
signedTxn, err := client.SignTransactionWithWallet(wh, nil, tx)
a.NoError(err)
@@ -122,6 +122,7 @@ log
logs[31] = "c"
b, err := client.BookkeepingBlock(round)
+ a.NoError(err)
for _, ps := range b.Payset {
ed := ps.ApplyData.EvalDelta
ok = checkEqual(logs, ed.Logs)
diff --git a/test/e2e-go/features/transactions/asset_test.go b/test/e2e-go/features/transactions/asset_test.go
index fd21364b2..520f8af0e 100644
--- a/test/e2e-go/features/transactions/asset_test.go
+++ b/test/e2e-go/features/transactions/asset_test.go
@@ -67,10 +67,7 @@ func TestAssetValidRounds(t *testing.T) {
client := fixture.LibGoalClient
// First, test valid rounds to last valid conversion
- var firstValid, lastValid, validRounds uint64
- firstValid = 0
- lastValid = 0
- validRounds = 0
+ var firstValid, lastValid, lastRound, validRounds uint64
params, err := client.SuggestedParams()
a.NoError(err)
@@ -80,29 +77,29 @@ func TestAssetValidRounds(t *testing.T) {
firstValid = 0
lastValid = 0
validRounds = cparams.MaxTxnLife + 1
- firstValid, lastValid, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds)
+ firstValid, lastValid, lastRound, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds)
a.NoError(err)
- a.Equal(params.LastRound+1, firstValid)
+ a.Equal(lastRound+1, firstValid)
a.Equal(firstValid+cparams.MaxTxnLife, lastValid)
firstValid = 0
lastValid = 0
validRounds = cparams.MaxTxnLife + 2
- firstValid, lastValid, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds)
+ _, _, _, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds)
a.Error(err)
a.True(strings.Contains(err.Error(), "cannot construct transaction: txn validity period"))
firstValid = 0
lastValid = 0
validRounds = 1
- firstValid, lastValid, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds)
+ firstValid, lastValid, _, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds)
a.NoError(err)
a.Equal(firstValid, lastValid)
firstValid = 1
lastValid = 0
validRounds = 1
- firstValid, lastValid, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds)
+ firstValid, lastValid, _, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds)
a.NoError(err)
a.Equal(uint64(1), firstValid)
a.Equal(firstValid, lastValid)
@@ -110,7 +107,7 @@ func TestAssetValidRounds(t *testing.T) {
firstValid = 1
lastValid = 0
validRounds = cparams.MaxTxnLife
- firstValid, lastValid, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds)
+ firstValid, lastValid, _, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds)
a.NoError(err)
a.Equal(uint64(1), firstValid)
a.Equal(cparams.MaxTxnLife, lastValid)
@@ -118,7 +115,7 @@ func TestAssetValidRounds(t *testing.T) {
firstValid = 100
lastValid = 0
validRounds = cparams.MaxTxnLife
- firstValid, lastValid, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds)
+ firstValid, lastValid, _, err = client.ComputeValidityRounds(firstValid, lastValid, validRounds)
a.NoError(err)
a.Equal(uint64(100), firstValid)
a.Equal(firstValid+cparams.MaxTxnLife-1, lastValid)
@@ -255,7 +252,6 @@ func TestAssetConfig(t *testing.T) {
a.NoError(err)
confirmed := fixture.WaitForAllTxnsToConfirm(status.LastRound+20, txids)
a.True(confirmed, "creating max number of assets")
- txids = make(map[string]string)
// re-generate wh, since this test takes a while and sometimes
// the wallet handle expires.
@@ -265,7 +261,7 @@ func TestAssetConfig(t *testing.T) {
var tx transactions.Transaction
if config.Consensus[protocol.ConsensusFuture].MaxAssetsPerAccount != 0 {
// Creating more assets should return an error
- tx, err = client.MakeUnsignedAssetCreateTx(1, false, manager, reserve, freeze, clawback, fmt.Sprintf("toomany"), fmt.Sprintf("toomany"), assetURL, assetMetadataHash, 0)
+ tx, err = client.MakeUnsignedAssetCreateTx(1, false, manager, reserve, freeze, clawback, "toomany", "toomany", assetURL, assetMetadataHash, 0)
_, err = helperFillSignBroadcast(client, wh, account0, tx, err)
a.Error(err)
a.True(strings.Contains(err.Error(), "too many assets in account:"))
@@ -336,7 +332,6 @@ func TestAssetConfig(t *testing.T) {
a.NoError(err)
confirmed = fixture.WaitForAllTxnsToConfirm(status.LastRound+20, txids)
a.True(confirmed, "changing keys")
- txids = make(map[string]string)
info, err = client.AccountInformation(account0)
a.NoError(err)
@@ -672,7 +667,7 @@ func TestAssetGroupCreateSendDestroy(t *testing.T) {
a.Error(err)
// sending it should fail
txSend, err = client1.MakeUnsignedAssetSendTx(assetID3, 0, account1, "", "")
- txid, err = helperFillSignBroadcast(client1, wh1, account1, txSend, err)
+ _, err = helperFillSignBroadcast(client1, wh1, account1, txSend, err)
a.Error(err)
}
@@ -750,11 +745,11 @@ func TestAssetSend(t *testing.T) {
// An account with no algos should not be able to accept assets
tx, err = client.MakeUnsignedAssetSendTx(nonFrozenIdx, 0, extra, "", "")
- txid, err = helperFillSignBroadcast(client, wh, account0, tx, err)
+ _, err = helperFillSignBroadcast(client, wh, account0, tx, err)
a.NoError(err)
tx, err = client.MakeUnsignedAssetSendTx(nonFrozenIdx, 0, extra, "", "")
- txid, err = helperFillSignBroadcast(client, wh, extra, tx, err)
+ _, err = helperFillSignBroadcast(client, wh, extra, tx, err)
a.Error(err)
a.True(strings.Contains(err.Error(), "overspend"))
a.True(strings.Contains(err.Error(), "tried to spend"))
@@ -977,6 +972,7 @@ func TestAssetCreateWaitRestartDelete(t *testing.T) {
// Destroy the asset
tx, err := client.MakeUnsignedAssetDestroyTx(assetIndex)
+ a.NoError(err)
submitAndWaitForTransaction(manager, tx, "destroying assets", client, fixture, a)
// Check again that asset is destroyed
@@ -986,6 +982,7 @@ func TestAssetCreateWaitRestartDelete(t *testing.T) {
// Should be able to close now
wh, err := client.GetUnencryptedWalletHandle()
+ a.NoError(err)
_, err = client.SendPaymentFromWallet(wh, nil, account0, "", 0, 0, nil, reserve, 0, 0)
a.NoError(err)
}
@@ -1047,6 +1044,7 @@ func TestAssetCreateWaitBalLookbackDelete(t *testing.T) {
_, curRound := fixture.GetBalanceAndRound(account0)
nodeStatus, _ := client.Status()
consParams, err := client.ConsensusParams(nodeStatus.LastRound)
+ a.NoError(err)
err = fixture.WaitForRoundWithTimeout(curRound + consParams.MaxBalLookback + 1)
a.NoError(err)
@@ -1063,6 +1061,7 @@ func TestAssetCreateWaitBalLookbackDelete(t *testing.T) {
// Destroy the asset
tx, err := client.MakeUnsignedAssetDestroyTx(assetIndex)
+ a.NoError(err)
submitAndWaitForTransaction(manager, tx, "destroying assets", client, fixture, a)
// Check again that asset is destroyed
@@ -1072,6 +1071,7 @@ func TestAssetCreateWaitBalLookbackDelete(t *testing.T) {
// Should be able to close now
wh, err := client.GetUnencryptedWalletHandle()
+ a.NoError(err)
_, err = client.SendPaymentFromWallet(wh, nil, account0, "", 0, 0, nil, reserve, 0, 0)
a.NoError(err)
}
@@ -1084,7 +1084,7 @@ func setupTestAndNetwork(t *testing.T, networkTemplate string, consensus config.
t.Parallel()
asser := require.New(fixtures.SynchronizedTest(t))
- if 0 == len(networkTemplate) {
+ if len(networkTemplate) == 0 {
// If the networkTemplate is not specified, used the default one
networkTemplate = "TwoNodes50Each.json"
}
@@ -1113,6 +1113,7 @@ func createAsset(assetName, account0, manager, reserve, freeze, clawback string,
// Create two assets: one with default-freeze, and one without default-freeze
txids := make(map[string]string)
wh, err := client.GetUnencryptedWalletHandle()
+ asser.NoError(err)
tx, err := client.MakeUnsignedAssetCreateTx(100, false, manager, reserve, freeze, clawback, assetName, "testunit", assetURL, assetMetadataHash, 0)
txid, err := helperFillSignBroadcast(*client, wh, account0, tx, err)
asser.NoError(err)
@@ -1128,6 +1129,7 @@ func setupActors(account0 string, client *libgoal.Client, asser *require.Asserti
// Setup the actors
wh, err := client.GetUnencryptedWalletHandle()
+ asser.NoError(err)
manager, err = client.GenerateAddress(wh)
asser.NoError(err)
reserve, err = client.GenerateAddress(wh)
diff --git a/test/e2e-go/features/transactions/onlineStatusChange_test.go b/test/e2e-go/features/transactions/onlineStatusChange_test.go
index be2ff60ff..fdda7eea6 100644
--- a/test/e2e-go/features/transactions/onlineStatusChange_test.go
+++ b/test/e2e-go/features/transactions/onlineStatusChange_test.go
@@ -18,8 +18,6 @@ package transactions
import (
"fmt"
- "io/ioutil"
- "os"
"path/filepath"
"testing"
@@ -103,6 +101,7 @@ func testAccountsCanChangeOnlineState(t *testing.T, templatePath string) {
goOfflineUTx, err := client.MakeUnsignedGoOfflineTx(initiallyOnline, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{})
a.NoError(err, "should be able to make go offline tx")
wh, err = client.GetUnencryptedWalletHandle()
+ a.NoError(err)
offlineTxID, err := client.SignAndBroadcastTransaction(wh, nil, goOfflineUTx)
a.NoError(err, "should be no errors when going offline")
@@ -114,6 +113,7 @@ func testAccountsCanChangeOnlineState(t *testing.T, templatePath string) {
becomeNonparticpatingUTx, err := client.MakeUnsignedBecomeNonparticipatingTx(becomesNonparticipating, curRound, curRound+transactionValidityPeriod, transactionFee)
a.NoError(err, "should be able to make become-nonparticipating tx")
wh, err = client.GetUnencryptedWalletHandle()
+ a.NoError(err)
nonparticipatingTxID, err = client.SignAndBroadcastTransaction(wh, nil, becomeNonparticpatingUTx)
a.NoError(err, "should be no errors when marking nonparticipating")
}
@@ -170,12 +170,9 @@ func TestCloseOnError(t *testing.T) {
// get the current round for partkey creation
_, curRound := fixture.GetBalanceAndRound(initiallyOnline)
- tempDir, err := ioutil.TempDir(os.TempDir(), "test-close-on-error")
- require.NoError(t, err)
- defer os.RemoveAll(tempDir)
-
var partkeyFile string
- _, partkeyFile, err = client.GenParticipationKeysTo(initiallyOffline, 0, curRound+1000, 0, tempDir)
+ _, partkeyFile, err = client.GenParticipationKeysTo(initiallyOffline, 0, curRound+1000, 0, t.TempDir())
+ a.NoError(err)
// make a participation key for initiallyOffline
_, err = client.AddParticipationKey(partkeyFile)
diff --git a/test/e2e-go/features/transactions/proof_test.go b/test/e2e-go/features/transactions/proof_test.go
index 9cbc15107..8de0dcb02 100644
--- a/test/e2e-go/features/transactions/proof_test.go
+++ b/test/e2e-go/features/transactions/proof_test.go
@@ -23,7 +23,6 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklearray"
- "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/framework/fixtures"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -99,36 +98,19 @@ func TestTxnMerkleProof(t *testing.T) {
blk, err := client.BookkeepingBlock(confirmedTx.ConfirmedRound)
a.NoError(err)
- proofresp, err := client.TxnProof(txid.String(), confirmedTx.ConfirmedRound, crypto.Sha512_256)
+ proofresp, proof, err := fixture.TransactionProof(txid.String(), confirmedTx.ConfirmedRound, crypto.Sha512_256)
a.NoError(err)
- proofrespSHA256, err := client.TxnProof(txid.String(), confirmedTx.ConfirmedRound, crypto.Sha256)
+ proofrespSHA256, proofSHA256, err := fixture.TransactionProof(txid.String(), confirmedTx.ConfirmedRound, crypto.Sha256)
a.NoError(err)
- generateProof := func(h crypto.HashType, prfRsp generated.ProofResponse) (p merklearray.Proof) {
- p.HashFactory = crypto.HashFactory{HashType: h}
- p.TreeDepth = uint8(prfRsp.Treedepth)
- a.NotEqual(p.TreeDepth, 0)
- proofconcat := prfRsp.Proof
- for len(proofconcat) > 0 {
- var d crypto.Digest
- copy(d[:], proofconcat)
- p.Path = append(p.Path, d[:])
- proofconcat = proofconcat[len(d):]
- }
- return
- }
-
- proof := generateProof(crypto.Sha512_256, proofresp)
- proofSHA256 := generateProof(crypto.Sha256, proofrespSHA256)
-
element := TxnMerkleElemRaw{Txn: crypto.Digest(txid)}
copy(element.Stib[:], proofresp.Stibhash[:])
elems := make(map[uint64]crypto.Hashable)
elems[proofresp.Idx] = &element
- err = merklearray.Verify(blk.TxnCommitments.NativeSha512_256Commitment.ToSlice(), elems, &proof)
+ err = merklearray.Verify(blk.TxnCommitments.NativeSha512_256Commitment.ToSlice(), elems, proof.ToProof())
if err != nil {
t.Logf("blk.TxnCommitments : %v \nproof path %v \ndepth: %d \nStibhash %v\nIndex: %d", blk.TxnCommitments.NativeSha512_256Commitment.ToSlice(), proof.Path, proof.TreeDepth, proofresp.Stibhash, proofresp.Idx)
a.NoError(err)
@@ -140,7 +122,7 @@ func TestTxnMerkleProof(t *testing.T) {
elems = make(map[uint64]crypto.Hashable)
elems[proofrespSHA256.Idx] = &element
- err = merklearray.VerifyVectorCommitment(blk.TxnCommitments.Sha256Commitment.ToSlice(), elems, &proofSHA256)
+ err = merklearray.VerifyVectorCommitment(blk.TxnCommitments.Sha256Commitment.ToSlice(), elems, proofSHA256.ToProof())
if err != nil {
t.Logf("blk.TxnCommitments : %v \nproof path %v \ndepth: %d \nStibhash %v\nIndex: %d", blk.TxnCommitments.Sha256Commitment.ToSlice(), proofSHA256.Path, proofSHA256.TreeDepth, proofrespSHA256.Stibhash, proofrespSHA256.Idx)
a.NoError(err)
diff --git a/test/e2e-go/features/transactions/sendReceive_test.go b/test/e2e-go/features/transactions/sendReceive_test.go
index be10042fd..d17bd7dd2 100644
--- a/test/e2e-go/features/transactions/sendReceive_test.go
+++ b/test/e2e-go/features/transactions/sendReceive_test.go
@@ -93,7 +93,9 @@ func testAccountsCanSendMoney(t *testing.T, templatePath string, numberOfSends i
}
pingBalance, err := c.GetBalance(pingAccount)
+ a.NoError(err)
pongBalance, err := c.GetBalance(pongAccount)
+ a.NoError(err)
a.Equal(pingBalance, pongBalance, "both accounts should start with same balance")
a.NotEqual(pingAccount, pongAccount, "accounts under study should be different")
@@ -124,12 +126,11 @@ func testAccountsCanSendMoney(t *testing.T, templatePath string, numberOfSends i
expectedPongBalance = expectedPongBalance - transactionFee - amountPongSendsPing + amountPingSendsPong
var pongTxInfo, pingTxInfo v1.Transaction
- pongTxInfo, err = pingClient.PendingTransactionInformation(pongTx.ID().String())
+ pongTxInfo, err = pongClient.PendingTransactionInformation(pongTx.ID().String())
if err == nil {
pingTxInfo, err = pingClient.PendingTransactionInformation(pingTx.ID().String())
}
waitForTransaction = err != nil || pongTxInfo.ConfirmedRound == 0 || pingTxInfo.ConfirmedRound == 0
-
if waitForTransaction {
curStatus, _ := pongClient.Status()
curRound := curStatus.LastRound
@@ -140,23 +141,31 @@ func testAccountsCanSendMoney(t *testing.T, templatePath string, numberOfSends i
curStatus, _ := pongClient.Status()
curRound := curStatus.LastRound
- if waitForTransaction {
- fixture.AlgodClient = fixture.GetAlgodClientForController(fixture.GetNodeControllerForDataDir(pongClient.DataDir()))
- fixture.WaitForAllTxnsToConfirm(curRound+uint64(5), pingTxidsToAddresses)
- }
+ confirmed := true
+
+ fixture.AlgodClient = fixture.GetAlgodClientForController(fixture.GetNodeControllerForDataDir(pongClient.DataDir()))
+ confirmed = fixture.WaitForAllTxnsToConfirm(curRound+uint64(5), pingTxidsToAddresses)
+ a.True(confirmed, "failed to see confirmed ping transaction by round %v", curRound+uint64(5))
+ confirmed = fixture.WaitForAllTxnsToConfirm(curRound+uint64(5), pongTxidsToAddresses)
+ a.True(confirmed, "failed to see confirmed pong transaction by round %v", curRound+uint64(5))
- pingBalance, _ = fixture.GetBalanceAndRound(pingAccount)
- pongBalance, _ = fixture.GetBalanceAndRound(pongAccount)
+ pingBalance, err = pongClient.GetBalance(pingAccount)
+ a.NoError(err)
+ pongBalance, err = pongClient.GetBalance(pongAccount)
+ a.NoError(err)
a.True(expectedPingBalance <= pingBalance, "ping balance is different than expected.")
a.True(expectedPongBalance <= pongBalance, "pong balance is different than expected.")
- if waitForTransaction {
- fixture.AlgodClient = fixture.GetAlgodClientForController(fixture.GetNodeControllerForDataDir(pingClient.DataDir()))
- fixture.WaitForAllTxnsToConfirm(curRound+uint64(5), pongTxidsToAddresses)
- }
+ fixture.AlgodClient = fixture.GetAlgodClientForController(fixture.GetNodeControllerForDataDir(pingClient.DataDir()))
+ confirmed = fixture.WaitForAllTxnsToConfirm(curRound+uint64(5), pingTxidsToAddresses)
+ a.True(confirmed, "failed to see confirmed ping transaction by round %v", curRound+uint64(5))
+ confirmed = fixture.WaitForAllTxnsToConfirm(curRound+uint64(5), pongTxidsToAddresses)
+ a.True(confirmed, "failed to see confirmed pong transaction by round %v", curRound+uint64(5))
- pingBalance, _ = fixture.GetBalanceAndRound(pingAccount)
- pongBalance, _ = fixture.GetBalanceAndRound(pongAccount)
+ pingBalance, err = pingClient.GetBalance(pingAccount)
+ a.NoError(err)
+ pongBalance, err = pingClient.GetBalance(pongAccount)
+ a.NoError(err)
a.True(expectedPingBalance <= pingBalance, "ping balance is different than expected.")
a.True(expectedPongBalance <= pongBalance, "pong balance is different than expected.")
}
diff --git a/test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go b/test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go
index b70be41e0..364dcfb03 100644
--- a/test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go
+++ b/test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go
@@ -380,7 +380,7 @@ func TestSignProgram(t *testing.T) {
a.NotEqual(sig, crypto.Signature{})
ph := logic.Program(program)
- a.True(secrets.SignatureVerifier.Verify(ph, sig, true))
+ a.True(secrets.SignatureVerifier.Verify(ph, sig))
}
func BenchmarkSignTransaction(b *testing.B) {
diff --git a/test/e2e-go/restAPI/restClient_test.go b/test/e2e-go/restAPI/restClient_test.go
index dee64d399..3506b8b66 100644
--- a/test/e2e-go/restAPI/restClient_test.go
+++ b/test/e2e-go/restAPI/restClient_test.go
@@ -554,7 +554,8 @@ func TestAccountParticipationInfo(t *testing.T) {
lastRound := basics.Round(params.LastRound + 1000)
dilution := uint64(100)
var stateproof merklesignature.Verifier
- stateproof[0] = 1 // change some byte so the stateproof is not considered empty (required since consensus v31)
+ stateproof.KeyLifetime = merklesignature.KeyLifetimeDefault
+ stateproof.Commitment[0] = 1 // change some byte so the stateproof is not considered empty (required since consensus v31)
randomVotePKStr := randomString(32)
var votePK crypto.OneTimeSignatureVerifier
@@ -579,7 +580,7 @@ func TestAccountParticipationInfo(t *testing.T) {
VoteKeyDilution: dilution,
VoteFirst: firstRound,
VoteLast: lastRound,
- StateProofPK: stateproof,
+ StateProofPK: stateproof.Commitment,
},
}
txID, err := testClient.SignAndBroadcastTransaction(wh, nil, tx)
@@ -1160,7 +1161,7 @@ func TestStateProofParticipationKeysAPI(t *testing.T) {
actual := [merklesignature.MerkleSignatureSchemeRootSize]byte{}
a.NotNil(pRoot[0].Key.StateProofKey)
copy(actual[:], *pRoot[0].Key.StateProofKey)
- a.Equal(partkey.StateProofSecrets.GetVerifier()[:], actual[:])
+ a.Equal(partkey.StateProofSecrets.GetVerifier().Commitment[:], actual[:])
}
func TestNilStateProofInParticipationInfo(t *testing.T) {
diff --git a/test/e2e-go/upgrades/stateproof_test.go b/test/e2e-go/upgrades/stateproof_participation_test.go
index 6f9cf7f13..6f9cf7f13 100644
--- a/test/e2e-go/upgrades/stateproof_test.go
+++ b/test/e2e-go/upgrades/stateproof_participation_test.go
diff --git a/test/framework/fixtures/expectFixture.go b/test/framework/fixtures/expectFixture.go
index f1134296a..3d7293d40 100644
--- a/test/framework/fixtures/expectFixture.go
+++ b/test/framework/fixtures/expectFixture.go
@@ -19,7 +19,6 @@ package fixtures
import (
"bytes"
"fmt"
- "io/ioutil"
"os"
"os/exec"
"path"
@@ -46,8 +45,7 @@ func (ef *ExpectFixture) initialize(t *testing.T) (err error) {
ef.t = t
ef.testDir = os.Getenv("TESTDIR")
if ef.testDir == "" {
- ef.testDir, _ = ioutil.TempDir("", "tmp")
- ef.testDir = filepath.Join(ef.testDir, "expect")
+ ef.testDir = filepath.Join(t.TempDir(), "expect")
err = os.MkdirAll(ef.testDir, 0755)
if err != nil {
ef.t.Errorf("error creating test dir %s, with error %v", ef.testDir, err)
@@ -163,7 +161,7 @@ func (ef *ExpectFixture) Run() {
cmd.Stdout = &outBuf
// Set stderr to be a file descriptor. In other way Go's exec.Cmd::writerDescriptor
- // attaches goroutine reading that blocks on io.Copy from stderr.
+ // attaches a goroutine reading stderr that blocks on io.Copy from stderr.
// Cmd::CombinedOutput sets stderr to stdout and also blocks.
// Cmd::Start + Cmd::Wait with manual pipes redirection etc also blocks.
// Wrapping 'expect' with 'expect "$@" 2>&1' also blocks on stdout reading.
diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go
index e6e90b47c..af84e4d2e 100644
--- a/test/framework/fixtures/libgoalFixture.go
+++ b/test/framework/fixtures/libgoalFixture.go
@@ -31,6 +31,9 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklearray"
+ generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/gen"
"github.com/algorand/go-algorand/libgoal"
@@ -499,3 +502,34 @@ func (f *LibGoalFixture) MinFeeAndBalance(round uint64) (minFee, minBalance uint
}
return params.MinTxnFee, minBalance, nil
}
+
+// TransactionProof returns a proof for usage in merkle array verification for the provided transaction.
+func (f *LibGoalFixture) TransactionProof(txid string, round uint64, hashType crypto.HashType) (generatedV2.TransactionProofResponse, merklearray.SingleLeafProof, error) {
+ proofResp, err := f.LibGoalClient.TransactionProof(txid, round, hashType)
+ if err != nil {
+ return generatedV2.TransactionProofResponse{}, merklearray.SingleLeafProof{}, err
+ }
+
+ proof, err := merklearray.ProofDataToSingleLeafProof(proofResp.Hashtype, proofResp.Treedepth, proofResp.Proof)
+ if err != nil {
+ return generatedV2.TransactionProofResponse{}, merklearray.SingleLeafProof{}, err
+ }
+
+ return proofResp, proof, nil
+}
+
+// LightBlockHeaderProof returns a proof for usage in merkle array verification for the provided block's light block header.
+func (f *LibGoalFixture) LightBlockHeaderProof(round uint64) (generatedV2.LightBlockHeaderProofResponse, merklearray.SingleLeafProof, error) {
+ proofResp, err := f.LibGoalClient.LightBlockHeaderProof(round)
+
+ if err != nil {
+ return generatedV2.LightBlockHeaderProofResponse{}, merklearray.SingleLeafProof{}, err
+ }
+
+ proof, err := merklearray.ProofDataToSingleLeafProof(crypto.Sha256.String(), proofResp.Treedepth, proofResp.Proof)
+ if err != nil {
+ return generatedV2.LightBlockHeaderProofResponse{}, merklearray.SingleLeafProof{}, err
+ }
+
+ return proofResp, proof, nil
+}
diff --git a/test/framework/fixtures/restClientFixture.go b/test/framework/fixtures/restClientFixture.go
index fe45f755e..7265c560c 100644
--- a/test/framework/fixtures/restClientFixture.go
+++ b/test/framework/fixtures/restClientFixture.go
@@ -92,7 +92,7 @@ func (f *RestClientFixture) ClientWaitForRound(client client.RestClient, round u
}
select {
case <-timeout.C:
- return fmt.Errorf("timeout waiting for round %v", round)
+ return fmt.Errorf("timeout waiting for round %v with last round = %v", round, status.LastRound)
case <-time.After(200 * time.Millisecond):
}
}
diff --git a/test/heapwatch/client_ram_report.py b/test/heapwatch/client_ram_report.py
index 6a49ea5cd..5ac0f2dd2 100644
--- a/test/heapwatch/client_ram_report.py
+++ b/test/heapwatch/client_ram_report.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
import argparse
+import csv
import glob
import json
import logging
@@ -8,8 +9,7 @@ import os
import re
import sys
import subprocess
-
-from metrics_delta import parse_metrics, gather_metrics_files_by_nick
+import time
logger = logging.getLogger(__name__)
@@ -78,7 +78,7 @@ def get_heap_inuse_totals(dirpath):
else:
cached[nick] = sorted(old + recs)
if cached and bynick:
- with open(cache_path, 'wb') as fout:
+ with open(cache_path, 'wt') as fout:
json.dump(cached, fout)
return cached
@@ -86,6 +86,7 @@ def get_heap_inuse_totals(dirpath):
def main():
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--dir', required=True, help='dir path to find /*.metrics in')
+ ap.add_argument('--csv')
ap.add_argument('--verbose', default=False, action='store_true')
args = ap.parse_args()
@@ -94,11 +95,41 @@ def main():
else:
logging.basicConfig(level=logging.INFO)
- metrics_files = glob.glob(os.path.join(args.dir, '*.metrics'))
- filesByNick = gather_metrics_files_by_nick(metrics_files)
-
heap_totals = get_heap_inuse_totals(args.dir)
+ if args.csv:
+ if args.csv == '-':
+ csvf = sys.stdout
+ else:
+ csvf = open(args.csv, 'wt')
+ writer = csv.writer(csvf)
+ whens = set()
+ for nick, recs in heap_totals.items():
+ for ts, n in recs:
+ whens.add(ts)
+ whens = sorted(whens)
+ nodes = sorted(heap_totals.keys())
+ writer.writerow(['when','dt','round'] + nodes)
+ first = None
+ for ts in whens:
+ tv = time.mktime(time.strptime(ts, '%Y%m%d_%H%M%S'))
+ if first is None:
+ first = tv
+ nick = nodes[0]
+ bipath = os.path.join(args.dir, '{}.{}.blockinfo.json'.format(nick, ts))
+ try:
+ bi = json.load(open(bipath))
+ rnd = str(bi['block']['rnd'])
+ except:
+ rnd = ''
+ row = [ts, tv-first, rnd]
+ for nick in nodes:
+ for rec in heap_totals[nick]:
+ if rec[0] == ts:
+ row.append(rec[1])
+ break
+ writer.writerow(row)
+
return 0
if __name__ == '__main__':
diff --git a/test/heapwatch/start.sh b/test/heapwatch/start.sh
index 82560f118..be03860f4 100755
--- a/test/heapwatch/start.sh
+++ b/test/heapwatch/start.sh
@@ -20,7 +20,7 @@ goal network start -r "${TESTDIR}"
sleep 2
mkdir -p "${TESTDIR}/heaps"
-python3 "${REPO_ROOT}/test/heapwatch/heapWatch.py" -o "${TESTDIR}/heaps" --period 10m --metrics --blockinfo "${TESTDIR}/"* &
+python3 "${REPO_ROOT}/test/heapwatch/heapWatch.py" -o "${TESTDIR}/heaps" --period 5m --metrics --blockinfo "${TESTDIR}/"* &
echo "$!" > .heapWatch.pid
diff --git a/test/muleCI/mule.yaml b/test/muleCI/mule.yaml
index 8f5183503..bee9a48d3 100644
--- a/test/muleCI/mule.yaml
+++ b/test/muleCI/mule.yaml
@@ -147,54 +147,36 @@ tasks:
stashId: ${JENKINS_JOB_CACHE_ID}/darwin-arm64
globSpecs:
- tmp/node_pkgs/**/*
- - installer/genesis/devnet/genesis.json
- - installer/genesis/testnet/genesis.json
- - installer/genesis/mainnet/genesis.json
- task: stash.Stash
name: linux-amd64
bucketName: go-algorand-ci-cache
stashId: ${JENKINS_JOB_CACHE_ID}/linux-amd64
globSpecs:
- tmp/node_pkgs/**/*
- - installer/genesis/devnet/genesis.json
- - installer/genesis/testnet/genesis.json
- - installer/genesis/mainnet/genesis.json
- task: stash.Stash
name: darwin-amd64
bucketName: go-algorand-ci-cache
stashId: ${JENKINS_JOB_CACHE_ID}/darwin-amd64
globSpecs:
- tmp/node_pkgs/**/*
- - installer/genesis/devnet/genesis.json
- - installer/genesis/testnet/genesis.json
- - installer/genesis/mainnet/genesis.json
- task: stash.Stash
name: linux-arm64
bucketName: go-algorand-ci-cache
stashId: ${JENKINS_JOB_CACHE_ID}/linux-arm64
globSpecs:
- tmp/node_pkgs/**/*
- - installer/genesis/devnet/genesis.json
- - installer/genesis/testnet/genesis.json
- - installer/genesis/mainnet/genesis.json
- task: stash.Stash
name: linux-arm
bucketName: go-algorand-ci-cache
stashId: ${JENKINS_JOB_CACHE_ID}/linux-arm
globSpecs:
- tmp/node_pkgs/**/*
- - installer/genesis/devnet/genesis.json
- - installer/genesis/testnet/genesis.json
- - installer/genesis/mainnet/genesis.json
- task: stash.Stash
name: packages
bucketName: go-algorand-ci-cache
stashId: ${JENKINS_JOB_CACHE_ID}/packages
globSpecs:
- tmp/node_pkgs/**/*
- - installer/genesis/devnet/genesis.json
- - installer/genesis/testnet/genesis.json
- - installer/genesis/mainnet/genesis.json
# Unstash tasks
- task: stash.Unstash
diff --git a/test/scripts/e2e.sh b/test/scripts/e2e.sh
index e9a6b251b..ea829b2e8 100755
--- a/test/scripts/e2e.sh
+++ b/test/scripts/e2e.sh
@@ -122,11 +122,11 @@ if [ -z "$E2E_TEST_FILTER" ] || [ "$E2E_TEST_FILTER" == "SCRIPTS" ]; then
. "${TEMPDIR}/ve/bin/activate"
"${TEMPDIR}/ve/bin/pip3" install --upgrade pip
"${TEMPDIR}/ve/bin/pip3" install --upgrade cryptograpy
-
+
# Pin a version of our python SDK's so that breaking changes don't spuriously break our tests.
# Please update as necessary.
"${TEMPDIR}/ve/bin/pip3" install py-algorand-sdk==1.9.0b1
-
+
# Enable remote debugging:
"${TEMPDIR}/ve/bin/pip3" install --upgrade debugpy
duration "e2e client setup"
@@ -158,39 +158,46 @@ if [ -z "$E2E_TEST_FILTER" ] || [ "$E2E_TEST_FILTER" == "SCRIPTS" ]; then
./timeout 200 ./e2e_basic_start_stop.sh
duration "e2e_basic_start_stop.sh"
- echo "Current platform: ${E2E_PLATFORM}"
-
KEEP_TEMPS_CMD_STR=""
# If the platform is arm64, we want to pass "--keep-temps" into e2e_client_runner.py
# so that we can keep the temporary test artifact for use in the indexer e2e tests.
# The file is located at ${TEMPDIR}/net_done.tar.bz2
- if [ "$E2E_PLATFORM" == "arm64" ]; then
+ if [ -n "$CI_KEEP_TEMP_PLATFORM" ] && [ "$CI_KEEP_TEMP_PLATFORM" == "$CI_PLATFORM" ]; then
+ echo "Setting --keep-temps so that an e2e artifact can be saved."
KEEP_TEMPS_CMD_STR="--keep-temps"
fi
- "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${KEEP_TEMPS_CMD_STR} ${RUN_KMD_WITH_UNSAFE_SCRYPT} "$SRCROOT"/test/scripts/e2e_subs/*.{sh,py}
+
+ clientrunner="${TEMPDIR}/ve/bin/python3 e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT}"
+
+ $clientrunner ${KEEP_TEMPS_CMD_STR} "$SRCROOT"/test/scripts/e2e_subs/*.{sh,py}
# If the temporary artifact directory exists, then the test artifact needs to be created
if [ -d "${TEMPDIR}/net" ]; then
+ # This should be set by CI, but if it isn't set a default.
+ if [ -z "$CI_E2E_FILENAME" ]; then
+ CI_E2E_FILENAME="net_done"
+ fi
+
pushd "${TEMPDIR}" || exit 1
- tar -j -c -f net_done.tar.bz2 --exclude node.log --exclude agreement.cdv net
+ tar -j -c -f "${CI_E2E_FILENAME}.tar.bz2" --exclude node.log --exclude agreement.cdv net
rm -rf "${TEMPDIR}/net"
RSTAMP=$(TZ=UTC python -c 'import time; print("{:08x}".format(0xffffffff - int(time.time() - time.mktime((2020,1,1,0,0,0,-1,-1,-1)))))')
- echo aws s3 cp --acl public-read "${TEMPDIR}/net_done.tar.bz2" s3://algorand-testdata/indexer/e2e4/"${RSTAMP}"/net_done.tar.bz2
- aws s3 cp --acl public-read "${TEMPDIR}/net_done.tar.bz2" s3://algorand-testdata/indexer/e2e4/"${RSTAMP}"/net_done.tar.bz2
+ echo aws s3 cp --acl public-read "${TEMPDIR}/${CI_E2E_FILENAME}.tar.bz2" "s3://algorand-testdata/indexer/e2e4/${RSTAMP}/${CI_E2E_FILENAME}.tar.bz2"
+ aws s3 cp --acl public-read "${TEMPDIR}/${CI_E2E_FILENAME}.tar.bz2" "s3://algorand-testdata/indexer/e2e4/${RSTAMP}/${CI_E2E_FILENAME}.tar.bz2"
popd
fi
duration "parallel client runner"
for vdir in "$SRCROOT"/test/scripts/e2e_subs/v??; do
- "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} --version "$(basename "$vdir")" "$vdir"/*.sh
+ $clientrunner --version "$(basename "$vdir")" "$vdir"/*.sh
done
duration "vdir client runners"
for script in "$SRCROOT"/test/scripts/e2e_subs/serial/*; do
- "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} $script
+ $clientrunner "$script"
done
deactivate
diff --git a/test/scripts/e2e_client_runner.py b/test/scripts/e2e_client_runner.py
index 0156bc7ae..6fa5b4ffb 100755
--- a/test/scripts/e2e_client_runner.py
+++ b/test/scripts/e2e_client_runner.py
@@ -21,6 +21,7 @@
import argparse
import atexit
import base64
+import datetime
import glob
import json
import logging
@@ -39,7 +40,7 @@ import algosdk
logger = logging.getLogger(__name__)
scriptdir = os.path.dirname(os.path.realpath(__file__))
-repodir = os.path.join(scriptdir, "..", "..")
+repodir = os.path.join(scriptdir, "..", "..")
# less than 16kB of log we show the whole thing, otherwise the last 16kB
LOG_WHOLE_CUTOFF = 1024 * 16
@@ -131,11 +132,8 @@ def _script_thread_inner(runset, scriptname, timeout):
env = dict(runset.env)
env['TEMPDIR'] = os.path.join(env['TEMPDIR'], walletname)
os.makedirs(env['TEMPDIR'])
- cmdlogpath = os.path.join(env['TEMPDIR'],'.cmdlog')
+ cmdlogpath = os.path.join(env['TEMPDIR'], '.cmdlog')
cmdlog = open(cmdlogpath, 'wb')
- if not runset.is_ok():
- runset.done(scriptname, False, time.time() - start)
- return
logger.info('starting %s', scriptname)
p = subprocess.Popen([scriptname, walletname], env=env, cwd=repodir, stdout=cmdlog, stderr=subprocess.STDOUT)
cmdlog.close()
@@ -150,12 +148,16 @@ def _script_thread_inner(runset, scriptname, timeout):
retcode = -1
dt = time.time() - start
-
if runset.terminated:
logger.info('Program terminated before %s finishes.', scriptname)
runset.done(scriptname, False, dt)
return
+ with runset.lock:
+ with open(cmdlogpath, 'r') as fin:
+ for line in fin:
+ runset.event_log("output", scriptname, output=line)
+
if retcode != 0:
with runset.lock:
logger.error('%s failed in %f seconds', scriptname, dt)
@@ -168,11 +170,11 @@ def _script_thread_inner(runset, scriptname, timeout):
if len(lines) > 1:
# drop probably-partial first line
lines = lines[1:]
- sys.stderr.write('end of log follows ({}):\n'.format(scriptname))
+ sys.stderr.write(f'end of log follows ({scriptname}):\n')
sys.stderr.write('\n'.join(lines))
sys.stderr.write('\n\n')
else:
- sys.stderr.write('whole log follows ({}):\n'.format(scriptname))
+ sys.stderr.write(f'whole log follows ({scriptname}):\n')
sys.stderr.write(fin.read())
else:
logger.info('finished %s OK in %f seconds', scriptname, dt)
@@ -183,36 +185,27 @@ def script_thread(runset, scriptname, to):
start = time.time()
try:
_script_thread_inner(runset, scriptname, to)
- except Exception as e:
+ except Exception:
logger.error('error in e2e_client_runner.py', exc_info=True)
runset.done(scriptname, False, time.time() - start)
-def killthread(runset):
- time.sleep(5)
- runset.kill()
- return
class RunSet:
def __init__(self, env):
self.env = env
self.threads = {}
self.procs = {}
- self.ok = True
self.lock = threading.Lock()
self.terminated = None
- self.killthread = None
self.kmd = None
self.algod = None
self.pubw = None
self.maxpubaddr = None
self.errors = []
self.statuses = []
+ self.jsonfile = None
return
- def is_ok(self):
- with self.lock:
- return self.ok
-
def connect(self):
with self.lock:
self._connect()
@@ -225,7 +218,7 @@ class RunSet:
# should run from inside self.lock
algodata = self.env['ALGORAND_DATA']
- xrun(['goal', 'kmd', 'start', '-t', '3600','-d', algodata], env=self.env, timeout=5)
+ xrun(['goal', 'kmd', 'start', '-t', '3600', '-d', algodata], env=self.env, timeout=5)
self.kmd = openkmd(algodata)
self.algod = openalgod(algodata)
@@ -241,7 +234,6 @@ class RunSet:
pubwid = xw['id']
pubw = self.kmd.init_wallet_handle(pubwid, '')
pubaddrs = self.kmd.list_keys(pubw)
- pubbalances = []
maxamount = 0
maxpubaddr = None
for pa in pubaddrs:
@@ -254,9 +246,7 @@ class RunSet:
return self.pubw, self.maxpubaddr
def start(self, scriptname, timeout):
- with self.lock:
- if not self.ok:
- return
+ self.event_log("run", scriptname)
t = threading.Thread(target=script_thread, args=(self, scriptname, timeout))
t.start()
with self.lock:
@@ -267,18 +257,13 @@ class RunSet:
self.procs[scriptname] = p
def done(self, scriptname, ok, seconds):
+ self.event_log("pass" if ok else "fail", scriptname, seconds)
with self.lock:
self.statuses.append( {'script':scriptname, 'ok':ok, 'seconds':seconds} )
if not ok:
self.errors.append('{} failed'.format(scriptname))
self.threads.pop(scriptname, None)
self.procs.pop(scriptname, None)
- self.ok = self.ok and ok
- if not self.ok:
- self._terminate()
- if self.killthread is None:
- self.killthread = threading.Thread(target=killthread, args=(self,), daemon=True)
- self.killthread.start()
def _terminate(self):
# run from inside self.lock
@@ -288,12 +273,6 @@ class RunSet:
for p in self.procs.values():
p.terminate()
- def kill(self):
- with self.lock:
- for p in self.procs.values():
- p.kill()
- return
-
def wait(self, timeout):
now = time.time()
endt = now + timeout
@@ -312,9 +291,33 @@ class RunSet:
now = time.time()
if now >= endt:
with self.lock:
- self.ok = False
self._terminate()
+ def event_log(self, action, scriptname, elapsed=0.0, **kwargs):
+ if self.jsonfile:
+ prefix, base = os.path.split(scriptname)
+ prefix, package = os.path.split(prefix)
+ j = json.dumps(test_event(action, package, base, elapsed, **kwargs))
+ self.jsonfile.write(j+"\n")
+
+
+def test_event(action, package, test, elapsed=0.0, output="", time=None):
+ # Documented here: https://pkg.go.dev/cmd/test2json
+ event = {}
+
+ if time is None: # expected case
+ time = datetime.datetime.now()
+ event["Time"] = time.isoformat("T")+"Z"
+ event["Action"] = action # run | pause | cont | pass | bench | fail | output | skip
+ event["Package"] = package
+ event["Test"] = test
+ if elapsed > 0.0: # Should be set for Action=pass|fail
+ event["Elapsed"] = elapsed
+ if output: # Should be set for Action=output
+ event["Output"] = output
+
+ return event
+
# 'network stop' and 'network delete' are also tested and used as cleanup procedures
# so it re-raises exception in 'test' mode
@@ -410,7 +413,7 @@ def main():
ap.add_argument('--verbose', default=False, action='store_true')
ap.add_argument('--version', default="Future")
ap.add_argument('--unsafe_scrypt', default=False, action='store_true', help="allows kmd to run with unsafe scrypt attribute. This will speed up tests time")
-
+
args = ap.parse_args()
if args.verbose:
@@ -418,7 +421,11 @@ def main():
else:
logging.basicConfig(format=_logging_format, datefmt=_logging_datefmt, level=logging.INFO)
- logger.info('starting: %r', args.scripts)
+ if len(args.scripts) > 3:
+ logger.info('starting %d scripts', len(args.scripts))
+ else:
+ logger.info('starting: %r', args.scripts)
+
# start with a copy when making env for child processes
env = dict(os.environ)
tempdir = os.getenv('TEMPDIR')
@@ -453,6 +460,19 @@ def main():
xrun(['goal', 'node', 'status'], env=env, timeout=5)
rs = RunSet(env)
+
+ trdir = os.environ.get("TEST_RESULTS")
+ if trdir:
+ prefix, base = os.path.split(args.scripts[0])
+ prefix, package = os.path.split(prefix)
+ trdir = os.path.join(trdir, package)
+ os.makedirs(trdir, exist_ok=True)
+
+ jsonpath = os.path.join(trdir, "results.json")
+ rs.jsonfile = open(jsonpath, "w")
+ junitpath = os.path.join(trdir, "testresults.xml")
+ atexit.register(finish_test_results, rs.jsonfile, jsonpath, junitpath)
+
for scriptname in args.scripts:
rs.start(os.path.abspath(scriptname), args.timeout-10)
rs.wait(args.timeout)
@@ -474,5 +494,14 @@ def main():
return retcode
+
+def finish_test_results(jsonfile, jsonpath, junitpath):
+ # This only runs in CI, since TEST_RESULTS env var controls the
+ # block that opens the jsonfile, and registers this atexit. So we
+ # assume jsonfile is open, and gotestsum available.
+ jsonfile.close()
+ xrun(["gotestsum", "--junitfile", junitpath, "--raw-command", "cat", jsonpath])
+
+
if __name__ == '__main__':
sys.exit(main())
diff --git a/test/scripts/e2e_subs/asset-misc.sh b/test/scripts/e2e_subs/asset-misc.sh
index 866a214dc..9c43685ed 100755
--- a/test/scripts/e2e_subs/asset-misc.sh
+++ b/test/scripts/e2e_subs/asset-misc.sh
@@ -1,14 +1,14 @@
#!/bin/bash
-date '+asset-misc start %Y%m%d_%H%M%S'
+filename=$(basename "$0")
+scriptname="${filename%.*}"
+date "+${scriptname} start %Y%m%d_%H%M%S"
-set -e
-set -x
+set -ex
set -o pipefail
export SHELLOPTS
WALLET=$1
-
gcmd="goal -w ${WALLET}"
ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
@@ -17,6 +17,10 @@ ACCOUNTC=$(${gcmd} account new|awk '{ print $6 }')
ACCOUNTD=$(${gcmd} account new|awk '{ print $6 }')
ACCOUNTE=$(${gcmd} account new|awk '{ print $6 }')
+# X will be rekeyed to Y, so we can test goal asset <op> -S ...
+ACCOUNTX=$(${gcmd} account new|awk '{ print $6 }')
+ACCOUNTY=$(${gcmd} account new|awk '{ print $6 }')
+
ASSET_NAME='Birlot : décollage vs. ࠶🦪'
# to ensure IPFS URLs longer than 32 characters are supported
@@ -29,22 +33,29 @@ ASSET_ID=$(${gcmd} asset info --creator $ACCOUNT --unitname amisc|grep 'Asset ID
${gcmd} clerk send --from ${ACCOUNT} --to ${ACCOUNTB} --amount 1000000
${gcmd} clerk send --from ${ACCOUNT} --to ${ACCOUNTC} --amount 1000000
${gcmd} clerk send --from ${ACCOUNT} --to ${ACCOUNTD} --amount 1000000
+${gcmd} clerk send --from ${ACCOUNT} --to ${ACCOUNTX} --amount 1000000
+${gcmd} clerk send --from ${ACCOUNTX} --to ${ACCOUNTX} --amount 0 --rekey-to ${ACCOUNTY}
# opt in to asset
${gcmd} asset optin --assetid ${ASSET_ID} -a ${ACCOUNTB}
${gcmd} asset optin --assetid ${ASSET_ID} -a ${ACCOUNTC}
${gcmd} asset optin --assetid ${ASSET_ID} -a ${ACCOUNTD}
+${gcmd} asset optin --assetid ${ASSET_ID} -a ${ACCOUNTX} -S ${ACCOUNTY}
# fund asset
${gcmd} asset send --assetid ${ASSET_ID} -f ${ACCOUNT} -t ${ACCOUNTB} -a 1000
+# fund asset to rekeyed account, then return it
+${gcmd} asset send --assetid ${ASSET_ID} -f ${ACCOUNT} -t ${ACCOUNTX} -a 500
+${gcmd} asset send --assetid ${ASSET_ID} -f ${ACCOUNTX} -S ${ACCOUNTY} -t ${ACCOUNT} -a 500
+
# asset send some and close the rest
${gcmd} asset send --assetid ${ASSET_ID} -f ${ACCOUNTB} -t ${ACCOUNTC} -a 100 --close-to ${ACCOUNTD}
if ${gcmd} account info -a ${ACCOUNTC} |grep "${ASSET_NAME}"|grep -c -q 'balance 100 '; then
echo ok
else
- date '+asset-misc asset balance error %Y%m%d_%H%M%S'
+ date "+${scriptname} asset balance error %Y%m%d_%H%M%S"
exit 1
fi
@@ -62,7 +73,7 @@ if [ "$MANAGER_ADDRESS" = "$ACCOUNT" ] \
&& [ "$CLAWBACK_ADDRESS" = "$ACCOUNT" ]; then
echo ok
else
- date '+asset-misc asset manager, reserve, freezer, and clawback should be creator error %Y%m%d_%H%M%S'
+ date "+${scriptname} asset manager, reserve, freezer, and clawback should be creator error %Y%m%d_%H%M%S"
exit 1
fi
@@ -84,7 +95,7 @@ if [ "$IMMUTABLE_MANAGER_ADDRESS" = "" ] \
&& [ "$IMMUTABLE_CLAWBACK_ADDRESS" = "" ]; then
echo ok
else
- date '+asset-misc immutable asset manager/reserve/freezer/clawback addresses error %Y%m%d_%H%M%S'
+ date "+${scriptname} immutable asset manager/reserve/freezer/clawback addresses error %Y%m%d_%H%M%S"
exit 1
fi
@@ -104,14 +115,14 @@ if [ "$DMA_MANAGER_ADDRESS" = "$ACCOUNTB" ] \
&& [ "$DMA_CLAWBACK_ADDRESS" = "$ACCOUNTE" ]; then
echo ok
else
- date '+asset-misc asset addresses with diff manager/reserve/freeze/clawback error %Y%m%d_%H%M%S'
+ date "+${scriptname} asset addresses with diff manager/reserve/freeze/clawback error %Y%m%d_%H%M%S"
exit 1
fi
# Test Scenario - check if asset is created successfully when passed in different combination of flags for addresses
# case 1: create asset with both manager flag and no-manager flag
if ${gcmd} asset create --creator "${ACCOUNT}" --no-manager --manager "${ACCOUNTB}" --name "${ASSET_NAME}" --unitname errmisc --total 1000000000000 --asseturl "${ASSET_URL}"; then
- date '+asset-misc asset with --manager and --no-manager flags created successfully error %Y%m%d_%H%M%S'
+ date "+${scriptname} asset with --manager and --no-manager flags created successfully error %Y%m%d_%H%M%S"
exit 1
else
echo "Expected. Cannot create asset with both manager flag and no-manager flag"
@@ -119,7 +130,7 @@ fi
# case 2: create asset with both reserve flag and no-reserve flag
if ${gcmd} asset create --creator "${ACCOUNT}" --no-reserve --reserve "${ACCOUNTC}" --name "${ASSET_NAME}" --unitname errmisc --total 1000000000000 --asseturl "${ASSET_URL}"; then
- date '+asset-misc asset with --reserve and --no-reserve flags created successfully error %Y%m%d_%H%M%S'
+ date "+${scriptname} asset with --reserve and --no-reserve flags created successfully error %Y%m%d_%H%M%S"
exit 1
else
echo "Expected. Cannot create asset with both reserve flag and no-reserve flag"
@@ -127,7 +138,7 @@ fi
# case 3: create asset with both freezer flag and no-freezer flag
if ${gcmd} asset create --creator "${ACCOUNT}" --no-freezer --freezer "${ACCOUNTD}" --name "${ASSET_NAME}" --unitname errmisc --total 1000000000000 --asseturl "${ASSET_URL}"; then
- date '+asset-misc asset with --freezer and --no-freezer flags created successfully error %Y%m%d_%H%M%S'
+ date "+${scriptname} asset with --freezer and --no-freezer flags created successfully error %Y%m%d_%H%M%S"
exit 1
else
echo "Expected. Cannot create asset with both freezer flag and no-freezer flag"
@@ -135,7 +146,7 @@ fi
# case 4: create asset with both clawback flag and no-clawback flag
if ${gcmd} asset create --creator "${ACCOUNT}" --no-clawback --clawback "${ACCOUNTE}" --name "${ASSET_NAME}" --unitname errmisc --total 1000000000000 --asseturl "${ASSET_URL}"; then
- date '+asset-misc asset with --clawback and --no-clawback flags created successfully error %Y%m%d_%H%M%S'
+ date "+${scriptname} asset with --clawback and --no-clawback flags created successfully error %Y%m%d_%H%M%S"
exit 1
else
echo "Expected. Cannot create asset with both clawback flag and no-clawback flag"
@@ -145,8 +156,23 @@ fi
if ${gcmd} asset create --creator "${ACCOUNT}" --no-freezer --no-clawback --reserve "${ACCOUNTE}" --name "${ASSET_NAME}" --unitname errmisc --total 1000000000000 --asseturl "${ASSET_URL}"; then
echo "ok"
else
- date '+asset-misc asset with independent flags created unsuccessfully error %Y%m%d_%H%M%S'
+ date "+${scriptname} asset with independent flags created unsuccessfully error %Y%m%d_%H%M%S"
+ exit 1
+fi
+
+# case 6: create and destroy with a rekeyed account
+if ${gcmd} asset create --creator "${ACCOUNTX}" -S "${ACCOUNTY}" --name "${ASSET_NAME}" --unitname rkeymisc --total 10 --asseturl "${ASSET_URL}"; then
+ echo "ok"
+else
+ date "+${scriptname} rekeyed account unable to create asset %Y%m%d_%H%M%S"
+ exit 1
+fi
+ASSET_ID=$(${gcmd} asset info --creator ${ACCOUNTX} --unitname rkeymisc|grep 'Asset ID'|awk '{ print $3 }')
+if ${gcmd} asset destroy --creator "${ACCOUNTX}" -S "${ACCOUNTY}" --assetid $ASSET_ID; then
+ echo "ok"
+else
+ date "+${scriptname} rekeyed account unable to destroy asset %Y%m%d_%H%M%S"
exit 1
fi
-date '+asset-misc finish %Y%m%d_%H%M%S'
+date "+$scriptname OK %Y%m%d_%H%M%S"
diff --git a/test/scripts/e2e_subs/assets-app-b.sh b/test/scripts/e2e_subs/assets-app-b.sh
index 7aac4615d..688b77371 100755
--- a/test/scripts/e2e_subs/assets-app-b.sh
+++ b/test/scripts/e2e_subs/assets-app-b.sh
@@ -125,4 +125,4 @@ ${xcmd} --from $MANAGER destroy
# clear bob
${xcmd} --from $BOB clear
-date "+$scriptname done %Y%m%d_%H%M%S"
+date "+$scriptname OK %Y%m%d_%H%M%S"
diff --git a/test/scripts/e2e_subs/assets-app.sh b/test/scripts/e2e_subs/assets-app.sh
index 1fa2d8f5c..252512951 100755
--- a/test/scripts/e2e_subs/assets-app.sh
+++ b/test/scripts/e2e_subs/assets-app.sh
@@ -191,4 +191,4 @@ assertContains "$RES" "$ERR_APP_CL_STR" "optin of deleted application should fai
RES=$(${qcmd} total-supply 2>&1 || true)
assertContains "$RES" "$ERR_APP_NE_STR" "read global of deleted application should fail"
-date "+$scriptname done %Y%m%d_%H%M%S"
+date "+$scriptname OK %Y%m%d_%H%M%S"
diff --git a/test/scripts/e2e_subs/hdr-access-logicsig.sh b/test/scripts/e2e_subs/hdr-access-logicsig.sh
new file mode 100755
index 000000000..32c36d7b2
--- /dev/null
+++ b/test/scripts/e2e_subs/hdr-access-logicsig.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+filename=$(basename "$0")
+scriptname="${filename%.*}"
+date "+${scriptname} start %Y%m%d_%H%M%S"
+
+set -e
+set -x
+set -o pipefail
+
+WALLET=$1
+
+gcmd="goal -w ${WALLET}"
+
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+
+cat >${TEMPDIR}/hdr.teal<<EOF
+#pragma version 7
+txn FirstValid
+int 1
+-
+block BlkTimestamp // need to make sure we don't ask for current
+
+txn FirstValid
+int 2
+-
+block BlkTimestamp
+// last two times are on stack
+-
+dup
+// difference in times is on stack twice
+
+int 1
+>
+assert
+
+int 6
+<
+EOF
+
+${gcmd} clerk compile -o ${TEMPDIR}/hdr.lsig -s -a ${ACCOUNT} ${TEMPDIR}/hdr.teal
+
+SIGACCOUNT=$(${gcmd} clerk compile -n ${TEMPDIR}/hdr.teal|awk '{ print $2 }')
+
+# Avoid rewards by giving less than an algo
+${gcmd} clerk send --amount 900000 --from ${ACCOUNT} --to ${SIGACCOUNT}
+
+function balance {
+ acct=$1; shift
+ goal account balance -a "$acct" | awk '{print $1}'
+}
+
+[ "$(balance "$SIGACCOUNT")" = 900000 ]
+
+# Don't let goal set lastvalid so far in the future, that prevents `block` access
+${gcmd} clerk send --amount 10 --from ${SIGACCOUNT} --to ${ACCOUNT} --lastvalid 100 -o ${TEMPDIR}/hdr.tx
+
+${gcmd} clerk sign -i ${TEMPDIR}/hdr.tx -o ${TEMPDIR}/hdr.stx --program ${TEMPDIR}/hdr.teal
+
+${gcmd} clerk rawsend -f ${TEMPDIR}/hdr.stx
+
+# remove min fee + 10
+[ "$(balance "$SIGACCOUNT")" = 898990 ]
diff --git a/test/scripts/e2e_subs/hdr-access.py b/test/scripts/e2e_subs/hdr-access.py
new file mode 100755
index 000000000..4da7856a3
--- /dev/null
+++ b/test/scripts/e2e_subs/hdr-access.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+
+import os
+import sys
+from goal import Goal
+
+from datetime import datetime
+
+stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+print(f"{os.path.basename(sys.argv[0])} start {stamp}")
+
+goal = Goal(sys.argv[1], autosend=True)
+
+joe = goal.new_account()
+
+txinfo, err = goal.pay(goal.account, joe, amt=500_000)
+assert not err, err
+
+teal = """
+#pragma version 7
+ txn FirstValidTime
+"""
+v7 = goal.assemble(teal)
+txinfo, err = goal.app_create(joe, v7)
+assert not err, err
+
+# It won't assemble in earlier versions, so manipulate the bytecode to test v6
+v6 = bytearray(v7)
+v6[0] = 6
+txinfo, err = goal.app_create(joe, v6)
+assert err
+assert "invalid txn field FirstValidTime" in str(err), err
+
+
+# Can't access two behind FirstValid because LastValid is 1000 after
+teal = """
+#pragma version 7
+ txn FirstValid
+ int 2
+ -
+ block BlkTimestamp
+"""
+txinfo, err = goal.app_create(joe, goal.assemble(teal))
+assert "not available" in str(err), err
+
+# We want to manipulate lastvalid, so we need to turn off autosend
+goal.autosend = False
+
+# We will be able to access two blocks, by setting lv explcitly. So we
+# test that the block timestamp from two blocks ago is between 2 and 5
+# (inclusive) seconds before the previous block timestamp. devMode
+# might mess this test up.
+teal = """
+#pragma version 7
+ txn FirstValid
+ int 1
+ -
+ block BlkTimestamp
+
+ txn FirstValid
+ int 2
+ -
+ block BlkTimestamp
+ // last two times are on stack
+ -
+ dup
+ // difference in times is on stack twice
+
+ int 1
+ >
+ assert
+
+ int 6
+ <
+"""
+checktimes = goal.assemble(teal)
+tx = goal.app_create(joe, goal.assemble(teal))
+tx.last_valid_round = tx.last_valid_round - 800
+txinfo, err = goal.send(tx)
+assert not err, err
+
+# block 0 is not accessible even with a low LastValid
+teal = """
+#pragma version 7
+ int 0
+ block BlkTimestamp
+"""
+tx = goal.app_create(joe, goal.assemble(teal))
+tx.last_valid_round = tx.last_valid_round - 800
+txinfo, err = goal.send(tx)
+assert "round 0 is not available" in str(err), err
+assert "outside [1-" in str(err), err # confirms that we can look back to 1
+
+print(f"{os.path.basename(sys.argv[0])} OK {stamp}")
diff --git a/test/scripts/e2e_subs/rekey.sh b/test/scripts/e2e_subs/rekey.sh
index 1308f0e60..6b96aceff 100755
--- a/test/scripts/e2e_subs/rekey.sh
+++ b/test/scripts/e2e_subs/rekey.sh
@@ -1,6 +1,8 @@
#!/bin/bash
-date '+e2e_subs/rekey.sh start %Y%m%d_%H%M%S'
+filename=$(basename "$0")
+scriptname="${filename%.*}"
+date "+${scriptname} start %Y%m%d_%H%M%S"
set -exo pipefail
export SHELLOPTS
@@ -12,7 +14,7 @@ gcmd="goal -w ${WALLET}"
ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }')
-# Rekeying should fail if in a txn group with a < v2 TEAL program
+# Rekeying should fail if in a txn group with a < v2 program
# Make v1 program
printf 'int 1' > "${TEMPDIR}/simplev1.teal"
@@ -46,12 +48,12 @@ cat "${TEMPDIR}/group0_split-0.stxn" "${TEMPDIR}/group0_split-1.txn" > "${TEMPDI
RES=$(${gcmd} clerk rawsend -f "${TEMPDIR}/group0_signed.stxn" 2>&1 || true)
EXPERROR='program version must be >= 2 for this transaction group'
if [[ $RES != *"${EXPERROR}"* ]]; then
- date '+e2e_subs/rekey.sh FAIL txn group with rekey transaction should require teal version >= 2 %Y%m%d_%H%M%S'
+ date "+${scriptname} FAIL txn group with rekey transaction should require version >= 2 %Y%m%d_%H%M%S"
false
fi
# Plan: make a txn group. First one is rekey-to payment from $ACCOUNTD, second
-# one is regular payment from v1 escrow. (Should succeed when we send it).
+# one is regular payment from v2 escrow. (Should succeed when we send it).
${gcmd} clerk send -a 1 -f "${ACCOUNTD}" -t "${ACCOUNTD}" --rekey-to "${ACCOUNT}" -o "${TEMPDIR}/txn2.tx"
${gcmd} clerk send -a 1 --from-program "${TEMPDIR}/simple.teal" -t "${ACCOUNTD}" -o "${TEMPDIR}/txn3.tx"
@@ -74,13 +76,18 @@ ${gcmd} account import -m "${mnemonic}"
${gcmd} clerk send -a 100000 -f "${ACCOUNT}" -t "${ACCOUNTB}" --rekey-to "${ACCOUNTC}"
-${gcmd} clerk send -a 100000 -f "${ACCOUNT}" -t "${ACCOUNTB}" -o "${TEMPDIR}/ntxn"
+# Send with alternate spending key. Test two ways (with different
+# amounts, to help distinguish if failure occurs). First, by creating
+# a txn in a file, signing, then rawsend
+${gcmd} clerk send -a 80000 -f "${ACCOUNT}" -t "${ACCOUNTB}" -o "${TEMPDIR}/ntxn"
${gcmd} clerk sign -S "${ACCOUNTC}" -i "${TEMPDIR}/ntxn" -o "${TEMPDIR}/nstxn"
${gcmd} clerk rawsend -f "${TEMPDIR}/nstxn"
+# Then by using goal syntax for send (-S) from rekeyed account
+${gcmd} clerk send -a 20000 -f "${ACCOUNT}" -t "${ACCOUNTB}" -S "${ACCOUNTC}"
BALANCEB=$(${gcmd} account balance -a "${ACCOUNTB}" | awk '{ print $1 }')
if [ "$BALANCEB" -ne 200000 ]; then
- date "+e2e_subs/rekey.sh FAIL wanted balance=200000 but got ${BALANCEB} %Y%m%d_%H%M%S"
+ date "+${scriptname} FAIL wanted balance=200000 but got ${BALANCEB} %Y%m%d_%H%M%S"
false
fi
@@ -91,7 +98,7 @@ ${gcmd} clerk rawsend -f "${TEMPDIR}/nstxn2"
BALANCEB=$(${gcmd} account balance -a "${ACCOUNTB}" | awk '{ print $1 }')
if [ "$BALANCEB" -ne 300000 ]; then
- date "+e2e_subs/rekey.sh FAIL wanted balance=300000 but got ${BALANCEB} %Y%m%d_%H%M%S"
+ date "+${scriptname} FAIL wanted balance=300000 but got ${BALANCEB} %Y%m%d_%H%M%S"
false
fi
@@ -101,16 +108,24 @@ ${gcmd} clerk sign -S "${ACCOUNTC}" -i "${TEMPDIR}/ntxn3" -o "${TEMPDIR}/nstxn3"
# This should fail because $ACCOUNT should have signed the transaction.
if ! ${gcmd} clerk rawsend -f "${TEMPDIR}/nstxn3"; then
- date '+e2e_subs/rekey.sh OK %Y%m%d_%H%M%S'
+ date "+${scriptname} OK %Y%m%d_%H%M%S"
else
- date "+e2e_subs/rekey.sh rawsend should have failed because of a bad signature %Y%m%d_%H%M%S"
+ date "+${scriptname} rawsend should have failed because of a bad signature %Y%m%d_%H%M%S"
+ false
+fi
+
+# This should fail because $ACCOUNT should have signed the transaction.
+if ! ${gcmd} clerk send -a 100000 -f "${ACCOUNT}" -t "${ACCOUNTB}" -S "${ACCOUNTC}"; then
+ date "+${scriptname} OK %Y%m%d_%H%M%S"
+else
+ date "+${scriptname} send should have failed because of a bad signature %Y%m%d_%H%M%S"
false
fi
# Account balance should be the same amount as before.
BALANCEB=$(${gcmd} account balance -a "${ACCOUNTB}" | awk '{ print $1 }')
if [ "$BALANCEB" -ne 300000 ]; then
- date "+e2e_subs/rekey.sh FAIL wanted balance=300000 but got ${BALANCEB} %Y%m%d_%H%M%S"
+ date "+${scriptname} FAIL wanted balance=300000 but got ${BALANCEB} %Y%m%d_%H%M%S"
false
fi
@@ -119,7 +134,7 @@ ${gcmd} clerk send -a 100000 -f "${ACCOUNT}" -t "${ACCOUNTB}"
BALANCEB=$(${gcmd} account balance -a "${ACCOUNTB}" | awk '{ print $1 }')
if [ "$BALANCEB" -ne 400000 ]; then
- date "+e2e_subs/rekey.sh FAIL wanted balance=400000 but got ${BALANCEB} %Y%m%d_%H%M%S"
+ date "+${scriptname} FAIL wanted balance=400000 but got ${BALANCEB} %Y%m%d_%H%M%S"
false
fi
@@ -130,6 +145,6 @@ ${gcmd} clerk rawsend -f "${TEMPDIR}/ctx.stxn"
BALANCED=$(${gcmd} account balance -a "${ACCOUNTD}" | awk '{ print $1 }')
if [ "$BALANCED" -ne 0 ]; then
- date "+e2e_subs/rekey.sh FAIL wanted balance=0 but got ${BALANCED} %Y%m%d_%H%M%S"
+ date "+${scriptname} FAIL wanted balance=0 but got ${BALANCED} %Y%m%d_%H%M%S"
false
fi
diff --git a/test/scripts/e2e_subs/sectok-app.sh b/test/scripts/e2e_subs/sectok-app.sh
index 46154723f..5ed3e2d5e 100755
--- a/test/scripts/e2e_subs/sectok-app.sh
+++ b/test/scripts/e2e_subs/sectok-app.sh
@@ -214,4 +214,4 @@ ${xcmd} --from $CAROL transfer --receiver $BOB --amount $XFER2
RES=$(${xcmd} --from $BOB transfer --receiver $CAROL --amount $XFER2 2>&1 || true)
assertContains "$RES" "$ERR_APP_REJ_STR3" "reverse transfer (by group) should fail"
-date "+$scriptname done %Y%m%d_%H%M%S"
+date "+$scriptname OK %Y%m%d_%H%M%S"
diff --git a/test/scripts/e2e_subs/tealprogs/quine.map b/test/scripts/e2e_subs/tealprogs/quine.map
index 02e426d14..a7584274e 100644
--- a/test/scripts/e2e_subs/tealprogs/quine.map
+++ b/test/scripts/e2e_subs/tealprogs/quine.map
@@ -1 +1 @@
-{"version":3,"sources":["test/scripts/e2e_subs/tealprogs/quine.teal"],"names":[],"mapping":";AAOA;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAQA;AASA;;;AAUA;;;AAWA;AAYA;;AAaA;AAcA;;;AAeA;AAgBA;AAiBA;;AAkBA;;AAmBA;AAoBA;AAqBA"} \ No newline at end of file
+{"version":3,"sources":["test/scripts/e2e_subs/tealprogs/quine.teal"],"names":[],"mapping":";AAOA;;;;;;;;;;;;;;;;;;;;;;;;;;;;AACA;AACA;;;AACA;;;AACA;AACA;;AACA;AACA;;;AACA;AACA;AACA;;AACA;;AACA;AACA;AACA","mappings":";AAOA;;;;;;;;;;;;;;;;;;;;;;;;;;;;AACA;AACA;;;AACA;;;AACA;AACA;;AACA;AACA;;;AACA;AACA;AACA;;AACA;;AACA;AACA;AACA"} \ No newline at end of file
diff --git a/test/scripts/tps.py b/test/scripts/tps.py
new file mode 100644
index 000000000..834cdbb7a
--- /dev/null
+++ b/test/scripts/tps.py
@@ -0,0 +1,81 @@
+#!/usr/bin/python3
+#
+# Ask algod what its recent Transactions Per Second have been
+#
+# usage:
+# python3 tps.py -r 10 --verbose $ALGORAND_DATA
+
+import argparse
+import logging
+import os
+import sys
+
+# pip install py-algorand-sdk
+import algosdk
+from algosdk.encoding import msgpack
+import algosdk.v2client
+import algosdk.v2client.algod
+
+logger = logging.getLogger(__name__)
+
+def read_algod_dir(algorand_data):
+ with open(os.path.join(algorand_data, 'algod.net')) as fin:
+ net = fin.read().strip()
+ with open(os.path.join(algorand_data, 'algod.token')) as fin:
+ token = fin.read().strip()
+ with open(os.path.join(algorand_data, 'algod.admin.token')) as fin:
+ admin_token = fin.read().strip()
+ return net, token, admin_token
+
+def algod_client_for_dir(algorand_data, headers=None):
+ if headers is None:
+ headers = {}
+ net, token, admin_token = read_algod_dir(algorand_data)
+ if not net.startswith('http'):
+ net = 'http://' + net
+ return algosdk.v2client.algod.AlgodClient(token, net, headers)
+
+def get_blockinfo_tps(algod, rounds=10):
+ status = algod.status()
+ rounds = 10
+ ba = msgpack.loads(algod.block_info(status['last-round']-rounds, response_format='msgpack'), strict_map_key=False)
+ bb = msgpack.loads(algod.block_info(status['last-round'], response_format='msgpack'), strict_map_key=False)
+ ra = ba['block']['rnd']
+ rb = bb['block']['rnd']
+ assert(rb - ra == rounds)
+ tca = ba['block']['tc']
+ tcb = bb['block']['tc']
+ tsa = ba['block']['ts']
+ tsb = bb['block']['ts']
+ dt = tsb-tsa
+ dtxn = tcb-tca
+ tps = dtxn/dt
+ logger.debug('(b[%d].TimeStamp %d) - (b[%d].TimeStamp %d) = %.1f seconds', ra, tsa, rb, tsb, dt)
+ logger.debug('(b[%d].TxnCounter %d) - (b[%d].TxnCounter %d) = %d txns', ra, tca, rb, tcb, dtxn)
+ return tps
+
+def main():
+ ap = argparse.ArgumentParser()
+ ap.add_argument('data_dirs', nargs='*', help='list paths to algorand datadirs to grab heap profile from')
+ ap.add_argument('-d', dest='algorand_data')
+ ap.add_argument('-r', '--rounds', type=int, help='number of rounds to calculate over')
+ ap.add_argument('--verbose', default=False, action='store_true')
+ args = ap.parse_args()
+
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+
+ datadirs = args.data_dirs
+ if args.algorand_data:
+ datadirs = datadirs + [args.algorand_data]
+
+ for adir in datadirs:
+ algod = algod_client_for_dir(adir)
+ tps = get_blockinfo_tps(algod, rounds=args.rounds)
+ print('{:5.1f}\t{}'.format(tps, adir))
+ return 0
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/test/testdata/configs/config-v23.json b/test/testdata/configs/config-v23.json
new file mode 100644
index 000000000..d30ba9e75
--- /dev/null
+++ b/test/testdata/configs/config-v23.json
@@ -0,0 +1,106 @@
+{
+ "Version": 23,
+ "AccountUpdatesStatsInterval": 5000000000,
+ "AccountsRebuildSynchronousMode": 1,
+ "AgreementIncomingBundlesQueueLength": 7,
+ "AgreementIncomingProposalsQueueLength": 25,
+ "AgreementIncomingVotesQueueLength": 10000,
+ "AnnounceParticipationKey": true,
+ "Archival": false,
+ "BaseLoggerDebugLevel": 4,
+ "BlockServiceCustomFallbackEndpoints": "",
+ "BroadcastConnectionsLimit": -1,
+ "CadaverSizeTarget": 1073741824,
+ "CatchpointFileHistoryLength": 365,
+ "CatchpointInterval": 10000,
+ "CatchpointTracking": 0,
+ "CatchupBlockDownloadRetryAttempts": 1000,
+ "CatchupBlockValidateMode": 0,
+ "CatchupFailurePeerRefreshRate": 10,
+ "CatchupGossipBlockFetchTimeoutSec": 4,
+ "CatchupHTTPBlockFetchTimeoutSec": 4,
+ "CatchupLedgerDownloadRetryAttempts": 50,
+ "CatchupParallelBlocks": 16,
+ "ConnectionsRateLimitingCount": 60,
+ "ConnectionsRateLimitingWindowSeconds": 1,
+ "DNSBootstrapID": "<network>.algorand.network",
+ "DNSSecurityFlags": 1,
+ "DeadlockDetection": 0,
+ "DeadlockDetectionThreshold": 30,
+ "DisableLocalhostConnectionRateLimit": true,
+ "DisableNetworking": false,
+ "DisableOutgoingConnectionThrottling": false,
+ "EnableAccountUpdatesStats": false,
+ "EnableAgreementReporting": false,
+ "EnableAgreementTimeMetrics": false,
+ "EnableAssembleStats": false,
+ "EnableBlockService": false,
+ "EnableBlockServiceFallbackToArchiver": true,
+ "EnableCatchupFromArchiveServers": false,
+ "EnableDeveloperAPI": false,
+ "EnableGossipBlockService": true,
+ "EnableIncomingMessageFilter": false,
+ "EnableLedgerService": false,
+ "EnableMetricReporting": false,
+ "EnableOutgoingNetworkMessageFiltering": true,
+ "EnablePingHandler": true,
+ "EnableProcessBlockStats": false,
+ "EnableProfiler": false,
+ "EnableRequestLogger": false,
+ "EnableRuntimeMetrics": false,
+ "EnableTopAccountsReporting": false,
+ "EnableVerbosedTransactionSyncLogging": false,
+ "EndpointAddress": "127.0.0.1:0",
+ "FallbackDNSResolverAddress": "",
+ "ForceFetchTransactions": false,
+ "ForceRelayMessages": false,
+ "GossipFanout": 4,
+ "IncomingConnectionsLimit": 800,
+ "IncomingMessageFilterBucketCount": 5,
+ "IncomingMessageFilterBucketSize": 512,
+ "IsIndexerActive": false,
+ "LedgerSynchronousMode": 2,
+ "LogArchiveMaxAge": "",
+ "LogArchiveName": "node.archive.log",
+ "LogSizeLimit": 1073741824,
+ "MaxAcctLookback": 4,
+ "MaxAPIResourcesPerAccount": 100000,
+ "MaxCatchpointDownloadDuration": 7200000000000,
+ "MaxConnectionsPerIP": 30,
+ "MinCatchpointFileDownloadBytesPerSecond": 20480,
+ "NetAddress": "",
+ "NetworkMessageTraceServer": "",
+ "NetworkProtocolVersion": "",
+ "NodeExporterListenAddress": ":9100",
+ "NodeExporterPath": "./node_exporter",
+ "OptimizeAccountsDatabaseOnStartup": false,
+ "OutgoingMessageFilterBucketCount": 3,
+ "OutgoingMessageFilterBucketSize": 128,
+ "ParticipationKeysRefreshInterval": 60000000000,
+ "PeerConnectionsUpdateInterval": 3600,
+ "PeerPingPeriodSeconds": 0,
+ "PriorityPeers": {},
+ "ProposalAssemblyTime": 500000000,
+ "PublicAddress": "",
+ "ReconnectTime": 60000000000,
+ "ReservedFDs": 256,
+ "RestConnectionsHardLimit": 2048,
+ "RestConnectionsSoftLimit": 1024,
+ "RestReadTimeoutSeconds": 15,
+ "RestWriteTimeoutSeconds": 120,
+ "RunHosted": false,
+ "SuggestedFeeBlockHistory": 3,
+ "SuggestedFeeSlidingWindowSize": 50,
+ "TLSCertFile": "",
+ "TLSKeyFile": "",
+ "TelemetryToLog": true,
+ "TransactionSyncDataExchangeRate": 0,
+ "TransactionSyncSignificantMessageThreshold": 0,
+ "TxPoolExponentialIncreaseFactor": 2,
+ "TxPoolSize": 75000,
+ "TxSyncIntervalSeconds": 60,
+ "TxSyncServeResponseSize": 1000000,
+ "TxSyncTimeoutSeconds": 30,
+ "UseXForwardedForAddressField": "",
+ "VerifiedTranscationsCacheSize": 150000
+}
diff --git a/test/testdata/consensus/catchpointtestingprotocol.json b/test/testdata/consensus/catchpointtestingprotocol.json
index 586079f07..0fded2ded 100644
--- a/test/testdata/consensus/catchpointtestingprotocol.json
+++ b/test/testdata/consensus/catchpointtestingprotocol.json
@@ -26,6 +26,7 @@
"EnableFeePooling": true,
"EnableKeyregCoherencyCheck": true,
"EnableAccountDataResourceSeparation": true,
+ "EnableOnlineAccountCatchpoints": true,
"FastRecoveryLambda": 300000000000,
"FixTransactionLeases": true,
"ForceNonParticipatingFeeSink": true,
@@ -55,6 +56,7 @@
"MaxAssetUnitNameBytes": 8,
"MaxAssetsPerAccount": 1000,
"MaxBalLookback": 32,
+ "CatchpointLookback": 32,
"MaxExtraAppProgramPages": 3,
"MaxGlobalSchemaEntries": 64,
"MaxInnerTransactions": 16,
diff --git a/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json b/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json
index 14b6c6151..e2cc49790 100644
--- a/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json
+++ b/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json
@@ -237,6 +237,12 @@
"BaseConfiguration": "c5.4xlarge"
},
{
+ "Name": "AWS-US-EAST-2-m5d.xl",
+ "Provider": "AWS",
+ "Region": "us-east-2",
+ "BaseConfiguration": "m5d.xlarge"
+ },
+ {
"Name": "AWS-US-EAST-2-m5d.2xl",
"Provider": "AWS",
"Region": "us-east-2",
@@ -267,6 +273,42 @@
"BaseConfiguration": "c5d.18xlarge"
},
{
+ "Name": "AWS-AP-EAST-1-c5.xlarge",
+ "Provider": "AWS",
+ "Region": "ap-east-1",
+ "BaseConfiguration": "c5.xlarge"
+ },
+ {
+ "Name": "AWS-AP-EAST-1-Small",
+ "Provider": "AWS",
+ "Region": "ap-east-1",
+ "BaseConfiguration": "c5.2xlarge"
+ },
+ {
+ "Name": "AWS-AP-EAST-1-Large",
+ "Provider": "AWS",
+ "Region": "ap-east-1",
+ "BaseConfiguration": "c5.4xlarge"
+ },
+ {
+ "Name": "AWS-AP-EAST-1-m5d.2xl",
+ "Provider": "AWS",
+ "Region": "ap-east-1",
+ "BaseConfiguration": "m5d.2xlarge"
+ },
+ {
+ "Name": "AWS-AP-EAST-1-m5d.4xl",
+ "Provider": "AWS",
+ "Region": "ap-east-1",
+ "BaseConfiguration": "m5d.4xlarge"
+ },
+ {
+ "Name": "AWS-AP-EAST-1-c5d.9xl",
+ "Provider": "AWS",
+ "Region": "ap-east-1",
+ "BaseConfiguration": "c5d.9xlarge"
+ },
+ {
"Name": "AWS-AP-SOUTH-1-c5.xlarge",
"Provider": "AWS",
"Region": "ap-south-1",
@@ -285,7 +327,7 @@
"BaseConfiguration": "c5.4xlarge"
},
{
- "Name": "AWS-AP-SOUTH-1--m5d.2xl",
+ "Name": "AWS-AP-SOUTH-1-m5d.2xl",
"Provider": "AWS",
"Region": "ap-south-1",
"BaseConfiguration": "m5d.2xlarge"
@@ -411,6 +453,60 @@
"BaseConfiguration": "c5.4xlarge"
},
{
+ "Name": "AWS-AP-NORTHEAST-2-m5d.2xl",
+ "Provider": "AWS",
+ "Region": "ap-northeast-2",
+ "BaseConfiguration": "m5d.2xlarge"
+ },
+ {
+ "Name": "AWS-AP-NORTHEAST-2-m5d.4xl",
+ "Provider": "AWS",
+ "Region": "ap-northeast-2",
+ "BaseConfiguration": "m5d.4xlarge"
+ },
+ {
+ "Name": "AWS-AP-NORTHEAST-3-c5.xlarge",
+ "Provider": "AWS",
+ "Region": "ap-northeast-3",
+ "BaseConfiguration": "c5.xlarge"
+ },
+ {
+ "Name": "AWS-AP-NORTHEAST-3-Small",
+ "Provider": "AWS",
+ "Region": "ap-northeast-3",
+ "BaseConfiguration": "c5.2xlarge"
+ },
+ {
+ "Name": "AWS-AP-NORTHEAST-3-Large",
+ "Provider": "AWS",
+ "Region": "ap-northeast-3",
+ "BaseConfiguration": "c5.4xlarge"
+ },
+ {
+ "Name": "AWS-AP-NORTHEAST-3-m5d.2xl",
+ "Provider": "AWS",
+ "Region": "ap-northeast-3",
+ "BaseConfiguration": "m5d.2xlarge"
+ },
+ {
+ "Name": "AWS-AP-NORTHEAST-3-m5d.4xl",
+ "Provider": "AWS",
+ "Region": "ap-northeast-3",
+ "BaseConfiguration": "m5d.4xlarge"
+ },
+ {
+ "Name": "AWS-AP-NORTHEAST-3-c5d.9xl",
+ "Provider": "AWS",
+ "Region": "ap-northeast-3",
+ "BaseConfiguration": "c5d.9xlarge"
+ },
+ {
+ "Name": "AWS-AP-NORTHEAST-2-c5d.9xl",
+ "Provider": "AWS",
+ "Region": "ap-northeast-2",
+ "BaseConfiguration": "c5d.9xlarge"
+ },
+ {
"Name": "AWS-EU-CENTRAL-1-c5.xlarge",
"Provider": "AWS",
"Region": "eu-central-1",
@@ -447,6 +543,42 @@
"BaseConfiguration": "c5d.9xlarge"
},
{
+ "Name": "AWS-EU-SOUTH-1-c5.xlarge",
+ "Provider": "AWS",
+ "Region": "eu-south-1",
+ "BaseConfiguration": "c5.xlarge"
+ },
+ {
+ "Name": "AWS-EU-SOUTH-1-Small",
+ "Provider": "AWS",
+ "Region": "eu-south-1",
+ "BaseConfiguration": "c5.2xlarge"
+ },
+ {
+ "Name": "AWS-EU-SOUTH-1-Large",
+ "Provider": "AWS",
+ "Region": "eu-south-1",
+ "BaseConfiguration": "c5.4xlarge"
+ },
+ {
+ "Name": "AWS-EU-SOUTH-1-m5d.2xl",
+ "Provider": "AWS",
+ "Region": "eu-SOUTH-1",
+ "BaseConfiguration": "m5d.2xlarge"
+ },
+ {
+ "Name": "AWS-EU-SOUTH-1-m5d.4xl",
+ "Provider": "AWS",
+ "Region": "eu-south-1",
+ "BaseConfiguration": "m5d.4xlarge"
+ },
+ {
+ "Name": "AWS-EU-SOUTH-1-c5d.9xl",
+ "Provider": "AWS",
+ "Region": "eu-south-1",
+ "BaseConfiguration": "c5d.9xlarge"
+ },
+ {
"Name": "AWS-EU-WEST-1-c5.xlarge",
"Provider": "AWS",
"Region": "eu-west-1",
@@ -614,6 +746,54 @@
"Provider": "AWS",
"Region": "ca-central-1",
"BaseConfiguration": "c5d.9xlarge"
+ },
+ {
+ "Name": "AWS-ME-SOUTH-1-c5.xlarge",
+ "Provider": "AWS",
+ "Region": "me-south-1",
+ "BaseConfiguration": "c5.xlarge"
+ },
+ {
+ "Name": "AWS-ME-SOUTH-1-m5d.2xl",
+ "Provider": "AWS",
+ "Region": "me-south-1",
+ "BaseConfiguration": "m5d.2xlarge"
+ },
+ {
+ "Name": "AWS-ME-SOUTH-1-m5d.4xl",
+ "Provider": "AWS",
+ "Region": "me-south-1",
+ "BaseConfiguration": "m5d.4xlarge"
+ },
+ {
+ "Name": "AWS-ME-SOUTH-1-c5d.9xl",
+ "Provider": "AWS",
+ "Region": "me-south-1",
+ "BaseConfiguration": "c5d.9xlarge"
+ },
+ {
+ "Name": "AWS-AF-SOUTH-1-c5.xlarge",
+ "Provider": "AWS",
+ "Region": "af-south-1",
+ "BaseConfiguration": "c5.xlarge"
+ },
+ {
+ "Name": "AWS-AF-SOUTH-1-m5d.2xl",
+ "Provider": "AWS",
+ "Region": "af-south-1",
+ "BaseConfiguration": "m5d.2xlarge"
+ },
+ {
+ "Name": "AWS-AF-SOUTH-1-m5d.4xl",
+ "Provider": "AWS",
+ "Region": "af-south-1",
+ "BaseConfiguration": "m5d.4xlarge"
+ },
+ {
+ "Name": "AWS-AF-SOUTH-1-c5d.9xl",
+ "Provider": "AWS",
+ "Region": "af-south-1",
+ "BaseConfiguration": "c5d.9xlarge"
}
]
}
diff --git a/test/testdata/deployednettemplates/recipes/mmnet/Makefile b/test/testdata/deployednettemplates/recipes/mmnet/Makefile
new file mode 100644
index 000000000..21d38bbbd
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/mmnet/Makefile
@@ -0,0 +1,15 @@
+PARAMS=-w 130 -R 136 -n 130 -H 16 --node-template configs/node.json --relay-template configs/relay.json --non-participating-node-template configs/nonPartNode.json
+
+all: topology.json net.json genesis.json
+
+topology.json:
+ TOPOLOGY_FILE=$@ python gen_topology.py
+
+net.json: configs/node.json configs/relay.json configs/nonPartNode.json $(GOPATH)/bin/netgoal Makefile
+ netgoal generate -t net -r /tmp/wat -o net.json $(PARAMS)
+
+genesis.json: configs/node.json configs/relay.json configs/nonPartNode.json $(GOPATH)/bin/netgoal Makefile
+ netgoal generate -t genesis -r /tmp/wat -o genesis.json $(PARAMS)
+
+clean:
+ rm -f net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/mmnet/README.md b/test/testdata/deployednettemplates/recipes/mmnet/README.md
new file mode 100644
index 000000000..4c7a40132
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/mmnet/README.md
@@ -0,0 +1,29 @@
+# Model Mainnet (mmnet) Recipe
+
+> `NOTE`: The name `mmnet` is short for `model mainnet`.
+
+This recipe is meant to be a _best-effort representation_ of mainnet that can be used for testing purposes. In no way is this an exact copy as that would be financially unfeasible. This recipe was first created based on the [scenario2](../scenario2/) recipe with the intention of expanding on the number relays to match mainnet's distribution amongst other improvements.
+
+## Mainnet Relay Parity
+
+The analysis of mainnet relays was done on 2022-07-07, and the total number of relays were ~136 across various regions. This table outlines the distribution more specifically:
+
+| Provider | Region | Location | Number of Relays |
+| -------- | -------------- | ----------------------- | ------------------------------------ |
+| AWS | us-east-1 | Virginia, USA | 20 |
+| AWS | us-east-2 | Ohio, USA | 20 |
+| AWS | us-west-2 | Oregeon, USA | 10 |
+| AWS | ca-central-1 | Canada | 6 |
+| AWS | eu-west-1 | Ireland | 14 |
+| AWS | eu-north-1 | Stockholm, Sweden | 2 |
+| AWS | eu-south-1 | Milan, Italy | 4 (include 2 in Romania and Ukraine) |
+| AWS | ap-east-1 | Hong Kong, China | 5 |
+| AWS | ap-south-1 | Mumbai, India | 3 |
+| AWS | ap-southeast-1 | Singapore | 12 |
+| AWS | ap-southeast-2 | Sydney, Australia | 4 |
+| AWS | ap-northeast-3 | Osaka, Japan | 15 |
+| AWS | me-south-1 | Middle East (Bahrain) | 2 |
+| AWS | af-south-1 | Cape Town, South Africa | 4 |
+| AWS | sa-east-1 | Sao Paulo, Brazil | 4 (include 1 in Rio) |
+
+> `NOTE`: These values are represented by a dict in [gen_topology.py](./gen_topology.py)
diff --git a/test/testdata/deployednettemplates/recipes/mmnet/configs/node.json b/test/testdata/deployednettemplates/recipes/mmnet/configs/node.json
new file mode 100644
index 000000000..7b505bb21
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/mmnet/configs/node.json
@@ -0,0 +1,10 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+}
diff --git a/test/testdata/deployednettemplates/recipes/mmnet/configs/nonPartNode.json b/test/testdata/deployednettemplates/recipes/mmnet/configs/nonPartNode.json
new file mode 100644
index 000000000..5b0a52d9d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/mmnet/configs/nonPartNode.json
@@ -0,0 +1,5 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+}
diff --git a/test/testdata/deployednettemplates/recipes/mmnet/configs/relay.json b/test/testdata/deployednettemplates/recipes/mmnet/configs/relay.json
new file mode 100644
index 000000000..db8fb939d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/mmnet/configs/relay.json
@@ -0,0 +1,11 @@
+{
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+}
diff --git a/test/testdata/deployednettemplates/recipes/mmnet/gen_topology.py b/test/testdata/deployednettemplates/recipes/mmnet/gen_topology.py
new file mode 100644
index 000000000..b6828644a
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/mmnet/gen_topology.py
@@ -0,0 +1,142 @@
+import json
+import os
+
+# the name of the topology file to write to
+if "TOPOLOGY_FILE" in os.environ:
+ TOPOLOGY_FILE = os.environ["TOPOLOGY_FILE"]
+else:
+ TOPOLOGY_FILE = "topology.json"
+
+# the instance size of relays, nodes, and non-participating nodes
+NODE_SIZES = {
+ "R": "m5d.4xl",
+ "N": "m5d.4xl",
+ "NPN": "m5d.4xl"
+}
+
+# the default number of relays, nodes, or non-participating nodes if a region doesn't specify
+REGION_DEFAULTS = {
+ "R": 1,
+ "N": 4,
+ "NPN": 2
+}
+
+# mapping of regions and associated number of relays, nodes, and non-participating nodes
+REGIONS = {
+ "AWS-US-EAST-1": { # Virginia, USA
+ "R": 20,
+ "NPN": 2,
+ "N": 20
+ },
+ "AWS-US-EAST-2": { # Ohio, USA
+ "R": 20,
+ "NPN": 2,
+ "N": 20
+ },
+ "AWS-US-WEST-2": { # Oregon, USA
+ "R": 10,
+ "NPN": 2,
+ "N": 20
+ },
+ "AWS-CA-CENTRAL-1": { # Canada
+ "R": 6,
+ "NPN": 1,
+ "N": 5
+ },
+ "AWS-EU-CENTRAL-1": { # Frankfurt, Germany
+ "R": 10,
+ "NPN": 2,
+ "N": 10
+ },
+ "AWS-EU-WEST-1": { # Ireland
+ "R": 14,
+ "NPN": 1,
+ "N": 8
+ },
+ "AWS-EU-NORTH-1": { # Stockholm, Sweden
+ "R": 2,
+ "NPN": 1,
+ "N": 6
+ },
+ "AWS-EU-SOUTH-1": { # Milan, Italy
+ "R": 4,
+ "NPN": 1,
+ "N": 4
+ },
+ "AWS-AP-EAST-1": { # Hong Kong, China
+ "R": 5,
+ "NPN": 2,
+ "N": 10
+ },
+ "AWS-AP-SOUTH-1": { # Mumbai, India
+ "R": 3,
+ "NPN": 1,
+ "N": 2
+ },
+ "AWS-AP-SOUTHEAST-1": { # Singapore
+ "R": 12,
+ "NPN": 1,
+ "N": 2
+ },
+ "AWS-AP-SOUTHEAST-2": { # Sydney, Australia
+ "R": 4,
+ "NPN": 1,
+ "N": 4
+ },
+ "AWS-AP-NORTHEAST-2": { # Seoul, South Korea
+ "R": 1,
+ "NPN": 1,
+ "N": 2
+ },
+ "AWS-AP-NORTHEAST-3": { # Osaka, Japan
+ "R": 15,
+ "NPN": 1,
+ "N": 12
+ },
+ "AWS-ME-SOUTH-1": { # Middle East
+ "R": 2,
+ "NPN": 1,
+ "N": 2
+ },
+ "AWS-AF-SOUTH-1": { # Cape Town, South Africa
+ "R": 4,
+ "NPN": 1,
+ "N": 1
+ },
+ "AWS-SA-EAST-1": { # Sao Paulo, Brazil
+ "R": 4,
+ "NPN": 1,
+ "N": 4
+ }
+}
+
+
+host_elements = []
+region_count = len(REGIONS.keys())
+
+# dict that keeps track of number of nodes
+node_count = {
+ "R": 0,
+ "N": 0,
+ "NPN": 0
+}
+
+for region in REGIONS.keys():
+
+ # merge region-specific config with region defaults so that all values are set
+ region_config = {**REGION_DEFAULTS, **REGIONS[region]}
+
+ for node_type in region_config.keys():
+ for i in range(region_config[node_type]):
+ host = {}
+ host["Name"] = f"{node_type}{node_count[node_type] + 1}"
+ host["Template"] = f"{region}-{NODE_SIZES[node_type]}"
+ host_elements.append(host)
+
+ # increment counter for specific node_type
+ node_count[node_type] += 1
+
+
+ec2_hosts = {"Hosts": host_elements}
+with open(TOPOLOGY_FILE, "w") as f:
+ f.write(json.dumps(ec2_hosts, indent = 2) + os.linesep)
diff --git a/test/testdata/deployednettemplates/recipes/mmnet/genesis.json b/test/testdata/deployednettemplates/recipes/mmnet/genesis.json
new file mode 100644
index 000000000..620844b1e
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/mmnet/genesis.json
@@ -0,0 +1,744 @@
+{
+ "NetworkName": "",
+ "VersionModifier": "",
+ "ConsensusProtocol": "",
+ "FirstPartKeyRound": 0,
+ "LastPartKeyRound": 3000000,
+ "PartKeyDilution": 0,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet4",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet5",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet6",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet7",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet8",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet9",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet10",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet11",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet12",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet13",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet14",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet15",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet16",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet17",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet18",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet19",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet20",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet21",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet22",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet23",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet24",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet25",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet26",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet27",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet28",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet29",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet30",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet31",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet32",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet33",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet34",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet35",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet36",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet37",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet38",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet39",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet40",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet41",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet42",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet43",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet44",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet45",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet46",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet47",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet48",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet49",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet50",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet51",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet52",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet53",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet54",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet55",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet56",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet57",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet58",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet59",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet60",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet61",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet62",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet63",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet64",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet65",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet66",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet67",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet68",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet69",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet70",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet71",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet72",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet73",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet74",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet75",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet76",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet77",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet78",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet79",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet80",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet81",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet82",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet83",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet84",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet85",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet86",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet87",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet88",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet89",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet90",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet91",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet92",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet93",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet94",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet95",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet96",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet97",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet98",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet99",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet100",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet101",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet102",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet103",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet104",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet105",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet106",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet107",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet108",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet109",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet110",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet111",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet112",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet113",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet114",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet115",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet116",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet117",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet118",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet119",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet120",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet121",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet122",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet123",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet124",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet125",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet126",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet127",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet128",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet129",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet130",
+ "Stake": 0.38461538461538464,
+ "Online": true
+ },
+ {
+ "Name": "Wallet131",
+ "Stake": 3.125,
+ "Online": false
+ },
+ {
+ "Name": "Wallet132",
+ "Stake": 3.125,
+ "Online": false
+ },
+ {
+ "Name": "Wallet133",
+ "Stake": 3.125,
+ "Online": false
+ },
+ {
+ "Name": "Wallet134",
+ "Stake": 3.125,
+ "Online": false
+ },
+ {
+ "Name": "Wallet135",
+ "Stake": 3.125,
+ "Online": false
+ },
+ {
+ "Name": "Wallet136",
+ "Stake": 3.125,
+ "Online": false
+ },
+ {
+ "Name": "Wallet137",
+ "Stake": 3.125,
+ "Online": false
+ },
+ {
+ "Name": "Wallet138",
+ "Stake": 3.125,
+ "Online": false
+ },
+ {
+ "Name": "Wallet139",
+ "Stake": 3.125,
+ "Online": false
+ },
+ {
+ "Name": "Wallet140",
+ "Stake": 3.125,
+ "Online": false
+ },
+ {
+ "Name": "Wallet141",
+ "Stake": 3.125,
+ "Online": false
+ },
+ {
+ "Name": "Wallet142",
+ "Stake": 3.125,
+ "Online": false
+ },
+ {
+ "Name": "Wallet143",
+ "Stake": 3.125,
+ "Online": false
+ },
+ {
+ "Name": "Wallet144",
+ "Stake": 3.125,
+ "Online": false
+ },
+ {
+ "Name": "Wallet145",
+ "Stake": 3.125,
+ "Online": false
+ },
+ {
+ "Name": "Wallet146",
+ "Stake": 3.125,
+ "Online": false
+ }
+ ],
+ "FeeSink": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "DevMode": false,
+ "Comment": ""
+}
diff --git a/test/testdata/deployednettemplates/recipes/mmnet/net.json b/test/testdata/deployednettemplates/recipes/mmnet/net.json
new file mode 100644
index 000000000..6bc1592e0
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/mmnet/net.json
@@ -0,0 +1,6196 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay1",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay2",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R3",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay3",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R4",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay4",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R5",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay5",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R6",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay6",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R7",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay7",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R8",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay8",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R9",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay9",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R10",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay10",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R11",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay11",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R12",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay12",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R13",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay13",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R14",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay14",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R15",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay15",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R16",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay16",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R17",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay17",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R18",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay18",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R19",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay19",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R20",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay20",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R21",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay21",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R22",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay22",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R23",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay23",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R24",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay24",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R25",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay25",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R26",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay26",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R27",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay27",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R28",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay28",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R29",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay29",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R30",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay30",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R31",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay31",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R32",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay32",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R33",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay33",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R34",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay34",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R35",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay35",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R36",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay36",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R37",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay37",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R38",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay38",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R39",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay39",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R40",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay40",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R41",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay41",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R42",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay42",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R43",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay43",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R44",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay44",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R45",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay45",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R46",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay46",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R47",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay47",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R48",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay48",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R49",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay49",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R50",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay50",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R51",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay51",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R52",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay52",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R53",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay53",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R54",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay54",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R55",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay55",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R56",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay56",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R57",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay57",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R58",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay58",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R59",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay59",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R60",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay60",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R61",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay61",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R62",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay62",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R63",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay63",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R64",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay64",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R65",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay65",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R66",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay66",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R67",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay67",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R68",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay68",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R69",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay69",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R70",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay70",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R71",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay71",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R72",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay72",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R73",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay73",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R74",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay74",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R75",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay75",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R76",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay76",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R77",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay77",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R78",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay78",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R79",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay79",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R80",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay80",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R81",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay81",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R82",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay82",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R83",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay83",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R84",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay84",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R85",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay85",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R86",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay86",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R87",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay87",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R88",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay88",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R89",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay89",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R90",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay90",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R91",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay91",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R92",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay92",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R93",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay93",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R94",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay94",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R95",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay95",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R96",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay96",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R97",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay97",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R98",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay98",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R99",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay99",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R100",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay100",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R101",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay101",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R102",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay102",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R103",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay103",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R104",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay104",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R105",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay105",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R106",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay106",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R107",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay107",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R108",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay108",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R109",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay109",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R110",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay110",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R111",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay111",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R112",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay112",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R113",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay113",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R114",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay114",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R115",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay115",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R116",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay116",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R117",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay117",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R118",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay118",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R119",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay119",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R120",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay120",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R121",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay121",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R122",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay122",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R123",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay123",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R124",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay124",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R125",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay125",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R126",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay126",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R127",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay127",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R128",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay128",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R129",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay129",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R130",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay130",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R131",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay131",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R132",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay132",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R133",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay133",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R134",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay134",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R135",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay135",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R136",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay136",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node1",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node2",
+ "Wallets": [
+ {
+ "Name": "Wallet2",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N3",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node3",
+ "Wallets": [
+ {
+ "Name": "Wallet3",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N4",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node4",
+ "Wallets": [
+ {
+ "Name": "Wallet4",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N5",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node5",
+ "Wallets": [
+ {
+ "Name": "Wallet5",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N6",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node6",
+ "Wallets": [
+ {
+ "Name": "Wallet6",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N7",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node7",
+ "Wallets": [
+ {
+ "Name": "Wallet7",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N8",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node8",
+ "Wallets": [
+ {
+ "Name": "Wallet8",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N9",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node9",
+ "Wallets": [
+ {
+ "Name": "Wallet9",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N10",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node10",
+ "Wallets": [
+ {
+ "Name": "Wallet10",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N11",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node11",
+ "Wallets": [
+ {
+ "Name": "Wallet11",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N12",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node12",
+ "Wallets": [
+ {
+ "Name": "Wallet12",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N13",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node13",
+ "Wallets": [
+ {
+ "Name": "Wallet13",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N14",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node14",
+ "Wallets": [
+ {
+ "Name": "Wallet14",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N15",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node15",
+ "Wallets": [
+ {
+ "Name": "Wallet15",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N16",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node16",
+ "Wallets": [
+ {
+ "Name": "Wallet16",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N17",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node17",
+ "Wallets": [
+ {
+ "Name": "Wallet17",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N18",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node18",
+ "Wallets": [
+ {
+ "Name": "Wallet18",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N19",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node19",
+ "Wallets": [
+ {
+ "Name": "Wallet19",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N20",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node20",
+ "Wallets": [
+ {
+ "Name": "Wallet20",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N21",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node21",
+ "Wallets": [
+ {
+ "Name": "Wallet21",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N22",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node22",
+ "Wallets": [
+ {
+ "Name": "Wallet22",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N23",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node23",
+ "Wallets": [
+ {
+ "Name": "Wallet23",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N24",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node24",
+ "Wallets": [
+ {
+ "Name": "Wallet24",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N25",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node25",
+ "Wallets": [
+ {
+ "Name": "Wallet25",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N26",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node26",
+ "Wallets": [
+ {
+ "Name": "Wallet26",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N27",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node27",
+ "Wallets": [
+ {
+ "Name": "Wallet27",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N28",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node28",
+ "Wallets": [
+ {
+ "Name": "Wallet28",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N29",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node29",
+ "Wallets": [
+ {
+ "Name": "Wallet29",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N30",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node30",
+ "Wallets": [
+ {
+ "Name": "Wallet30",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N31",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node31",
+ "Wallets": [
+ {
+ "Name": "Wallet31",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N32",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node32",
+ "Wallets": [
+ {
+ "Name": "Wallet32",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N33",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node33",
+ "Wallets": [
+ {
+ "Name": "Wallet33",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N34",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node34",
+ "Wallets": [
+ {
+ "Name": "Wallet34",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N35",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node35",
+ "Wallets": [
+ {
+ "Name": "Wallet35",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N36",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node36",
+ "Wallets": [
+ {
+ "Name": "Wallet36",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N37",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node37",
+ "Wallets": [
+ {
+ "Name": "Wallet37",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N38",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node38",
+ "Wallets": [
+ {
+ "Name": "Wallet38",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N39",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node39",
+ "Wallets": [
+ {
+ "Name": "Wallet39",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N40",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node40",
+ "Wallets": [
+ {
+ "Name": "Wallet40",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N41",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node41",
+ "Wallets": [
+ {
+ "Name": "Wallet41",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N42",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node42",
+ "Wallets": [
+ {
+ "Name": "Wallet42",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N43",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node43",
+ "Wallets": [
+ {
+ "Name": "Wallet43",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N44",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node44",
+ "Wallets": [
+ {
+ "Name": "Wallet44",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N45",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node45",
+ "Wallets": [
+ {
+ "Name": "Wallet45",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N46",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node46",
+ "Wallets": [
+ {
+ "Name": "Wallet46",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N47",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node47",
+ "Wallets": [
+ {
+ "Name": "Wallet47",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N48",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node48",
+ "Wallets": [
+ {
+ "Name": "Wallet48",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N49",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node49",
+ "Wallets": [
+ {
+ "Name": "Wallet49",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N50",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node50",
+ "Wallets": [
+ {
+ "Name": "Wallet50",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N51",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node51",
+ "Wallets": [
+ {
+ "Name": "Wallet51",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N52",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node52",
+ "Wallets": [
+ {
+ "Name": "Wallet52",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N53",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node53",
+ "Wallets": [
+ {
+ "Name": "Wallet53",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N54",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node54",
+ "Wallets": [
+ {
+ "Name": "Wallet54",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N55",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node55",
+ "Wallets": [
+ {
+ "Name": "Wallet55",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N56",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node56",
+ "Wallets": [
+ {
+ "Name": "Wallet56",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N57",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node57",
+ "Wallets": [
+ {
+ "Name": "Wallet57",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N58",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node58",
+ "Wallets": [
+ {
+ "Name": "Wallet58",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N59",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node59",
+ "Wallets": [
+ {
+ "Name": "Wallet59",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N60",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node60",
+ "Wallets": [
+ {
+ "Name": "Wallet60",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N61",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node61",
+ "Wallets": [
+ {
+ "Name": "Wallet61",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N62",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node62",
+ "Wallets": [
+ {
+ "Name": "Wallet62",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N63",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node63",
+ "Wallets": [
+ {
+ "Name": "Wallet63",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N64",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node64",
+ "Wallets": [
+ {
+ "Name": "Wallet64",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N65",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node65",
+ "Wallets": [
+ {
+ "Name": "Wallet65",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N66",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node66",
+ "Wallets": [
+ {
+ "Name": "Wallet66",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N67",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node67",
+ "Wallets": [
+ {
+ "Name": "Wallet67",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N68",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node68",
+ "Wallets": [
+ {
+ "Name": "Wallet68",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N69",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node69",
+ "Wallets": [
+ {
+ "Name": "Wallet69",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N70",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node70",
+ "Wallets": [
+ {
+ "Name": "Wallet70",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N71",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node71",
+ "Wallets": [
+ {
+ "Name": "Wallet71",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N72",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node72",
+ "Wallets": [
+ {
+ "Name": "Wallet72",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N73",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node73",
+ "Wallets": [
+ {
+ "Name": "Wallet73",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N74",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node74",
+ "Wallets": [
+ {
+ "Name": "Wallet74",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N75",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node75",
+ "Wallets": [
+ {
+ "Name": "Wallet75",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N76",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node76",
+ "Wallets": [
+ {
+ "Name": "Wallet76",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N77",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node77",
+ "Wallets": [
+ {
+ "Name": "Wallet77",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N78",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node78",
+ "Wallets": [
+ {
+ "Name": "Wallet78",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N79",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node79",
+ "Wallets": [
+ {
+ "Name": "Wallet79",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N80",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node80",
+ "Wallets": [
+ {
+ "Name": "Wallet80",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N81",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node81",
+ "Wallets": [
+ {
+ "Name": "Wallet81",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N82",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node82",
+ "Wallets": [
+ {
+ "Name": "Wallet82",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N83",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node83",
+ "Wallets": [
+ {
+ "Name": "Wallet83",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N84",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node84",
+ "Wallets": [
+ {
+ "Name": "Wallet84",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N85",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node85",
+ "Wallets": [
+ {
+ "Name": "Wallet85",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N86",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node86",
+ "Wallets": [
+ {
+ "Name": "Wallet86",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N87",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node87",
+ "Wallets": [
+ {
+ "Name": "Wallet87",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N88",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node88",
+ "Wallets": [
+ {
+ "Name": "Wallet88",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N89",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node89",
+ "Wallets": [
+ {
+ "Name": "Wallet89",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N90",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node90",
+ "Wallets": [
+ {
+ "Name": "Wallet90",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N91",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node91",
+ "Wallets": [
+ {
+ "Name": "Wallet91",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N92",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node92",
+ "Wallets": [
+ {
+ "Name": "Wallet92",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N93",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node93",
+ "Wallets": [
+ {
+ "Name": "Wallet93",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N94",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node94",
+ "Wallets": [
+ {
+ "Name": "Wallet94",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N95",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node95",
+ "Wallets": [
+ {
+ "Name": "Wallet95",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N96",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node96",
+ "Wallets": [
+ {
+ "Name": "Wallet96",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N97",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node97",
+ "Wallets": [
+ {
+ "Name": "Wallet97",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N98",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node98",
+ "Wallets": [
+ {
+ "Name": "Wallet98",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N99",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node99",
+ "Wallets": [
+ {
+ "Name": "Wallet99",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N100",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node100",
+ "Wallets": [
+ {
+ "Name": "Wallet100",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N101",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node101",
+ "Wallets": [
+ {
+ "Name": "Wallet101",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N102",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node102",
+ "Wallets": [
+ {
+ "Name": "Wallet102",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N103",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node103",
+ "Wallets": [
+ {
+ "Name": "Wallet103",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N104",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node104",
+ "Wallets": [
+ {
+ "Name": "Wallet104",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N105",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node105",
+ "Wallets": [
+ {
+ "Name": "Wallet105",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N106",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node106",
+ "Wallets": [
+ {
+ "Name": "Wallet106",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N107",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node107",
+ "Wallets": [
+ {
+ "Name": "Wallet107",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N108",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node108",
+ "Wallets": [
+ {
+ "Name": "Wallet108",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N109",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node109",
+ "Wallets": [
+ {
+ "Name": "Wallet109",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N110",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node110",
+ "Wallets": [
+ {
+ "Name": "Wallet110",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N111",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node111",
+ "Wallets": [
+ {
+ "Name": "Wallet111",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N112",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node112",
+ "Wallets": [
+ {
+ "Name": "Wallet112",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N113",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node113",
+ "Wallets": [
+ {
+ "Name": "Wallet113",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N114",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node114",
+ "Wallets": [
+ {
+ "Name": "Wallet114",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N115",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node115",
+ "Wallets": [
+ {
+ "Name": "Wallet115",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N116",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node116",
+ "Wallets": [
+ {
+ "Name": "Wallet116",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N117",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node117",
+ "Wallets": [
+ {
+ "Name": "Wallet117",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N118",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node118",
+ "Wallets": [
+ {
+ "Name": "Wallet118",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N119",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node119",
+ "Wallets": [
+ {
+ "Name": "Wallet119",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N120",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node120",
+ "Wallets": [
+ {
+ "Name": "Wallet120",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N121",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node121",
+ "Wallets": [
+ {
+ "Name": "Wallet121",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N122",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node122",
+ "Wallets": [
+ {
+ "Name": "Wallet122",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N123",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node123",
+ "Wallets": [
+ {
+ "Name": "Wallet123",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N124",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node124",
+ "Wallets": [
+ {
+ "Name": "Wallet124",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N125",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node125",
+ "Wallets": [
+ {
+ "Name": "Wallet125",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N126",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node126",
+ "Wallets": [
+ {
+ "Name": "Wallet126",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N127",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node127",
+ "Wallets": [
+ {
+ "Name": "Wallet127",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N128",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node128",
+ "Wallets": [
+ {
+ "Name": "Wallet128",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N129",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node129",
+ "Wallets": [
+ {
+ "Name": "Wallet129",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N130",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node130",
+ "Wallets": [
+ {
+ "Name": "Wallet130",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "NPN1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode1",
+ "Wallets": [
+ {
+ "Name": "Wallet131",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode2",
+ "Wallets": [
+ {
+ "Name": "Wallet132",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN3",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode3",
+ "Wallets": [
+ {
+ "Name": "Wallet133",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN4",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode4",
+ "Wallets": [
+ {
+ "Name": "Wallet134",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN5",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode5",
+ "Wallets": [
+ {
+ "Name": "Wallet135",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN6",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode6",
+ "Wallets": [
+ {
+ "Name": "Wallet136",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN7",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode7",
+ "Wallets": [
+ {
+ "Name": "Wallet137",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN8",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode8",
+ "Wallets": [
+ {
+ "Name": "Wallet138",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN9",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode9",
+ "Wallets": [
+ {
+ "Name": "Wallet139",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN10",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode10",
+ "Wallets": [
+ {
+ "Name": "Wallet140",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN11",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode11",
+ "Wallets": [
+ {
+ "Name": "Wallet141",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN12",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode12",
+ "Wallets": [
+ {
+ "Name": "Wallet142",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN13",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode13",
+ "Wallets": [
+ {
+ "Name": "Wallet143",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN14",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode14",
+ "Wallets": [
+ {
+ "Name": "Wallet144",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN15",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode15",
+ "Wallets": [
+ {
+ "Name": "Wallet145",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN16",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode16",
+ "Wallets": [
+ {
+ "Name": "Wallet146",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/mmnet/recipe.json b/test/testdata/deployednettemplates/recipes/mmnet/recipe.json
new file mode 100644
index 000000000..c2d860eed
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/mmnet/recipe.json
@@ -0,0 +1,7 @@
+{
+ "GenesisFile": "genesis.json",
+ "NetworkFile": "net.json",
+ "ConfigFile": "../../configs/reference.json",
+ "HostTemplatesFile": "../../hosttemplates/hosttemplates.json",
+ "TopologyFile": "topology.json"
+}
diff --git a/test/testdata/deployednettemplates/recipes/mmnet/topology.json b/test/testdata/deployednettemplates/recipes/mmnet/topology.json
new file mode 100644
index 000000000..679b8b47f
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/mmnet/topology.json
@@ -0,0 +1,1164 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R2",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R3",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R4",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R5",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R6",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R7",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R8",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R9",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R10",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R11",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R12",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R13",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R14",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R15",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R16",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R17",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R18",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R19",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R20",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N1",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N2",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N3",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N4",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N5",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N6",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N7",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N8",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N9",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N10",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N11",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N12",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N13",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N14",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N15",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N16",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N17",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N18",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N19",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N20",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN1",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN2",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R21",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R22",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R23",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R24",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R25",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R26",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R27",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R28",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R29",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R30",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R31",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R32",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R33",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R34",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R35",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R36",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R37",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R38",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R39",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R40",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N21",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N22",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N23",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N24",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N25",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N26",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N27",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N28",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N29",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N30",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N31",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N32",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N33",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N34",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N35",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N36",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N37",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N38",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N39",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N40",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN3",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN4",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R41",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "R42",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "R43",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "R44",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "R45",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "R46",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "R47",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "R48",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "R49",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "R50",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N41",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N42",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N43",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N44",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N45",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N46",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N47",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N48",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N49",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N50",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N51",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N52",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N53",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N54",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N55",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N56",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N57",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N58",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N59",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N60",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN5",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN6",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "R51",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R52",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R53",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R54",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R55",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R56",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N61",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N62",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N63",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N64",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N65",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN7",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R57",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R58",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R59",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R60",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R61",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R62",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R63",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R64",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R65",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R66",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N66",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N67",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N68",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N69",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N70",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N71",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N72",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N73",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N74",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N75",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN8",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN9",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R67",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "R68",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "R69",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "R70",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "R71",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "R72",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "R73",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "R74",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "R75",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "R76",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "R77",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "R78",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "R79",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "R80",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "N76",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "N77",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "N78",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "N79",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "N80",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "N81",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "N82",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "N83",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN10",
+ "Template": "AWS-EU-WEST-1-m5d.4xl"
+ },
+ {
+ "Name": "R81",
+ "Template": "AWS-EU-NORTH-1-m5d.4xl"
+ },
+ {
+ "Name": "R82",
+ "Template": "AWS-EU-NORTH-1-m5d.4xl"
+ },
+ {
+ "Name": "N84",
+ "Template": "AWS-EU-NORTH-1-m5d.4xl"
+ },
+ {
+ "Name": "N85",
+ "Template": "AWS-EU-NORTH-1-m5d.4xl"
+ },
+ {
+ "Name": "N86",
+ "Template": "AWS-EU-NORTH-1-m5d.4xl"
+ },
+ {
+ "Name": "N87",
+ "Template": "AWS-EU-NORTH-1-m5d.4xl"
+ },
+ {
+ "Name": "N88",
+ "Template": "AWS-EU-NORTH-1-m5d.4xl"
+ },
+ {
+ "Name": "N89",
+ "Template": "AWS-EU-NORTH-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN11",
+ "Template": "AWS-EU-NORTH-1-m5d.4xl"
+ },
+ {
+ "Name": "R83",
+ "Template": "AWS-EU-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "R84",
+ "Template": "AWS-EU-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "R85",
+ "Template": "AWS-EU-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "R86",
+ "Template": "AWS-EU-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "N90",
+ "Template": "AWS-EU-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "N91",
+ "Template": "AWS-EU-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "N92",
+ "Template": "AWS-EU-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "N93",
+ "Template": "AWS-EU-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN12",
+ "Template": "AWS-EU-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "R87",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R88",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R89",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R90",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R91",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N94",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N95",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N96",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N97",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N98",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N99",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N100",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N101",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N102",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N103",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN13",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN14",
+ "Template": "AWS-AP-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R92",
+ "Template": "AWS-AP-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "R93",
+ "Template": "AWS-AP-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "R94",
+ "Template": "AWS-AP-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "N104",
+ "Template": "AWS-AP-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "N105",
+ "Template": "AWS-AP-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN15",
+ "Template": "AWS-AP-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "R95",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R96",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R97",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R98",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R99",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R100",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R101",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R102",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R103",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R104",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R105",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R106",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N106",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N107",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN16",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R107",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R108",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R109",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R110",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N108",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N109",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N110",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N111",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN17",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R111",
+ "Template": "AWS-AP-NORTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N112",
+ "Template": "AWS-AP-NORTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N113",
+ "Template": "AWS-AP-NORTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN18",
+ "Template": "AWS-AP-NORTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R112",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "R113",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "R114",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "R115",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "R116",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "R117",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "R118",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "R119",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "R120",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "R121",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "R122",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "R123",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "R124",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "R125",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "R126",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "N114",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "N115",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "N116",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "N117",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "N118",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "N119",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "N120",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "N121",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "N122",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "N123",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "N124",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "N125",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "NPN19",
+ "Template": "AWS-AP-NORTHEAST-3-m5d.4xl"
+ },
+ {
+ "Name": "R127",
+ "Template": "AWS-ME-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "R128",
+ "Template": "AWS-ME-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "N126",
+ "Template": "AWS-ME-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "N127",
+ "Template": "AWS-ME-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN20",
+ "Template": "AWS-ME-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "R129",
+ "Template": "AWS-AF-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "R130",
+ "Template": "AWS-AF-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "R131",
+ "Template": "AWS-AF-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "R132",
+ "Template": "AWS-AF-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "N128",
+ "Template": "AWS-AF-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN21",
+ "Template": "AWS-AF-SOUTH-1-m5d.4xl"
+ },
+ {
+ "Name": "R133",
+ "Template": "AWS-SA-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R134",
+ "Template": "AWS-SA-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R135",
+ "Template": "AWS-SA-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R136",
+ "Template": "AWS-SA-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N129",
+ "Template": "AWS-SA-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N130",
+ "Template": "AWS-SA-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N131",
+ "Template": "AWS-SA-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "N132",
+ "Template": "AWS-SA-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN22",
+ "Template": "AWS-SA-EAST-1-m5d.4xl"
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/Makefile b/test/testdata/deployednettemplates/recipes/scenario1s/Makefile
new file mode 100644
index 000000000..dc973560b
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/Makefile
@@ -0,0 +1,13 @@
+# scenario1s is scenario1 but smaller, (100 nodes, 100 wallets) -> (20 nodes, 20 wallets), each algod gets single tenancy on a smaller ec2 instance
+PARAMS=-w 20 -R 8 -N 20 -n 20 -H 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+
+all: net.json genesis.json
+
+net.json: node.json nonPartNode.json ${GOPATH}/bin/netgoal Makefile
+ netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS}
+
+genesis.json: ${GOPATH}/bin/netgoal Makefile
+ netgoal generate -t genesis -r /tmp/wat -o genesis.json ${PARAMS}
+
+clean:
+ rm -f net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/gen_topology.py b/test/testdata/deployednettemplates/recipes/scenario1s/gen_topology.py
new file mode 100644
index 000000000..07d14a4df
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/gen_topology.py
@@ -0,0 +1,27 @@
+# scenario1s is scenario1 but smaller, (100 nodes, 100 wallets) -> (20 nodes, 20 wallets), each algod gets single tenancy on a smaller ec2 instance
+node_types = {"R":8, "N":20, "NPN":10}
+node_size = {"R":"-m5d.4xl", "N":"-m5d.2xl", "NPN":"-m5d.2xl"}
+regions = [
+ "AWS-US-EAST-2"
+]
+
+f = open("topology.json", "w")
+f.write("{ \"Hosts\":\n [")
+
+region_count = len(regions)
+first = True
+for x in sorted(node_types.keys()):
+ node_type = x
+ node_count = node_types[x]
+ region_size = node_size[x]
+ for i in range(node_count):
+ node_name = node_type + str(i+1)
+ region = regions[i%region_count]
+ if (first ):
+ first = False
+ else:
+ f.write(",")
+ f.write ("\n {\n \"Name\": \"" + node_name + "\",\n \"Template\": \"" + region + region_size + "\"\n }" )
+
+f.write("\n ]\n}\n")
+f.close()
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/genesis.json b/test/testdata/deployednettemplates/recipes/scenario1s/genesis.json
new file mode 100644
index 000000000..c66d0c920
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/genesis.json
@@ -0,0 +1,164 @@
+{
+ "NetworkName": "b09",
+ "VersionModifier": "",
+ "ConsensusProtocol": "future",
+ "FirstPartKeyRound": 0,
+ "LastPartKeyRound": 22000,
+ "PartKeyDilution": 0,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet4",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet5",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet6",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet7",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet8",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet9",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet10",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet11",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet12",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet13",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet14",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet15",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet16",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet17",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet18",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet19",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet20",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet21",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet22",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet23",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet24",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet25",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet26",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet27",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet28",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet29",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet30",
+ "Stake": 5,
+ "Online": false
+ }
+ ],
+ "FeeSink": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "DevMode": false,
+ "Comment": ""
+}
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/net.json b/test/testdata/deployednettemplates/recipes/scenario1s/net.json
new file mode 100644
index 000000000..b1a5a3307
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/net.json
@@ -0,0 +1,864 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay1",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay2",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R3",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay3",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R4",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay4",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R5",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay5",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R6",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay6",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R7",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay7",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R8",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay8",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node1",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node2",
+ "Wallets": [
+ {
+ "Name": "Wallet2",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N3",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node3",
+ "Wallets": [
+ {
+ "Name": "Wallet3",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N4",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node4",
+ "Wallets": [
+ {
+ "Name": "Wallet4",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N5",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node5",
+ "Wallets": [
+ {
+ "Name": "Wallet5",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N6",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node6",
+ "Wallets": [
+ {
+ "Name": "Wallet6",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N7",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node7",
+ "Wallets": [
+ {
+ "Name": "Wallet7",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N8",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node8",
+ "Wallets": [
+ {
+ "Name": "Wallet8",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N9",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node9",
+ "Wallets": [
+ {
+ "Name": "Wallet9",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N10",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node10",
+ "Wallets": [
+ {
+ "Name": "Wallet10",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N11",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node11",
+ "Wallets": [
+ {
+ "Name": "Wallet11",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N12",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node12",
+ "Wallets": [
+ {
+ "Name": "Wallet12",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N13",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node13",
+ "Wallets": [
+ {
+ "Name": "Wallet13",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N14",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node14",
+ "Wallets": [
+ {
+ "Name": "Wallet14",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N15",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node15",
+ "Wallets": [
+ {
+ "Name": "Wallet15",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N16",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node16",
+ "Wallets": [
+ {
+ "Name": "Wallet16",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N17",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node17",
+ "Wallets": [
+ {
+ "Name": "Wallet17",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N18",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node18",
+ "Wallets": [
+ {
+ "Name": "Wallet18",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N19",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node19",
+ "Wallets": [
+ {
+ "Name": "Wallet19",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N20",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node20",
+ "Wallets": [
+ {
+ "Name": "Wallet20",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "NPN1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode1",
+ "Wallets": [
+ {
+ "Name": "Wallet21",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode2",
+ "Wallets": [
+ {
+ "Name": "Wallet22",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN3",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode3",
+ "Wallets": [
+ {
+ "Name": "Wallet23",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN4",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode4",
+ "Wallets": [
+ {
+ "Name": "Wallet24",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN5",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode5",
+ "Wallets": [
+ {
+ "Name": "Wallet25",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN6",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode6",
+ "Wallets": [
+ {
+ "Name": "Wallet26",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN7",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode7",
+ "Wallets": [
+ {
+ "Name": "Wallet27",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN8",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode8",
+ "Wallets": [
+ {
+ "Name": "Wallet28",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN9",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode9",
+ "Wallets": [
+ {
+ "Name": "Wallet29",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN10",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode10",
+ "Wallets": [
+ {
+ "Name": "Wallet30",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/node.json b/test/testdata/deployednettemplates/recipes/scenario1s/node.json
new file mode 100644
index 000000000..fad27f5fe
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/node.json
@@ -0,0 +1,23 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": false,
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}",
+ "AltConfigs": [
+ {
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }",
+ "FractionApply": 0.2
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/nonPartNode.json b/test/testdata/deployednettemplates/recipes/scenario1s/nonPartNode.json
new file mode 100644
index 000000000..48f453684
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/nonPartNode.json
@@ -0,0 +1,5 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+}
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/recipe.json b/test/testdata/deployednettemplates/recipes/scenario1s/recipe.json
new file mode 100644
index 000000000..a2f88f63b
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/recipe.json
@@ -0,0 +1,7 @@
+{
+ "GenesisFile":"genesis.json",
+ "NetworkFile":"net.json",
+ "ConfigFile": "../../configs/reference.json",
+ "HostTemplatesFile": "../../hosttemplates/hosttemplates.json",
+ "TopologyFile": "topology.json"
+}
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/relay.json b/test/testdata/deployednettemplates/recipes/scenario1s/relay.json
new file mode 100644
index 000000000..db8fb939d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/relay.json
@@ -0,0 +1,11 @@
+{
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+}
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/topology.json b/test/testdata/deployednettemplates/recipes/scenario1s/topology.json
new file mode 100644
index 000000000..c53f19cd5
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/topology.json
@@ -0,0 +1,156 @@
+{ "Hosts":
+ [
+ {
+ "Name": "N1",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N2",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N3",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N4",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N5",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N6",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N7",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N8",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N9",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N10",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N11",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N12",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N13",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N14",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N15",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N16",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N17",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N18",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N19",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N20",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "NPN1",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "NPN2",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "NPN3",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "NPN4",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "NPN5",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "NPN6",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "NPN7",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "NPN8",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "NPN9",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "NPN10",
+ "Template": "AWS-US-EAST-2-m5d.2xl"
+ },
+ {
+ "Name": "R1",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R2",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R3",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R4",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R5",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R6",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R7",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R8",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/scenario3s/Makefile b/test/testdata/deployednettemplates/recipes/scenario3s/Makefile
new file mode 100644
index 000000000..f49294616
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario3s/Makefile
@@ -0,0 +1,18 @@
+# scenario3s is scenario3 but smaller. (10000 wallets -> 500) (1000 algod participating nodes -> 100) It still keeps a global datacenter distribution.
+PARAMS=-w 500 -R 20 -N 100 -n 100 -H 15 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+
+SOURCES=node.json ${GOPATH}/bin/netgoal Makefile relay.json nonPartNode.json
+
+all: net.json genesis.json
+
+net.json: ${SOURCES}
+ netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS}
+
+genesis.json: ${SOURCES}
+ netgoal generate -t genesis -r /tmp/wat -o genesis.json ${PARAMS}
+ mv genesis.json genesis.json.orig
+ jq '.NetworkName="bs3s"|.ConsensusProtocol="future"|.LastPartKeyRound=5000' < genesis.json.orig > genesis.json
+ rm genesis.json.orig
+
+clean:
+ rm -f net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/scenario3s/gen_topology.py b/test/testdata/deployednettemplates/recipes/scenario3s/gen_topology.py
new file mode 100644
index 000000000..e8c12a1d4
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario3s/gen_topology.py
@@ -0,0 +1,33 @@
+# scenario3s is scenario3 but smaller. (10000 wallets -> 500) (1000 algod participating nodes -> 100) It still keeps a global datacenter distribution.
+node_types = {"R":20, "N":100, "NPN":15}
+node_size = {"R":"-m5d.4xl", "N":"-m5d.2xl", "NPN":"-m5d.4xl"}
+regions = [
+ "AWS-US-EAST-1",
+ "AWS-US-WEST-2",
+ "AWS-CA-CENTRAL-1",
+ "AWS-AP-SOUTHEAST-1",
+ "AWS-AP-SOUTHEAST-2",
+ "AWS-EU-CENTRAL-1",
+ "AWS-EU-WEST-2",
+ "AWS-EU-WEST-3"
+]
+
+f = open("topology.json", "w")
+f.write("{ \"Hosts\":\n [")
+
+region_count = len(regions)
+first = True
+for node_type in sorted(node_types.keys()):
+ node_count = node_types[node_type]
+ region_size = node_size[node_type]
+ for i in range(node_count):
+ node_name = node_type + str(i+1)
+ region = regions[i%region_count]
+ if (first ):
+ first = False
+ else:
+ f.write(",")
+ f.write ("\n {\n \"Name\": \"" + node_name + "\",\n \"Template\": \"" + region + region_size + "\"\n }" )
+
+f.write("\n ]\n}\n")
+f.close()
diff --git a/test/testdata/deployednettemplates/recipes/scenario3s/genesis.json b/test/testdata/deployednettemplates/recipes/scenario3s/genesis.json
new file mode 100644
index 000000000..df2ddd578
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario3s/genesis.json
@@ -0,0 +1,2589 @@
+{
+ "NetworkName": "bs3s",
+ "VersionModifier": "",
+ "ConsensusProtocol": "future",
+ "FirstPartKeyRound": 0,
+ "LastPartKeyRound": 5000,
+ "PartKeyDilution": 0,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet4",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet5",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet6",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet7",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet8",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet9",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet10",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet11",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet12",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet13",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet14",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet15",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet16",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet17",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet18",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet19",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet20",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet21",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet22",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet23",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet24",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet25",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet26",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet27",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet28",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet29",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet30",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet31",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet32",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet33",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet34",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet35",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet36",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet37",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet38",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet39",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet40",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet41",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet42",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet43",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet44",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet45",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet46",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet47",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet48",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet49",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet50",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet51",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet52",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet53",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet54",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet55",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet56",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet57",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet58",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet59",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet60",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet61",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet62",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet63",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet64",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet65",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet66",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet67",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet68",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet69",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet70",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet71",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet72",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet73",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet74",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet75",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet76",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet77",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet78",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet79",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet80",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet81",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet82",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet83",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet84",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet85",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet86",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet87",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet88",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet89",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet90",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet91",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet92",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet93",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet94",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet95",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet96",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet97",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet98",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet99",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet100",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet101",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet102",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet103",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet104",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet105",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet106",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet107",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet108",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet109",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet110",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet111",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet112",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet113",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet114",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet115",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet116",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet117",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet118",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet119",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet120",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet121",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet122",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet123",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet124",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet125",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet126",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet127",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet128",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet129",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet130",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet131",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet132",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet133",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet134",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet135",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet136",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet137",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet138",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet139",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet140",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet141",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet142",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet143",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet144",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet145",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet146",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet147",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet148",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet149",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet150",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet151",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet152",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet153",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet154",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet155",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet156",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet157",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet158",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet159",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet160",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet161",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet162",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet163",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet164",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet165",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet166",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet167",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet168",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet169",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet170",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet171",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet172",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet173",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet174",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet175",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet176",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet177",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet178",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet179",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet180",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet181",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet182",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet183",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet184",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet185",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet186",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet187",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet188",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet189",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet190",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet191",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet192",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet193",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet194",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet195",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet196",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet197",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet198",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet199",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet200",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet201",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet202",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet203",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet204",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet205",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet206",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet207",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet208",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet209",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet210",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet211",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet212",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet213",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet214",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet215",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet216",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet217",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet218",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet219",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet220",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet221",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet222",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet223",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet224",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet225",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet226",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet227",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet228",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet229",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet230",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet231",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet232",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet233",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet234",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet235",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet236",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet237",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet238",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet239",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet240",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet241",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet242",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet243",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet244",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet245",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet246",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet247",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet248",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet249",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet250",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet251",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet252",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet253",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet254",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet255",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet256",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet257",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet258",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet259",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet260",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet261",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet262",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet263",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet264",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet265",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet266",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet267",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet268",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet269",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet270",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet271",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet272",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet273",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet274",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet275",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet276",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet277",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet278",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet279",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet280",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet281",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet282",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet283",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet284",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet285",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet286",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet287",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet288",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet289",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet290",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet291",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet292",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet293",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet294",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet295",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet296",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet297",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet298",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet299",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet300",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet301",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet302",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet303",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet304",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet305",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet306",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet307",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet308",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet309",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet310",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet311",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet312",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet313",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet314",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet315",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet316",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet317",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet318",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet319",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet320",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet321",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet322",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet323",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet324",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet325",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet326",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet327",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet328",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet329",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet330",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet331",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet332",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet333",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet334",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet335",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet336",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet337",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet338",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet339",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet340",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet341",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet342",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet343",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet344",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet345",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet346",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet347",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet348",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet349",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet350",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet351",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet352",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet353",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet354",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet355",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet356",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet357",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet358",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet359",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet360",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet361",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet362",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet363",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet364",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet365",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet366",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet367",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet368",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet369",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet370",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet371",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet372",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet373",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet374",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet375",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet376",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet377",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet378",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet379",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet380",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet381",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet382",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet383",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet384",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet385",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet386",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet387",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet388",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet389",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet390",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet391",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet392",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet393",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet394",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet395",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet396",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet397",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet398",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet399",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet400",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet401",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet402",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet403",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet404",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet405",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet406",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet407",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet408",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet409",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet410",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet411",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet412",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet413",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet414",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet415",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet416",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet417",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet418",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet419",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet420",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet421",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet422",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet423",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet424",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet425",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet426",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet427",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet428",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet429",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet430",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet431",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet432",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet433",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet434",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet435",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet436",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet437",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet438",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet439",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet440",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet441",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet442",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet443",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet444",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet445",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet446",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet447",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet448",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet449",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet450",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet451",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet452",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet453",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet454",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet455",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet456",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet457",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet458",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet459",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet460",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet461",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet462",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet463",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet464",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet465",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet466",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet467",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet468",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet469",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet470",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet471",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet472",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet473",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet474",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet475",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet476",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet477",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet478",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet479",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet480",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet481",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet482",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet483",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet484",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet485",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet486",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet487",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet488",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet489",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet490",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet491",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet492",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet493",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet494",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet495",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet496",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet497",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet498",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet499",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet500",
+ "Stake": 0.1,
+ "Online": true
+ },
+ {
+ "Name": "Wallet501",
+ "Stake": 3.3333333333333335,
+ "Online": false
+ },
+ {
+ "Name": "Wallet502",
+ "Stake": 3.3333333333333335,
+ "Online": false
+ },
+ {
+ "Name": "Wallet503",
+ "Stake": 3.3333333333333335,
+ "Online": false
+ },
+ {
+ "Name": "Wallet504",
+ "Stake": 3.3333333333333335,
+ "Online": false
+ },
+ {
+ "Name": "Wallet505",
+ "Stake": 3.3333333333333335,
+ "Online": false
+ },
+ {
+ "Name": "Wallet506",
+ "Stake": 3.3333333333333335,
+ "Online": false
+ },
+ {
+ "Name": "Wallet507",
+ "Stake": 3.3333333333333335,
+ "Online": false
+ },
+ {
+ "Name": "Wallet508",
+ "Stake": 3.3333333333333335,
+ "Online": false
+ },
+ {
+ "Name": "Wallet509",
+ "Stake": 3.3333333333333335,
+ "Online": false
+ },
+ {
+ "Name": "Wallet510",
+ "Stake": 3.3333333333333335,
+ "Online": false
+ },
+ {
+ "Name": "Wallet511",
+ "Stake": 3.3333333333333335,
+ "Online": false
+ },
+ {
+ "Name": "Wallet512",
+ "Stake": 3.3333333333333335,
+ "Online": false
+ },
+ {
+ "Name": "Wallet513",
+ "Stake": 3.3333333333333335,
+ "Online": false
+ },
+ {
+ "Name": "Wallet514",
+ "Stake": 3.3333333333333335,
+ "Online": false
+ },
+ {
+ "Name": "Wallet515",
+ "Stake": 3.3333333333333335,
+ "Online": false
+ }
+ ],
+ "FeeSink": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "DevMode": false,
+ "Comment": ""
+}
diff --git a/test/testdata/deployednettemplates/recipes/scenario3s/net.json b/test/testdata/deployednettemplates/recipes/scenario3s/net.json
new file mode 100644
index 000000000..fb4b8be19
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario3s/net.json
@@ -0,0 +1,4749 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay1",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay2",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R3",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay3",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R4",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay4",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R5",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay5",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R6",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay6",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R7",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay7",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R8",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay8",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R9",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay9",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R10",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay10",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R11",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay11",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R12",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay12",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R13",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay13",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R14",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay14",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R15",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay15",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R16",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay16",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R17",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay17",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R18",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay18",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R19",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay19",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "R20",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay20",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node1",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet101",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet201",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet301",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet401",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node2",
+ "Wallets": [
+ {
+ "Name": "Wallet2",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet102",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet202",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet302",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet402",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N3",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node3",
+ "Wallets": [
+ {
+ "Name": "Wallet3",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet103",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet203",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet303",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet403",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N4",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node4",
+ "Wallets": [
+ {
+ "Name": "Wallet4",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet104",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet204",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet304",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet404",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N5",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node5",
+ "Wallets": [
+ {
+ "Name": "Wallet5",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet105",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet205",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet305",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet405",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N6",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node6",
+ "Wallets": [
+ {
+ "Name": "Wallet6",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet106",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet206",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet306",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet406",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N7",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node7",
+ "Wallets": [
+ {
+ "Name": "Wallet7",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet107",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet207",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet307",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet407",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N8",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node8",
+ "Wallets": [
+ {
+ "Name": "Wallet8",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet108",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet208",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet308",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet408",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N9",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node9",
+ "Wallets": [
+ {
+ "Name": "Wallet9",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet109",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet209",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet309",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet409",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N10",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node10",
+ "Wallets": [
+ {
+ "Name": "Wallet10",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet110",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet210",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet310",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet410",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N11",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node11",
+ "Wallets": [
+ {
+ "Name": "Wallet11",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet111",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet211",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet311",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet411",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N12",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node12",
+ "Wallets": [
+ {
+ "Name": "Wallet12",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet112",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet212",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet312",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet412",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N13",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node13",
+ "Wallets": [
+ {
+ "Name": "Wallet13",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet113",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet213",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet313",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet413",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N14",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node14",
+ "Wallets": [
+ {
+ "Name": "Wallet14",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet114",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet214",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet314",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet414",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N15",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node15",
+ "Wallets": [
+ {
+ "Name": "Wallet15",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet115",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet215",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet315",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet415",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N16",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node16",
+ "Wallets": [
+ {
+ "Name": "Wallet16",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet116",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet216",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet316",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet416",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N17",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node17",
+ "Wallets": [
+ {
+ "Name": "Wallet17",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet117",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet217",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet317",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet417",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N18",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node18",
+ "Wallets": [
+ {
+ "Name": "Wallet18",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet118",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet218",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet318",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet418",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N19",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node19",
+ "Wallets": [
+ {
+ "Name": "Wallet19",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet119",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet219",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet319",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet419",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N20",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node20",
+ "Wallets": [
+ {
+ "Name": "Wallet20",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet120",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet220",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet320",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet420",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N21",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node21",
+ "Wallets": [
+ {
+ "Name": "Wallet21",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet121",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet221",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet321",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet421",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N22",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node22",
+ "Wallets": [
+ {
+ "Name": "Wallet22",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet122",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet222",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet322",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet422",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N23",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node23",
+ "Wallets": [
+ {
+ "Name": "Wallet23",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet123",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet223",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet323",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet423",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N24",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node24",
+ "Wallets": [
+ {
+ "Name": "Wallet24",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet124",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet224",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet324",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet424",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N25",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node25",
+ "Wallets": [
+ {
+ "Name": "Wallet25",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet125",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet225",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet325",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet425",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N26",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node26",
+ "Wallets": [
+ {
+ "Name": "Wallet26",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet126",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet226",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet326",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet426",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N27",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node27",
+ "Wallets": [
+ {
+ "Name": "Wallet27",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet127",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet227",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet327",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet427",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N28",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node28",
+ "Wallets": [
+ {
+ "Name": "Wallet28",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet128",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet228",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet328",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet428",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N29",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node29",
+ "Wallets": [
+ {
+ "Name": "Wallet29",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet129",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet229",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet329",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet429",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N30",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node30",
+ "Wallets": [
+ {
+ "Name": "Wallet30",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet130",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet230",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet330",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet430",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N31",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node31",
+ "Wallets": [
+ {
+ "Name": "Wallet31",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet131",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet231",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet331",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet431",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N32",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node32",
+ "Wallets": [
+ {
+ "Name": "Wallet32",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet132",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet232",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet332",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet432",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N33",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node33",
+ "Wallets": [
+ {
+ "Name": "Wallet33",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet133",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet233",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet333",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet433",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N34",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node34",
+ "Wallets": [
+ {
+ "Name": "Wallet34",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet134",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet234",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet334",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet434",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N35",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node35",
+ "Wallets": [
+ {
+ "Name": "Wallet35",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet135",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet235",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet335",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet435",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N36",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node36",
+ "Wallets": [
+ {
+ "Name": "Wallet36",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet136",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet236",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet336",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet436",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N37",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node37",
+ "Wallets": [
+ {
+ "Name": "Wallet37",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet137",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet237",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet337",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet437",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N38",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node38",
+ "Wallets": [
+ {
+ "Name": "Wallet38",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet138",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet238",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet338",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet438",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N39",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node39",
+ "Wallets": [
+ {
+ "Name": "Wallet39",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet139",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet239",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet339",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet439",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N40",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node40",
+ "Wallets": [
+ {
+ "Name": "Wallet40",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet140",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet240",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet340",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet440",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N41",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node41",
+ "Wallets": [
+ {
+ "Name": "Wallet41",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet141",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet241",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet341",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet441",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N42",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node42",
+ "Wallets": [
+ {
+ "Name": "Wallet42",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet142",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet242",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet342",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet442",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N43",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node43",
+ "Wallets": [
+ {
+ "Name": "Wallet43",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet143",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet243",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet343",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet443",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N44",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node44",
+ "Wallets": [
+ {
+ "Name": "Wallet44",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet144",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet244",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet344",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet444",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N45",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node45",
+ "Wallets": [
+ {
+ "Name": "Wallet45",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet145",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet245",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet345",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet445",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N46",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node46",
+ "Wallets": [
+ {
+ "Name": "Wallet46",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet146",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet246",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet346",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet446",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N47",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node47",
+ "Wallets": [
+ {
+ "Name": "Wallet47",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet147",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet247",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet347",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet447",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N48",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node48",
+ "Wallets": [
+ {
+ "Name": "Wallet48",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet148",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet248",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet348",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet448",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N49",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node49",
+ "Wallets": [
+ {
+ "Name": "Wallet49",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet149",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet249",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet349",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet449",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N50",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node50",
+ "Wallets": [
+ {
+ "Name": "Wallet50",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet150",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet250",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet350",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet450",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N51",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node51",
+ "Wallets": [
+ {
+ "Name": "Wallet51",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet151",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet251",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet351",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet451",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N52",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node52",
+ "Wallets": [
+ {
+ "Name": "Wallet52",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet152",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet252",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet352",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet452",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N53",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node53",
+ "Wallets": [
+ {
+ "Name": "Wallet53",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet153",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet253",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet353",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet453",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N54",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node54",
+ "Wallets": [
+ {
+ "Name": "Wallet54",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet154",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet254",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet354",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet454",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N55",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node55",
+ "Wallets": [
+ {
+ "Name": "Wallet55",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet155",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet255",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet355",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet455",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N56",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node56",
+ "Wallets": [
+ {
+ "Name": "Wallet56",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet156",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet256",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet356",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet456",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N57",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node57",
+ "Wallets": [
+ {
+ "Name": "Wallet57",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet157",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet257",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet357",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet457",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N58",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node58",
+ "Wallets": [
+ {
+ "Name": "Wallet58",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet158",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet258",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet358",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet458",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N59",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node59",
+ "Wallets": [
+ {
+ "Name": "Wallet59",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet159",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet259",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet359",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet459",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N60",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node60",
+ "Wallets": [
+ {
+ "Name": "Wallet60",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet160",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet260",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet360",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet460",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N61",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node61",
+ "Wallets": [
+ {
+ "Name": "Wallet61",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet161",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet261",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet361",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet461",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N62",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node62",
+ "Wallets": [
+ {
+ "Name": "Wallet62",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet162",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet262",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet362",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet462",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N63",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node63",
+ "Wallets": [
+ {
+ "Name": "Wallet63",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet163",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet263",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet363",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet463",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N64",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node64",
+ "Wallets": [
+ {
+ "Name": "Wallet64",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet164",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet264",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet364",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet464",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N65",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node65",
+ "Wallets": [
+ {
+ "Name": "Wallet65",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet165",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet265",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet365",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet465",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N66",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node66",
+ "Wallets": [
+ {
+ "Name": "Wallet66",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet166",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet266",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet366",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet466",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N67",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node67",
+ "Wallets": [
+ {
+ "Name": "Wallet67",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet167",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet267",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet367",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet467",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N68",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node68",
+ "Wallets": [
+ {
+ "Name": "Wallet68",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet168",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet268",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet368",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet468",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N69",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node69",
+ "Wallets": [
+ {
+ "Name": "Wallet69",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet169",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet269",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet369",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet469",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N70",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node70",
+ "Wallets": [
+ {
+ "Name": "Wallet70",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet170",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet270",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet370",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet470",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N71",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node71",
+ "Wallets": [
+ {
+ "Name": "Wallet71",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet171",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet271",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet371",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet471",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N72",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node72",
+ "Wallets": [
+ {
+ "Name": "Wallet72",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet172",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet272",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet372",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet472",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N73",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node73",
+ "Wallets": [
+ {
+ "Name": "Wallet73",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet173",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet273",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet373",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet473",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N74",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node74",
+ "Wallets": [
+ {
+ "Name": "Wallet74",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet174",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet274",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet374",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet474",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N75",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node75",
+ "Wallets": [
+ {
+ "Name": "Wallet75",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet175",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet275",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet375",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet475",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N76",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node76",
+ "Wallets": [
+ {
+ "Name": "Wallet76",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet176",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet276",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet376",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet476",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N77",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node77",
+ "Wallets": [
+ {
+ "Name": "Wallet77",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet177",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet277",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet377",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet477",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N78",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node78",
+ "Wallets": [
+ {
+ "Name": "Wallet78",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet178",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet278",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet378",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet478",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N79",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node79",
+ "Wallets": [
+ {
+ "Name": "Wallet79",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet179",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet279",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet379",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet479",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N80",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node80",
+ "Wallets": [
+ {
+ "Name": "Wallet80",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet180",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet280",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet380",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet480",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N81",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node81",
+ "Wallets": [
+ {
+ "Name": "Wallet81",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet181",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet281",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet381",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet481",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N82",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node82",
+ "Wallets": [
+ {
+ "Name": "Wallet82",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet182",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet282",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet382",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet482",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N83",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node83",
+ "Wallets": [
+ {
+ "Name": "Wallet83",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet183",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet283",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet383",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet483",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N84",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node84",
+ "Wallets": [
+ {
+ "Name": "Wallet84",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet184",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet284",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet384",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet484",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N85",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node85",
+ "Wallets": [
+ {
+ "Name": "Wallet85",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet185",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet285",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet385",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet485",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N86",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node86",
+ "Wallets": [
+ {
+ "Name": "Wallet86",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet186",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet286",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet386",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet486",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N87",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node87",
+ "Wallets": [
+ {
+ "Name": "Wallet87",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet187",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet287",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet387",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet487",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N88",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node88",
+ "Wallets": [
+ {
+ "Name": "Wallet88",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet188",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet288",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet388",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet488",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N89",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node89",
+ "Wallets": [
+ {
+ "Name": "Wallet89",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet189",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet289",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet389",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet489",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N90",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node90",
+ "Wallets": [
+ {
+ "Name": "Wallet90",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet190",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet290",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet390",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet490",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N91",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node91",
+ "Wallets": [
+ {
+ "Name": "Wallet91",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet191",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet291",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet391",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet491",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N92",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node92",
+ "Wallets": [
+ {
+ "Name": "Wallet92",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet192",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet292",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet392",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet492",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N93",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node93",
+ "Wallets": [
+ {
+ "Name": "Wallet93",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet193",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet293",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet393",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet493",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N94",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node94",
+ "Wallets": [
+ {
+ "Name": "Wallet94",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet194",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet294",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet394",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet494",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N95",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node95",
+ "Wallets": [
+ {
+ "Name": "Wallet95",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet195",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet295",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet395",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet495",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N96",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node96",
+ "Wallets": [
+ {
+ "Name": "Wallet96",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet196",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet296",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet396",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet496",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N97",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node97",
+ "Wallets": [
+ {
+ "Name": "Wallet97",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet197",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet297",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet397",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet497",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N98",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node98",
+ "Wallets": [
+ {
+ "Name": "Wallet98",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet198",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet298",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet398",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet498",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N99",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node99",
+ "Wallets": [
+ {
+ "Name": "Wallet99",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet199",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet299",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet399",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet499",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N100",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node100",
+ "Wallets": [
+ {
+ "Name": "Wallet100",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet200",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet300",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet400",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet500",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode1",
+ "Wallets": [
+ {
+ "Name": "Wallet501",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 3, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode2",
+ "Wallets": [
+ {
+ "Name": "Wallet502",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 3, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN3",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode3",
+ "Wallets": [
+ {
+ "Name": "Wallet503",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 3, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN4",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode4",
+ "Wallets": [
+ {
+ "Name": "Wallet504",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 3, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN5",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode5",
+ "Wallets": [
+ {
+ "Name": "Wallet505",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 3, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN6",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode6",
+ "Wallets": [
+ {
+ "Name": "Wallet506",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 3, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN7",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode7",
+ "Wallets": [
+ {
+ "Name": "Wallet507",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 3, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN8",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode8",
+ "Wallets": [
+ {
+ "Name": "Wallet508",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 3, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN9",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode9",
+ "Wallets": [
+ {
+ "Name": "Wallet509",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 3, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN10",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode10",
+ "Wallets": [
+ {
+ "Name": "Wallet510",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 3, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN11",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode11",
+ "Wallets": [
+ {
+ "Name": "Wallet511",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 3, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN12",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode12",
+ "Wallets": [
+ {
+ "Name": "Wallet512",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 3, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN13",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode13",
+ "Wallets": [
+ {
+ "Name": "Wallet513",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 3, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN14",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode14",
+ "Wallets": [
+ {
+ "Name": "Wallet514",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 3, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN15",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode15",
+ "Wallets": [
+ {
+ "Name": "Wallet515",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 3, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+ }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/scenario3s/node.json b/test/testdata/deployednettemplates/recipes/scenario3s/node.json
new file mode 100644
index 000000000..b4f495e8e
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario3s/node.json
@@ -0,0 +1,23 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": false,
+ "EnableTelemetry": false,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": false,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }",
+ "AltConfigs": [
+ {
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}",
+ "FractionApply": 0.2
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/scenario3s/nonPartNode.json b/test/testdata/deployednettemplates/recipes/scenario3s/nonPartNode.json
new file mode 100644
index 000000000..9987df2f7
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario3s/nonPartNode.json
@@ -0,0 +1,7 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableMetrics": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 3, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true }"
+}
diff --git a/test/testdata/deployednettemplates/recipes/scenario3s/recipe.json b/test/testdata/deployednettemplates/recipes/scenario3s/recipe.json
new file mode 100644
index 000000000..a2f88f63b
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario3s/recipe.json
@@ -0,0 +1,7 @@
+{
+ "GenesisFile":"genesis.json",
+ "NetworkFile":"net.json",
+ "ConfigFile": "../../configs/reference.json",
+ "HostTemplatesFile": "../../hosttemplates/hosttemplates.json",
+ "TopologyFile": "topology.json"
+}
diff --git a/test/testdata/deployednettemplates/recipes/scenario3s/relay.json b/test/testdata/deployednettemplates/recipes/scenario3s/relay.json
new file mode 100644
index 000000000..f0d447a81
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario3s/relay.json
@@ -0,0 +1,11 @@
+{
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+}
diff --git a/test/testdata/deployednettemplates/recipes/scenario3s/topology.json b/test/testdata/deployednettemplates/recipes/scenario3s/topology.json
new file mode 100644
index 000000000..015e7b6f0
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/scenario3s/topology.json
@@ -0,0 +1,544 @@
+{ "Hosts":
+ [
+ {
+ "Name": "N1",
+ "Template": "AWS-US-EAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N2",
+ "Template": "AWS-US-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N3",
+ "Template": "AWS-CA-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N4",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N5",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N6",
+ "Template": "AWS-EU-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N7",
+ "Template": "AWS-EU-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N8",
+ "Template": "AWS-EU-WEST-3-m5d.2xl"
+ },
+ {
+ "Name": "N9",
+ "Template": "AWS-US-EAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N10",
+ "Template": "AWS-US-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N11",
+ "Template": "AWS-CA-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N12",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N13",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N14",
+ "Template": "AWS-EU-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N15",
+ "Template": "AWS-EU-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N16",
+ "Template": "AWS-EU-WEST-3-m5d.2xl"
+ },
+ {
+ "Name": "N17",
+ "Template": "AWS-US-EAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N18",
+ "Template": "AWS-US-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N19",
+ "Template": "AWS-CA-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N20",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N21",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N22",
+ "Template": "AWS-EU-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N23",
+ "Template": "AWS-EU-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N24",
+ "Template": "AWS-EU-WEST-3-m5d.2xl"
+ },
+ {
+ "Name": "N25",
+ "Template": "AWS-US-EAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N26",
+ "Template": "AWS-US-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N27",
+ "Template": "AWS-CA-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N28",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N29",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N30",
+ "Template": "AWS-EU-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N31",
+ "Template": "AWS-EU-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N32",
+ "Template": "AWS-EU-WEST-3-m5d.2xl"
+ },
+ {
+ "Name": "N33",
+ "Template": "AWS-US-EAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N34",
+ "Template": "AWS-US-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N35",
+ "Template": "AWS-CA-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N36",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N37",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N38",
+ "Template": "AWS-EU-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N39",
+ "Template": "AWS-EU-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N40",
+ "Template": "AWS-EU-WEST-3-m5d.2xl"
+ },
+ {
+ "Name": "N41",
+ "Template": "AWS-US-EAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N42",
+ "Template": "AWS-US-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N43",
+ "Template": "AWS-CA-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N44",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N45",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N46",
+ "Template": "AWS-EU-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N47",
+ "Template": "AWS-EU-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N48",
+ "Template": "AWS-EU-WEST-3-m5d.2xl"
+ },
+ {
+ "Name": "N49",
+ "Template": "AWS-US-EAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N50",
+ "Template": "AWS-US-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N51",
+ "Template": "AWS-CA-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N52",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N53",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N54",
+ "Template": "AWS-EU-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N55",
+ "Template": "AWS-EU-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N56",
+ "Template": "AWS-EU-WEST-3-m5d.2xl"
+ },
+ {
+ "Name": "N57",
+ "Template": "AWS-US-EAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N58",
+ "Template": "AWS-US-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N59",
+ "Template": "AWS-CA-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N60",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N61",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N62",
+ "Template": "AWS-EU-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N63",
+ "Template": "AWS-EU-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N64",
+ "Template": "AWS-EU-WEST-3-m5d.2xl"
+ },
+ {
+ "Name": "N65",
+ "Template": "AWS-US-EAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N66",
+ "Template": "AWS-US-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N67",
+ "Template": "AWS-CA-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N68",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N69",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N70",
+ "Template": "AWS-EU-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N71",
+ "Template": "AWS-EU-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N72",
+ "Template": "AWS-EU-WEST-3-m5d.2xl"
+ },
+ {
+ "Name": "N73",
+ "Template": "AWS-US-EAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N74",
+ "Template": "AWS-US-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N75",
+ "Template": "AWS-CA-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N76",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N77",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N78",
+ "Template": "AWS-EU-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N79",
+ "Template": "AWS-EU-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N80",
+ "Template": "AWS-EU-WEST-3-m5d.2xl"
+ },
+ {
+ "Name": "N81",
+ "Template": "AWS-US-EAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N82",
+ "Template": "AWS-US-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N83",
+ "Template": "AWS-CA-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N84",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N85",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N86",
+ "Template": "AWS-EU-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N87",
+ "Template": "AWS-EU-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N88",
+ "Template": "AWS-EU-WEST-3-m5d.2xl"
+ },
+ {
+ "Name": "N89",
+ "Template": "AWS-US-EAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N90",
+ "Template": "AWS-US-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N91",
+ "Template": "AWS-CA-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N92",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N93",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.2xl"
+ },
+ {
+ "Name": "N94",
+ "Template": "AWS-EU-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N95",
+ "Template": "AWS-EU-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N96",
+ "Template": "AWS-EU-WEST-3-m5d.2xl"
+ },
+ {
+ "Name": "N97",
+ "Template": "AWS-US-EAST-1-m5d.2xl"
+ },
+ {
+ "Name": "N98",
+ "Template": "AWS-US-WEST-2-m5d.2xl"
+ },
+ {
+ "Name": "N99",
+ "Template": "AWS-CA-CENTRAL-1-m5d.2xl"
+ },
+ {
+ "Name": "N100",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.2xl"
+ },
+ {
+ "Name": "NPN1",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN2",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN3",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN4",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN5",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN6",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN7",
+ "Template": "AWS-EU-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN8",
+ "Template": "AWS-EU-WEST-3-m5d.4xl"
+ },
+ {
+ "Name": "NPN9",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN10",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN11",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN12",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN13",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN14",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "NPN15",
+ "Template": "AWS-EU-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "R1",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R2",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "R3",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R4",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R5",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R6",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R7",
+ "Template": "AWS-EU-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "R8",
+ "Template": "AWS-EU-WEST-3-m5d.4xl"
+ },
+ {
+ "Name": "R9",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R10",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "R11",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R12",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R13",
+ "Template": "AWS-AP-SOUTHEAST-2-m5d.4xl"
+ },
+ {
+ "Name": "R14",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R15",
+ "Template": "AWS-EU-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "R16",
+ "Template": "AWS-EU-WEST-3-m5d.4xl"
+ },
+ {
+ "Name": "R17",
+ "Template": "AWS-US-EAST-1-m5d.4xl"
+ },
+ {
+ "Name": "R18",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "R19",
+ "Template": "AWS-CA-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "R20",
+ "Template": "AWS-AP-SOUTHEAST-1-m5d.4xl"
+ }
+ ]
+}
diff --git a/test/testdata/nettemplates/CompactCert.json b/test/testdata/nettemplates/CompactCert.json
deleted file mode 100644
index f4f86e261..000000000
--- a/test/testdata/nettemplates/CompactCert.json
+++ /dev/null
@@ -1,41 +0,0 @@
-{
- "Genesis": {
- "NetworkName": "tbd",
- "ConsensusProtocol": "test-fast-compactcert",
- "LastPartKeyRound": 3000,
- "Wallets": [
- { "Name": "Wallet0", "Stake": 10, "Online": true },
- { "Name": "Wallet1", "Stake": 10, "Online": true },
- { "Name": "Wallet2", "Stake": 10, "Online": true },
- { "Name": "Wallet3", "Stake": 10, "Online": true },
- { "Name": "Wallet4", "Stake": 10, "Online": true },
- { "Name": "Wallet5", "Stake": 10, "Online": true },
- { "Name": "Wallet6", "Stake": 10, "Online": true },
- { "Name": "Wallet7", "Stake": 10, "Online": true },
- { "Name": "Wallet8", "Stake": 10, "Online": true },
- { "Name": "Wallet9", "Stake": 10, "Online": true }
- ]
- },
- "Nodes": [
- {
- "Name": "Relay0",
- "IsRelay": true,
- "Wallets": []
- },
- {
- "Name": "Relay1",
- "IsRelay": true,
- "Wallets": []
- },
- { "Name": "Node0", "Wallets": [ { "Name": "Wallet0", "ParticipationOnly": false } ] },
- { "Name": "Node1", "Wallets": [ { "Name": "Wallet1", "ParticipationOnly": false } ] },
- { "Name": "Node2", "Wallets": [ { "Name": "Wallet2", "ParticipationOnly": false } ] },
- { "Name": "Node3", "Wallets": [ { "Name": "Wallet3", "ParticipationOnly": false } ] },
- { "Name": "Node4", "Wallets": [ { "Name": "Wallet4", "ParticipationOnly": false } ] },
- { "Name": "Node5", "Wallets": [ { "Name": "Wallet5", "ParticipationOnly": false } ] },
- { "Name": "Node6", "Wallets": [ { "Name": "Wallet6", "ParticipationOnly": false } ] },
- { "Name": "Node7", "Wallets": [ { "Name": "Wallet7", "ParticipationOnly": false } ] },
- { "Name": "Node8", "Wallets": [ { "Name": "Wallet8", "ParticipationOnly": false } ] },
- { "Name": "Node9", "Wallets": [ { "Name": "Wallet9", "ParticipationOnly": false } ] }
- ]
-}
diff --git a/test/testdata/nettemplates/RichAccountStateProof.json b/test/testdata/nettemplates/RichAccountStateProof.json
new file mode 100644
index 000000000..e908ec807
--- /dev/null
+++ b/test/testdata/nettemplates/RichAccountStateProof.json
@@ -0,0 +1,31 @@
+{
+ "Genesis": {
+ "NetworkName": "tbd",
+ "ConsensusProtocol": "test-fast-stateproofs",
+ "LastPartKeyRound": 100,
+ "Wallets": [
+ { "Name": "richWallet", "Stake": 39, "Online": true },
+ { "Name": "Wallet1", "Stake": 20, "Online": true },
+ { "Name": "Wallet2", "Stake": 20, "Online": true },
+ { "Name": "Wallet3", "Stake": 20, "Online": true },
+ { "Name": "poorWallet", "Stake": 1, "Online": true }
+ ]
+ },
+ "Nodes": [
+ {
+ "Name": "Relay0",
+ "IsRelay": true,
+ "Wallets": []
+ },
+ {
+ "Name": "Relay1",
+ "IsRelay": true,
+ "Wallets": []
+ },
+ { "Name": "richNode", "Wallets": [ { "Name": "richWallet", "ParticipationOnly": false } ] },
+ { "Name": "Node1", "Wallets": [ { "Name": "Wallet1", "ParticipationOnly": false } ] },
+ { "Name": "Node2", "Wallets": [ { "Name": "Wallet2", "ParticipationOnly": false } ] },
+ { "Name": "Node3", "Wallets": [ { "Name": "Wallet3", "ParticipationOnly": false } ] },
+ { "Name": "poorNode", "Wallets": [ { "Name": "poorWallet", "ParticipationOnly": false } ] }
+ ]
+}
diff --git a/test/testdata/nettemplates/StateProof.json b/test/testdata/nettemplates/StateProof.json
new file mode 100644
index 000000000..1194af643
--- /dev/null
+++ b/test/testdata/nettemplates/StateProof.json
@@ -0,0 +1,31 @@
+{
+ "Genesis": {
+ "NetworkName": "tbd",
+ "ConsensusProtocol": "test-fast-stateproofs",
+ "LastPartKeyRound": 100,
+ "Wallets": [
+ { "Name": "Wallet0", "Stake": 20, "Online": true },
+ { "Name": "Wallet1", "Stake": 20, "Online": true },
+ { "Name": "Wallet2", "Stake": 20, "Online": true },
+ { "Name": "Wallet3", "Stake": 20, "Online": true },
+ { "Name": "Wallet4", "Stake": 20, "Online": true }
+ ]
+ },
+ "Nodes": [
+ {
+ "Name": "Relay0",
+ "IsRelay": true,
+ "Wallets": []
+ },
+ {
+ "Name": "Relay1",
+ "IsRelay": true,
+ "Wallets": []
+ },
+ { "Name": "Node0", "Wallets": [ { "Name": "Wallet0", "ParticipationOnly": false } ] },
+ { "Name": "Node1", "Wallets": [ { "Name": "Wallet1", "ParticipationOnly": false } ] },
+ { "Name": "Node2", "Wallets": [ { "Name": "Wallet2", "ParticipationOnly": false } ] },
+ { "Name": "Node3", "Wallets": [ { "Name": "Wallet3", "ParticipationOnly": false } ] },
+ { "Name": "Node4", "Wallets": [ { "Name": "Wallet4", "ParticipationOnly": false } ] }
+ ]
+}
diff --git a/test/testdata/nettemplates/StateProofMultiWallets.json b/test/testdata/nettemplates/StateProofMultiWallets.json
new file mode 100644
index 000000000..e71bd1491
--- /dev/null
+++ b/test/testdata/nettemplates/StateProofMultiWallets.json
@@ -0,0 +1,63 @@
+{
+ "Genesis": {
+ "NetworkName": "tbd",
+ "ConsensusProtocol": "test-fast-stateproofs",
+ "LastPartKeyRound": 3000,
+ "Wallets": [
+ { "Name": "Wallet0", "Stake": 5, "Online": true },
+ { "Name": "Wallet1", "Stake": 5, "Online": true },
+ { "Name": "Wallet2", "Stake": 5, "Online": true },
+ { "Name": "Wallet3", "Stake": 5, "Online": true },
+ { "Name": "Wallet4", "Stake": 5, "Online": true },
+ { "Name": "Wallet5", "Stake": 5, "Online": true },
+ { "Name": "Wallet6", "Stake": 5, "Online": true },
+ { "Name": "Wallet7", "Stake": 5, "Online": true },
+ { "Name": "Wallet8", "Stake": 5, "Online": true },
+ { "Name": "Wallet9", "Stake": 5, "Online": true },
+ { "Name": "Wallet10", "Stake": 5, "Online": true },
+ { "Name": "Wallet11", "Stake": 5, "Online": true },
+ { "Name": "Wallet12", "Stake": 5, "Online": true },
+ { "Name": "Wallet13", "Stake": 5, "Online": true },
+ { "Name": "Wallet14", "Stake": 5, "Online": true },
+ { "Name": "Wallet15", "Stake": 5, "Online": true },
+ { "Name": "Wallet16", "Stake": 5, "Online": true },
+ { "Name": "Wallet17", "Stake": 5, "Online": true },
+ { "Name": "Wallet18", "Stake": 5, "Online": true },
+ { "Name": "Wallet19", "Stake": 5, "Online": true }
+ ]
+ },
+ "Nodes": [
+ {
+ "Name": "Relay0",
+ "IsRelay": true,
+ "Wallets": []
+ },
+ {
+ "Name": "Relay1",
+ "IsRelay": true,
+ "Wallets": []
+ },
+ { "Name": "Node0", "Wallets": [
+ { "Name": "Wallet0", "ParticipationOnly": false },
+ { "Name": "Wallet1", "ParticipationOnly": false },
+ { "Name": "Wallet2", "ParticipationOnly": false },
+ { "Name": "Wallet3", "ParticipationOnly": false },
+ { "Name": "Wallet4", "ParticipationOnly": false },
+ { "Name": "Wallet5", "ParticipationOnly": false },
+ { "Name": "Wallet6", "ParticipationOnly": false },
+ { "Name": "Wallet7", "ParticipationOnly": false },
+ { "Name": "Wallet8", "ParticipationOnly": false },
+ { "Name": "Wallet9", "ParticipationOnly": false } ] },
+ { "Name": "Node1", "Wallets": [
+ { "Name": "Wallet10", "ParticipationOnly": false },
+ { "Name": "Wallet11", "ParticipationOnly": false },
+ { "Name": "Wallet12", "ParticipationOnly": false },
+ { "Name": "Wallet13", "ParticipationOnly": false },
+ { "Name": "Wallet14", "ParticipationOnly": false },
+ { "Name": "Wallet15", "ParticipationOnly": false },
+ { "Name": "Wallet16", "ParticipationOnly": false },
+ { "Name": "Wallet17", "ParticipationOnly": false },
+ { "Name": "Wallet18", "ParticipationOnly": false },
+ { "Name": "Wallet19", "ParticipationOnly": false }] }
+ ]
+}
diff --git a/tools/debug/algodump/main.go b/tools/debug/algodump/main.go
index b5f95a1be..e3ebba922 100644
--- a/tools/debug/algodump/main.go
+++ b/tools/debug/algodump/main.go
@@ -140,7 +140,7 @@ func setDumpHandlers(n network.GossipNode) {
h := []network.TaggedMessageHandler{
{Tag: protocol.AgreementVoteTag, MessageHandler: &dh},
- {Tag: protocol.CompactCertSigTag, MessageHandler: &dh},
+ {Tag: protocol.StateProofSigTag, MessageHandler: &dh},
{Tag: protocol.MsgOfInterestTag, MessageHandler: &dh},
{Tag: protocol.MsgDigestSkipTag, MessageHandler: &dh},
{Tag: protocol.NetPrioResponseTag, MessageHandler: &dh},
diff --git a/util/db/dbutil.go b/util/db/dbutil.go
index 85c8804b2..551a32f34 100644
--- a/util/db/dbutil.go
+++ b/util/db/dbutil.go
@@ -23,6 +23,7 @@ package db
import (
"context"
"database/sql"
+ "errors"
"fmt"
"reflect"
"runtime"
@@ -91,7 +92,7 @@ func MakeErasableAccessor(dbfilename string) (Accessor, error) {
}
func makeErasableAccessor(dbfilename string, readOnly bool) (Accessor, error) {
- return makeAccessorImpl(dbfilename, readOnly, false, []string{"_secure_delete=on"})
+ return makeAccessorImpl(dbfilename, readOnly, false, []string{"_secure_delete=on", "_journal_mode=wal"})
}
func makeAccessorImpl(dbfilename string, readOnly bool, inMemory bool, params []string) (Accessor, error) {
@@ -207,6 +208,8 @@ func (db *Accessor) IsSharedCacheConnection() bool {
// Atomic executes a piece of code with respect to the database atomically.
// For transactions where readOnly is false, sync determines whether or not to wait for the result.
+// The return error of fn should be a native sqlite3.Error type or an error wrapping it.
+// DO NOT return a custom error - the internal logic of Atmoic expects an sqlite error and uses that value.
func (db *Accessor) Atomic(fn idemFn, extras ...interface{}) (err error) {
return db.atomic(fn, extras...)
}
@@ -386,8 +389,12 @@ func (db *Accessor) GetPageSize(ctx context.Context) (pageSize uint64, err error
// dbretry returns true if the error might be temporary
func dbretry(obj error) bool {
- err, ok := obj.(sqlite3.Error)
- return ok && (err.Code == sqlite3.ErrLocked || err.Code == sqlite3.ErrBusy)
+ var sqliteErr sqlite3.Error
+ if errors.As(obj, &sqliteErr) {
+ return sqliteErr.Code == sqlite3.ErrLocked || sqliteErr.Code == sqlite3.ErrBusy
+ }
+
+ return false // Not an sqlite error type
}
// IsErrBusy examine the input inerr variable of type error and determine if it's a sqlite3 error for the ErrBusy error code.
diff --git a/util/db/dbutil_test.go b/util/db/dbutil_test.go
index 12d73a86c..5454f73ba 100644
--- a/util/db/dbutil_test.go
+++ b/util/db/dbutil_test.go
@@ -21,10 +21,11 @@ import (
"database/sql"
"errors"
"fmt"
- "io/ioutil"
+ "math/rand"
"os"
"path/filepath"
"runtime"
+ "strings"
"sync/atomic"
"testing"
"time"
@@ -241,11 +242,7 @@ func TestDBConcurrencyRW(t *testing.T) {
dbFolder := "/dev/shm"
os := runtime.GOOS
if os == "darwin" {
- var err error
- dbFolder, err = ioutil.TempDir("", "TestDBConcurrencyRW")
- if err != nil {
- panic(err)
- }
+ dbFolder = t.TempDir()
}
fn := fmt.Sprintf("/%s.%d.sqlite3", t.Name(), crypto.RandUint64())
@@ -477,3 +474,103 @@ func TestReadingWhileWriting(t *testing.T) {
require.Equal(t, 2, count)
}
+
+// using Write-Ahead Logging (WAL)
+func TestLockingTableWhileWritingWAL(t *testing.T) {
+ testLockingTableWhileWriting(t, true)
+}
+
+// using the default Rollback Journal
+func TestLockingTableWhileWritingJournal(t *testing.T) {
+ testLockingTableWhileWriting(t, false)
+}
+
+// testLockingTableWhileWriting tests the locking mechanism when one connection writes to a specific database table, and another reads from a different table.
+// Using the old journaling method, a write-lock completely locks the database file for other connections, however, if we use
+// WAL mode instead, locking a specific table is possible, making concurrent reads more performant.
+func testLockingTableWhileWriting(t *testing.T, useWAL bool) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ dbParams := []string{"_secure_delete=on"} // not required but used in ErasableAccessor, so I'd like it to be tested here as well
+ if useWAL {
+ dbParams = []string{"_secure_delete=on", "_journal_mode=wal"}
+ }
+
+ dbName := strings.Replace(t.Name(), "/", "_", -1) + ".sqlite3"
+
+ writeAcc, err := makeAccessorImpl(dbName, false, false, dbParams)
+ a.NoError(err)
+ defer os.Remove(dbName)
+ defer os.Remove(dbName + "-shm")
+ defer os.Remove(dbName + "-wal")
+ defer writeAcc.Close()
+
+ _, err = writeAcc.Handle.Exec(`CREATE TABLE foo (pk INTEGER PRIMARY KEY, a BLOB, b BLOB)`)
+ a.NoError(err)
+ _, err = writeAcc.Handle.Exec(`CREATE TABLE bar (pk INTEGER PRIMARY KEY, a INTEGER)`)
+ a.NoError(err)
+ _, err = writeAcc.Handle.Exec(`INSERT INTO bar (pk,a) VALUES (1,234)`)
+ a.NoError(err)
+
+ rands := randStringBytes(1024 * 1024 * 40) // 40MB string
+
+ for i := 1; i <= 2; i++ { // Insert some huge blobs
+ err = writeAcc.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ _, err := tx.Exec("INSERT INTO foo (pk, a, b) VALUES (?, ?, ?)", i, rands, rands)
+ return err
+
+ })
+ a.NoError(err)
+ }
+
+ go func() { // Goroutine reading periodically from a table different from the one being written to.
+ readAcc, err := makeAccessorImpl(dbName, true, false, dbParams)
+ a.NoError(err)
+ defer readAcc.Close()
+
+ for i := 0; i < 40; i++ {
+ time.Sleep(time.Second / 2)
+ fmt.Printf("Reading bar - %d\n", i)
+
+ var x int
+ err = readAcc.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ row := tx.QueryRow(`SELECT a FROM bar WHERE pk=1`)
+ err = row.Scan(&x)
+ if useWAL {
+ a.NoError(err)
+ a.Equal(234, x)
+ } else {
+ if err != nil { // database should be locked very often, but not every time (probabilistic)
+ fmt.Printf("SELECT query failed: %v\n", err)
+ a.ErrorContains(err, "database is locked")
+ }
+ }
+ return err
+ })
+ if useWAL {
+ a.NoError(err)
+ }
+ }
+ }()
+
+ for i := 0; i < 20; i++ { // Update the huge blobs with changing sizes (hold the write-lock for longer than 1 second)
+ fmt.Printf("Updating foo - %d\n", i)
+ rands = rands[1:]
+ err = writeAcc.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ _, err := tx.Exec("UPDATE foo SET a=?, b=? WHERE pk=?", rands, rands, 1)
+ return err
+ })
+ a.NoError(err)
+ }
+}
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+func randStringBytes(n uint) string {
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = letterBytes[rand.Intn(len(letterBytes))]
+ }
+ return string(b)
+}
diff --git a/util/db/initialize.go b/util/db/initialize.go
index 71141707c..36bf83770 100644
--- a/util/db/initialize.go
+++ b/util/db/initialize.go
@@ -21,6 +21,8 @@ import (
"database/sql"
"errors"
"fmt"
+
+ "github.com/mattn/go-sqlite3"
)
// Migration is used to upgrade a database from one version to the next.
@@ -32,9 +34,17 @@ type Migration func(ctx context.Context, tx *sql.Tx, newDatabase bool) error
// The Migration slice is ordered and must contain all prior migrations
// in order to determine which need to be called.
func Initialize(accessor Accessor, migrations []Migration) error {
- return accessor.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ err := accessor.Atomic(func(ctx context.Context, tx *sql.Tx) error {
return InitializeWithContext(ctx, tx, migrations)
})
+
+ var sqlError *sqlite3.Error
+ if errors.As(err, &sqlError) {
+ return fmt.Errorf("%w. Sql error - Code: %d, Extended Code: %d", err, sqlError.Code, sqlError.ExtendedCode)
+ }
+
+ return err
+
}
// InitializeWithContext creates or upgrades a DB accessor.
diff --git a/util/db/queryable.go b/util/db/interfaces.go
index c76e506b3..d607c08d6 100644
--- a/util/db/queryable.go
+++ b/util/db/interfaces.go
@@ -17,6 +17,7 @@
package db
import (
+ "context"
"database/sql"
)
@@ -31,5 +32,14 @@ import (
type Queryable interface {
Prepare(query string) (*sql.Stmt, error)
Query(query string, args ...interface{}) (*sql.Rows, error)
+ QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
QueryRow(query string, args ...interface{}) *sql.Row
+ QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
+}
+
+// Executable is similar but has write methods as well.
+type Executable interface {
+ Queryable
+ Exec(query string, args ...interface{}) (sql.Result, error)
+ ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
}